Example #1
def main():
    # set parameters
    with open('./configs/DANetPlus.json', 'r') as f:
        args = json.load(f)

    # set the available GPUs
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args['gpu_id'])

    # build the network
    net = UNetD(_C, wf=args['wf'], depth=args['depth']).cuda()
    netG = UNetG(_C, wf=args['wf'], depth=args['depth']).cuda()
    # load the generator
    netG.load_state_dict(torch.load(args['pretrain'], map_location='cpu')['G'])

    # optimizer
    optimizer = optim.Adam(net.parameters(), lr=args['lr_D'])
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                               args['milestones'],
                                               gamma=0.5)

    if args['resume']:
        if Path(args['resume']).is_file():
            print('=> Loading checkpoint {:s}'.format(str(Path(
                args['resume']))))
            checkpoint = torch.load(args['resume'])
            args['epoch_start'] = checkpoint['epoch']
            args['step'] = checkpoint['step']
            args['step_img'] = checkpoint['step_img']
            args['clip_normD'] = checkpoint['clip_grad']
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict'])
            net.load_state_dict(checkpoint['model_state_dict'])
            print('=> Loaded checkpoint {:s} (epoch {:d})'.format(
                args['resume'], checkpoint['epoch']))
        else:
            sys.exit('Please provide a correct model path!')
    else:
        args['epoch_start'] = 0
        if Path(args['log_dir']).is_dir():
            shutil.rmtree(args['log_dir'])
        Path(args['log_dir']).mkdir()
        if Path(args['model_dir']).is_dir():
            shutil.rmtree(args['model_dir'])
        Path(args['model_dir']).mkdir()

    for key, value in args.items():
        print('{:<15s}: {:s}'.format(key, str(value)))

    # making dataset
    num_iters = ceil(8000 * (args['fake_ratio'] + 1))
    datasets = {
        'train':
        BenchmarkTrain(h5_file=args['SIDD_train_h5'],
                       length=num_iters * args['batch_size'],
                       pch_size=args['patch_size'],
                       mask=False),
        'val':
        BenchmarkTest(args['SIDD_test_h5'])
    }

    # train model
    print('\nBegin training with GPU: ' + str(args['gpu_id']))
    train_model(net, netG, datasets, optimizer, scheduler, args)
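
For orientation, the sketch below writes a hypothetical `./configs/DANetPlus.json`. The key names are exactly the ones this script reads; every value is an illustrative assumption (`wf=32` and `depth=5` merely mirror Example #2), and `train_model` presumably consumes further keys that are not visible here.

import json

# All values below are illustrative assumptions, not the repository defaults.
sample_args = {
    'gpu_id': 0,                             # GPU index for CUDA_VISIBLE_DEVICES
    'wf': 32,                                # base channel width of the UNets
    'depth': 5,                              # UNet depth
    'pretrain': './model_states/GDANet.pt',  # pretrained generator (see Example #2)
    'lr_D': 1e-4,                            # denoiser learning rate
    'milestones': [20, 40],                  # epochs at which the LR is halved
    'resume': '',                            # checkpoint path, or '' to start fresh
    'log_dir': './logs',
    'model_dir': './models',
    'fake_ratio': 0.5,                       # share of generated training pairs
    'batch_size': 16,
    'patch_size': 128,
    'SIDD_train_h5': './data/SIDD_train.hdf5',
    'SIDD_test_h5': './data/SIDD_val.hdf5',
}

with open('./configs/DANetPlus.json', 'w') as f:
    json.dump(sample_args, f, indent=4)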
Example #2
# Powered by Zongsheng Yue 2020-07-10 14:38:39
'''
In this demo, we only test the model on one image from the SIDD validation dataset.
The full validation dataset can be downloaded from the following website:
    https://www.eecs.yorku.ca/~kamel/sidd/benchmark.php
'''

import torch
from cv2 import imread
from networks import UNetG, sample_generator
from skimage import img_as_float32, img_as_ubyte
from matplotlib import pyplot as plt
from utils import PadUNet

# build the network
net = UNetG(3, wf=32, depth=5).cuda()

# load the pretrained model
net.load_state_dict(
    torch.load('./model_states/GDANet.pt', map_location='cpu')['G'])

# read the image (OpenCV loads BGR, so reverse the channel axis to get RGB)
im_gt = imread('./test_data/106.jpg')[:, :, ::-1]

# denoising
inputs = torch.from_numpy(img_as_float32(im_gt).transpose(
    [2, 0, 1])).unsqueeze(0).cuda()
with torch.autograd.no_grad():
    padunet = PadUNet(inputs, dep_U=5)
    inputs_pad = padunet.pad()
    outputs_pad = sample_generator(net, inputs_pad)
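    # continuation sketch: the source snippet is truncated here; pad_inverse
    # is assumed to be the counterpart of PadUNet.pad(), and the display code
    # below is illustrative
    outputs = padunet.pad_inverse(outputs_pad)
    outputs.clamp_(0.0, 1.0)

# convert to uint8 and show the input next to the denoised result
im_denoise = img_as_ubyte(outputs.cpu().squeeze(0).permute(1, 2, 0).numpy())
plt.subplot(1, 2, 1)
plt.imshow(im_gt)
plt.title('Input')
plt.subplot(1, 2, 2)
plt.imshow(im_denoise)
plt.title('Denoised')
plt.show()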
Example #3
def main():
    # set parameters
    with open('./configs/DANet.json', 'r') as f:
        args = json.load(f)

    # set the available GPUs
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args['gpu_id'])

    # build up the denoiser
    netD = UNetD(_C, wf=args['wf'], depth=args['depth']).cuda()
    # build up the generator
    netG = UNetG(_C, wf=args['wf'], depth=args['depth']).cuda()
    # build up the discriminator
    # original RGB setting: netP = DiscriminatorLinear(_C*2, ndf=args['ndf']).cuda()
    # here _C is presumably 1 (single-channel LDCT data), so the paired input has 2 channels
    netP = DiscriminatorLinear(2, ndf=args['ndf']).cuda()
    net = {'D':netD, 'G':netG, 'P':netP}

    # optimizer
    optimizerD = optim.Adam(netD.parameters(), lr=args['lr_D'])
    optimizerG = optim.Adam(netG.parameters(), lr=args['lr_G'], betas=(0.5, 0.90))
    optimizerP = optim.Adam(netP.parameters(), lr=args['lr_P'], betas=(0.5, 0.90))
    optimizer = {'D':optimizerD, 'G':optimizerG, 'P':optimizerP}
    # schedulers
    schedulerD = optim.lr_scheduler.MultiStepLR(optimizerD, args['milestones'], gamma=0.5)
    schedulerG = optim.lr_scheduler.MultiStepLR(optimizerG, args['milestones'], gamma=0.5)
    schedulerP = optim.lr_scheduler.MultiStepLR(optimizerP, args['milestones'], gamma=0.5)
    scheduler = {'D':schedulerD, 'G':schedulerG, 'P':schedulerP}

    if args['resume']:
        if Path(args['resume']).is_file():
            print('=> Loading checkpoint {:s}'.format(str(Path(args['resume']))))
            checkpoint = torch.load(str(Path(args['resume'])), map_location='cpu')
            args['epoch_start'] = checkpoint['epoch']
            args['step'] = checkpoint['step']
            args['step_img'] = checkpoint['step_img']
            optimizerD.load_state_dict(checkpoint['optimizer_state_dict']['D'])
            optimizerG.load_state_dict(checkpoint['optimizer_state_dict']['G'])
            optimizerP.load_state_dict(checkpoint['optimizer_state_dict']['P'])
            schedulerD.load_state_dict(checkpoint['lr_scheduler_state_dict']['D'])
            schedulerG.load_state_dict(checkpoint['lr_scheduler_state_dict']['G'])
            schedulerP.load_state_dict(checkpoint['lr_scheduler_state_dict']['P'])
            netD.load_state_dict(checkpoint['model_state_dict']['D'])
            netG.load_state_dict(checkpoint['model_state_dict']['G'])
            netP.load_state_dict(checkpoint['model_state_dict']['P'])
            print('=> Loaded checkpoint {:s} (epoch {:d})'.format(args['resume'], checkpoint['epoch']))
        else:
            sys.exit('Please provide a correct model path!')
    else:
        args['epoch_start'] = 0
        if Path(args['log_dir']).is_dir():
            sys.exit('log_dir already exists: ' + str(args['log_dir']))
            # shutil.rmtree(args['log_dir'])
        Path(args['log_dir']).mkdir()
        if Path(args['model_dir']).is_dir():
            sys.exit('model_dir already exists: ' + str(args['model_dir']))
            # shutil.rmtree(args['model_dir'])
        Path(args['model_dir']).mkdir()

    for key, value in args.items():
        print('{:<15s}: {:s}'.format(key, str(value)))

    # making dataset
    # original benchmark datasets:
    # datasets = {'train':BenchmarkTrain(h5_file=args['SIDD_train_h5'],
    #                                    length=5000*args['batch_size']*args['num_critic'],
    #                                    pch_size=args['patch_size'],
    #                                    mask=False),
    #             'val':BenchmarkTest(args['SIDD_test_h5'])}
    datasets = {'train':LDCTTrain(h5_file=args['SIDD_train_h5'],
                                  length=5000*args['batch_size']*args['num_critic'],
                                  pch_size=args['patch_size'],
                                  mask=False),
                'val':LDCTTest(args['SIDD_test_h5'])}

    # build the Gaussian kernel for loss
    global kernel
    kernel = get_gausskernel(args['ksize'], chn=_C)

    # train model
    print('\nBegin training with GPU: ' + str(args['gpu_id']))
    train_epoch(net, datasets, optimizer, scheduler, args)
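
The resume branch above fixes the checkpoint layout: top-level keys 'epoch', 'step', and 'step_img', plus 'model_state_dict', 'optimizer_state_dict', and 'lr_scheduler_state_dict' dictionaries keyed by 'D', 'G', and 'P'. A minimal sketch of a matching save call follows; the function name and file naming are assumptions, and the actual saving presumably happens inside train_epoch.

from pathlib import Path
import torch

def save_checkpoint(net, optimizer, scheduler, epoch, step, step_img, model_dir):
    """Write a checkpoint in the layout the resume branch expects.
    net/optimizer/scheduler are the {'D':..., 'G':..., 'P':...} dicts above."""
    state = {
        'epoch': epoch,
        'step': step,
        'step_img': step_img,
        'model_state_dict': {k: v.state_dict() for k, v in net.items()},
        'optimizer_state_dict': {k: v.state_dict() for k, v in optimizer.items()},
        'lr_scheduler_state_dict': {k: v.state_dict() for k, v in scheduler.items()},
    }
    torch.save(state, str(Path(model_dir) / 'model_{:d}.pth'.format(epoch)))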
Example #4
def main():
    # set parameters
    with open('./configs/GDANetPlus.json', 'r') as f:
        args = json.load(f)

    # set the available GPUs
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args['gpu_id'])

    # build the network
    net = UNetD(_C, wf=args['wf'], depth=args['depth']).cuda()
    netG = UNetG(_C, wf=args['wf'], depth=args['depth']).cuda()
    # load the generator
    netG.load_state_dict(torch.load(args['pretrain'], map_location='cpu')['G'])

    # optimizer
    optimizer = optim.Adam(net.parameters(), lr=args['lr_D'])
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                               args['milestones'],
                                               gamma=0.5)

    if args['resume']:
        if Path(args['resume']).is_file():
            print('=> Loading checkpoint {:s}'.format(str(Path(
                args['resume']))))
            checkpoint = torch.load(args['resume'])
            args['epoch_start'] = checkpoint['epoch']
            args['step'] = checkpoint['step']
            args['step_img'] = checkpoint['step_img']
            args['clip_normD'] = checkpoint['clip_grad']
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict'])
            net.load_state_dict(checkpoint['model_state_dict'])
            print('=> Loaded checkpoint {:s} (epoch {:d})'.format(
                args['resume'], checkpoint['epoch']))
        else:
            sys.exit('Please provide a correct model path!')
    else:
        args['epoch_start'] = 0
        if Path(args['log_dir']).is_dir():
            shutil.rmtree(args['log_dir'])
        Path(args['log_dir']).mkdir()
        if Path(args['model_dir']).is_dir():
            shutil.rmtree(args['model_dir'])
        Path(args['model_dir']).mkdir()

    for key, value in args.items():
        print('{:<15s}: {:s}'.format(key, str(value)))

    # making dataset
    num_iters_sidd = 5000
    num_iters_renoir = 2000
    num_iters_poly = 1000
    num_iters_fake = ceil(
        (num_iters_sidd + num_iters_renoir + num_iters_poly) *
        args['fake_ratio'])
    path_list_Poly = sorted(
        [str(x) for x in Path(args['Poly_dir']).glob('*_real.JPG')])
    print('Number of images in Poly Dataset: {:d}'.format(len(path_list_Poly)))
    path_list_fake = sorted(
        [str(x) for x in Path(args['fake_dir']).glob('*/*.jpg')])
    print('Number of images in fake folder: {:d}'.format(len(path_list_fake)))
    datasets_list = [
        BenchmarkTrain(h5_file=args['SIDD_train_h5'],
                       length=num_iters_sidd * args['batch_size'],
                       pch_size=args['patch_size'],
                       mask=True),
        BenchmarkTrain(h5_file=args['Renoir_train_h5'],
                       length=num_iters_renoir * args['batch_size'],
                       pch_size=args['patch_size'],
                       mask=True),
        PolyuTrain(path_list=path_list_Poly,
                   length=num_iters_poly * args['batch_size'],
                   pch_size=args['patch_size'],
                   mask=True),
        FakeTrain(path_list=path_list_fake,
                  length=num_iters_fake * args['batch_size'],
                  pch_size=args['patch_size'])
    ]
    datasets = {
        'train': uData.ConcatDataset(datasets_list),
        'val': BenchmarkTest(args['SIDD_test_h5'])
    }

    # train model
    print('\nBegin training with GPU: ' + str(args['gpu_id']))
    train_model(net, netG, datasets, optimizer, scheduler, args)
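
Here `uData` is presumably an alias for `torch.utils.data`, so the training set is a plain ConcatDataset over the four sources. The datasets dict is handed straight to `train_model`, which presumably builds its own loaders; for completeness, a standard DataLoader setup over it would look like the sketch below (the `num_workers` value is an arbitrary choice).

from torch.utils.data import DataLoader

def make_loaders(datasets, batch_size):
    """Illustrative only; train_model presumably constructs its own loaders."""
    train_loader = DataLoader(datasets['train'], batch_size=batch_size,
                              shuffle=True, num_workers=4, pin_memory=True,
                              drop_last=True)
    val_loader = DataLoader(datasets['val'], batch_size=1, shuffle=False)
    return train_loader, val_loader

# usage sketch:
# train_loader, val_loader = make_loaders(datasets, args['batch_size'])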