Example #1

import json
import os
import sys
import torch
from pathlib import Path
# project-local pieces; import paths and the channel constant are assumed,
# not shown in the snippet
from networks import UNetD        # hypothetical path
from datasets import LDCTTest512  # hypothetical path
from train import test_all        # hypothetical path
_C = 1  # channel count; assumed (LDCT slices are single-channel)
def main():
    # set parameters
    with open('./configs/LDCT_test.json', 'r') as f:
        args = json.load(f)

    # set the available GPUs
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args['gpu_id'])

    # build up the denoiser
    netD = UNetD(_C, wf=args['wf'], depth=args['depth']).cuda()
    net = {'D': netD}

    if Path(args['model_path']).is_file():
        print('=> Loading checkpoint {:s}'.format(str(Path(
            args['model_path']))))
        checkpoint = torch.load(str(Path(args['model_path'])),
                                map_location='cpu')
        netD.load_state_dict(checkpoint['model_state_dict']['D'])
        print('=> Loaded checkpoint {:s}'.format(args['model_path']))

        if Path(args['log_dir']).is_dir() and len(
                os.listdir(Path(args['log_dir']))) > 0:
            sys.exit('Log directory {:s} is not empty, please clean it first!'.format(args['log_dir']))
            # shutil.rmtree(args['log_dir'])
        if not Path(args['log_dir']).is_dir():
            Path(args['log_dir']).mkdir()

    else:
        sys.exit('Please provide a correct model path!')

    # build the dataset
    # datasets = {'train': BenchmarkTrain(h5_file=args['SIDD_train_h5'],
    #                                     length=5000*args['batch_size']*args['num_critic'],
    #                                     pch_size=args['patch_size'],
    #                                     mask=False),
    #             'val': BenchmarkTest(args['SIDD_test_h5'])}
    datasets = {'val': LDCTTest512(args['SIDD_test_h5'])}

    # test model
    print('\nBegin testing with GPU: ' + str(args['gpu_id']))
    test_all(net, datasets, args)
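
For reference, a hedged sketch of the JSON config this script reads. The key names are exactly the ones consumed above (gpu_id, wf, depth, model_path, log_dir, SIDD_test_h5); every value and path below is illustrative, not taken from the repo.

import json

config = {'gpu_id': 0,
          'wf': 32,                                      # base U-Net width (assumed)
          'depth': 5,                                    # U-Net depth (assumed)
          'model_path': './model_states/LDCT_model.pt',  # hypothetical path
          'log_dir': './logs_LDCT_test',                 # hypothetical path
          'SIDD_test_h5': './data/LDCT_test.h5'}         # hypothetical path
with open('./configs/LDCT_test.json', 'w') as f:
    json.dump(config, f, indent=2)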
Example #2

import argparse
import sys
import torch
# project-local model; the import path is assumed, not shown in the snippet
from networks import UNetD   # hypothetical path
from scipy.io import loadmat
from skimage import img_as_float32, img_as_ubyte
from matplotlib import pyplot as plt
from utils import PadUNet

parser = argparse.ArgumentParser()
parser.add_argument(
    '--model',
    type=str,
    default='GDANet+',
    help="Model selection: GDANet or GDANet+, (default:GDANet+)")
args = parser.parse_args()

# build the network
dep_U = 5
net = UNetD(3, wf=32, depth=dep_U).cuda()

# load the pretrained model
if args.model.lower() == 'gdanet':
    net.load_state_dict(
        torch.load('./model_states/GDANet.pt', map_location='cpu')['D'])
elif args.model.lower() == 'gdanet+':
    net.load_state_dict(
        torch.load('./model_states/GDANetPlus_fake025.pt', map_location='cpu'))
else:
    sys.exit('Please input a correct model name!')

# read the images
im_noisy = loadmat('./test_data/DND/1.mat')['im_noisy']

# denoising
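The snippet is cut off at the denoising step; a hedged continuation, assuming PadUNet pads H and W to multiples of 2**(dep_U-1) via pad() and undoes it via pad_inverse(), and that UNetD predicts the noise residual (both assumptions, mirrored from the other examples on this page):

import numpy as np

net.eval()
im_noisy = img_as_float32(im_noisy)
inputs = torch.from_numpy(im_noisy.transpose((2, 0, 1))[np.newaxis, ]).cuda()
with torch.no_grad():
    padder = PadUNet(inputs, dep_U=dep_U)       # assumed pad()/pad_inverse() API
    inputs_pad = padder.pad()
    outputs_pad = inputs_pad - net(inputs_pad)  # assumed residual prediction
    outputs = padder.pad_inverse(outputs_pad).clamp_(0.0, 1.0)
im_denoise = img_as_ubyte(outputs.cpu().numpy().squeeze().transpose((1, 2, 0)))
plt.imshow(im_denoise)
plt.show()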
Example #3

import json
import os
import shutil
import sys
from math import ceil
from pathlib import Path
import torch
import torch.optim as optim
# project-local pieces; import paths and the channel constant are assumed
from networks import UNetD, UNetG                   # hypothetical path
from datasets import BenchmarkTrain, BenchmarkTest  # hypothetical path
from train import train_model                       # hypothetical path
_C = 3  # channel count; assumed (sRGB images)
def main():
    # set parameters
    with open('./configs/DANetPlus.json', 'r') as f:
        args = json.load(f)

    # set the available GPUs
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args['gpu_id'])

    # build the network
    net = UNetD(_C, wf=args['wf'], depth=args['depth']).cuda()
    netG = UNetG(_C, wf=args['wf'], depth=args['depth']).cuda()
    # load the generator
    netG.load_state_dict(torch.load(args['pretrain'], map_location='cpu')['G'])

    # optimizer
    optimizer = optim.Adam(net.parameters(), lr=args['lr_D'])
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                               args['milestones'],
                                               gamma=0.5)

    if args['resume']:
        if Path(args['resume']).is_file():
            print('=> Loading checkpoint {:s}'.format(str(Path(
                args['resume']))))
            checkpoint = torch.load(args['resume'])
            args['epoch_start'] = checkpoint['epoch']
            args['step'] = checkpoint['step']
            args['step_img'] = checkpoint['step_img']
            args['clip_normD'] = checkpoint['clip_grad']
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict'])
            net.load_state_dict(checkpoint['model_state_dict'])
            print('=> Loaded checkpoint {:s} (epoch {:d})'.format(
                args['resume'], checkpoint['epoch']))
        else:
            sys.exit('Please provide a correct model path!')
    else:
        args['epoch_start'] = 0
        if Path(args['log_dir']).is_dir():
            shutil.rmtree(args['log_dir'])
        Path(args['log_dir']).mkdir()
        if Path(args['model_dir']).is_dir():
            shutil.rmtree(args['model_dir'])
        Path(args['model_dir']).mkdir()

    for key, value in args.items():
        print('{:<15s}: {:s}'.format(key, str(value)))

    # build the dataset
    num_iters = ceil(8000 * (args['fake_ratio'] + 1))
    datasets = {
        'train':
        BenchmarkTrain(h5_file=args['SIDD_train_h5'],
                       length=num_iters * args['batch_size'],
                       pch_size=args['patch_size'],
                       mask=False),
        'val':
        BenchmarkTest(args['SIDD_test_h5'])
    }

    # train model
    print('\nBegin training with GPU: ' + str(args['gpu_id']))
    train_model(net, netG, datasets, optimizer, scheduler, args)
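
For reference, a hedged sketch of the save call that would produce a checkpoint with the fields the resume branch above consumes; it would live inside train_model's epoch loop, and the loop variables (epoch, step, step_img, clip_normD) and the file name are assumed.

checkpoint = {'epoch': epoch + 1,
              'step': step,
              'step_img': step_img,
              'clip_grad': clip_normD,
              'model_state_dict': net.state_dict(),
              'optimizer_state_dict': optimizer.state_dict(),
              'lr_scheduler_state_dict': scheduler.state_dict()}
torch.save(checkpoint, str(Path(args['model_dir']) / 'model_{:d}.pt'.format(epoch + 1)))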
Example #4

import argparse
import numpy as np
import torch
from scipy.io import loadmat
from skimage import img_as_float32, img_as_ubyte
# project-local helpers; import paths assumed
from networks import UNetD                                       # hypothetical path
from utils import data_augmentation, inverse_data_augmentation   # hypothetical path

# The snippet is truncated above; a plausible opening, reconstructed from the
# attributes used below (args.flip, args.checkpoint) and the help text that
# survives on the next line.
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', type=str, metavar='PATH',
                    help="Path of the pretrained model checkpoint")
parser.add_argument('--flip',
                    default=None,
                    help="Data ensemble when testing (default: None)")
parser.add_argument('--save_path',
                    default='',
                    type=str,
                    metavar='PATH',
                    help="path to save the denoise result (default: None)")
args = parser.parse_args()

noisy_mat = loadmat(
    '/ssd1t/SIDD/BenchmarkNoisyBlocksSrgb.mat')['BenchmarkNoisyBlocksSrgb']
num_images, num_blocks, H, W, C = noisy_mat.shape

denoise_res = np.zeros_like(noisy_mat)

# load the model
net = UNetD(3)
net = torch.nn.DataParallel(net).cuda()
net.load_state_dict(torch.load(args.checkpoint, map_location='cpu'))

net.eval()
total_time = 0
for ii in range(num_images):
    print('Processing image {:d}'.format(ii + 1))
    for jj in range(num_blocks):
        pch_noisy = img_as_float32(noisy_mat[ii, jj, ])  # 256 x 256 x 3
        if args.flip:
            pch_denoise = np.zeros_like(pch_noisy)
            for flag in range(8):
                pch_noisy_flag = np.ascontiguousarray(
                    data_augmentation(pch_noisy, flag))
                pch_noisy_flag = torch.from_numpy(  # HWC -> NCHW, completing the truncated line as in Example #7
                    pch_noisy_flag.transpose((2, 0, 1))[np.newaxis, ]).cuda()
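The snippet is cut off here; a hedged continuation of the flip-ensemble loop, assuming inverse_data_augmentation undoes each of the eight flips/rotations and that the network predicts the noise residual (both assumptions; timing and the non-flip branch are omitted):

                with torch.no_grad():
                    out_flag = pch_noisy_flag - net(pch_noisy_flag)  # assumed residual prediction
                out_flag = out_flag.cpu().numpy().squeeze().transpose((1, 2, 0))
                pch_denoise += inverse_data_augmentation(out_flag, flag)
            pch_denoise /= 8
            denoise_res[ii, jj, ] = img_as_ubyte(pch_denoise.clip(0, 1))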
Example #5

import json
import os
import sys
from pathlib import Path
import torch
import torch.optim as optim
# project-local pieces; import paths and the channel constant are assumed
from networks import UNetD, UNetG, DiscriminatorLinear   # hypothetical path
from datasets import LDCTTrain, LDCTTest                 # hypothetical path
from utils import get_gausskernel                        # hypothetical path
from train import train_epoch                            # hypothetical path
_C = 1  # channel count; assumed (LDCT slices are single-channel)
def main():
    # set parameters
    with open('./configs/DANet.json', 'r') as f:
        args = json.load(f)

    # set the available GPUs
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args['gpu_id'])

    # build up the denoiser
    netD = UNetD(_C, wf=args['wf'], depth=args['depth']).cuda()
    # build up the generator
    netG = UNetG(_C, wf=args['wf'], depth=args['depth']).cuda()
    # build up the discriminator
    # netP = DiscriminatorLinear(_C*2, ndf=args['ndf']).cuda()
    netP = DiscriminatorLinear(2, ndf=args['ndf']).cuda()
    net = {'D': netD, 'G': netG, 'P': netP}

    # optimizers
    optimizerD = optim.Adam(netD.parameters(), lr=args['lr_D'])
    optimizerG = optim.Adam(netG.parameters(), lr=args['lr_G'], betas=(0.5, 0.90))
    optimizerP = optim.Adam(netP.parameters(), lr=args['lr_P'], betas=(0.5, 0.90))
    optimizer = {'D': optimizerD, 'G': optimizerG, 'P': optimizerP}
    # schedulers
    schedulerD = optim.lr_scheduler.MultiStepLR(optimizerD, args['milestones'], gamma=0.5)
    schedulerG = optim.lr_scheduler.MultiStepLR(optimizerG, args['milestones'], gamma=0.5)
    schedulerP = optim.lr_scheduler.MultiStepLR(optimizerP, args['milestones'], gamma=0.5)
    scheduler = {'D': schedulerD, 'G': schedulerG, 'P': schedulerP}

    if args['resume']:
        if Path(args['resume']).is_file():
            print('=> Loading checkpoint {:s}'.format(str(Path(args['resume']))))
            checkpoint = torch.load(str(Path(args['resume'])), map_location='cpu')
            args['epoch_start'] = checkpoint['epoch']
            args['step'] = checkpoint['step']
            args['step_img'] = checkpoint['step_img']
            optimizerD.load_state_dict(checkpoint['optimizer_state_dict']['D'])
            optimizerG.load_state_dict(checkpoint['optimizer_state_dict']['G'])
            optimizerP.load_state_dict(checkpoint['optimizer_state_dict']['P'])
            schedulerD.load_state_dict(checkpoint['lr_scheduler_state_dict']['D'])
            schedulerG.load_state_dict(checkpoint['lr_scheduler_state_dict']['G'])
            schedulerP.load_state_dict(checkpoint['lr_scheduler_state_dict']['P'])
            netD.load_state_dict(checkpoint['model_state_dict']['D'])
            netG.load_state_dict(checkpoint['model_state_dict']['G'])
            netP.load_state_dict(checkpoint['model_state_dict']['P'])
            print('=> Loaded checkpoint {:s} (epoch {:d})'.format(args['resume'], checkpoint['epoch']))
        else:
            sys.exit('Please provide a correct model path!')
    else:
        args['epoch_start'] = 0
        if Path(args['log_dir']).is_dir():
            sys.exit('Log directory {:s} already exists, please remove it first!'.format(args['log_dir']))
            # shutil.rmtree(args['log_dir'])
        Path(args['log_dir']).mkdir()
        if Path(args['model_dir']).is_dir():
            sys.exit('Model directory {:s} already exists, please remove it first!'.format(args['model_dir']))
            # shutil.rmtree(args['model_dir'])
        Path(args['model_dir']).mkdir()

    for key, value in args.items():
        print('{:<15s}: {:s}'.format(key, str(value)))

    # build the dataset
    # datasets = {'train': BenchmarkTrain(h5_file=args['SIDD_train_h5'],
    #                                     length=5000*args['batch_size']*args['num_critic'],
    #                                     pch_size=args['patch_size'],
    #                                     mask=False),
    #             'val': BenchmarkTest(args['SIDD_test_h5'])}
    datasets = {'train': LDCTTrain(h5_file=args['SIDD_train_h5'],
                                   length=5000*args['batch_size']*args['num_critic'],
                                   pch_size=args['patch_size'],
                                   mask=False),
                'val': LDCTTest(args['SIDD_test_h5'])}

    # build the Gaussian kernel for loss
    global kernel
    kernel = get_gausskernel(args['ksize'], chn=_C)

    # train model
    print('\nBegin training with GPU: ' + str(args['gpu_id']))
    train_epoch(net, datasets, optimizer, scheduler, args)
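
get_gausskernel comes from the repo's utils; for reference, a minimal sketch of an equivalent helper, under the assumption that it returns a normalized (chn, 1, ksize, ksize) Gaussian for depthwise blurring inside the loss (the sigma rule is OpenCV's default, also an assumption):

def gauss_kernel_sketch(ksize, chn=1, sigma=None):
    # hypothetical stand-in for the repo's get_gausskernel
    if sigma is None:
        sigma = 0.3 * ((ksize - 1) * 0.5 - 1) + 0.8  # OpenCV's default rule (assumed)
    ax = torch.arange(ksize, dtype=torch.float32) - (ksize - 1) / 2
    g1d = torch.exp(-ax ** 2 / (2 * sigma ** 2))
    g2d = g1d[:, None] * g1d[None, :]
    g2d = g2d / g2d.sum()  # normalize so the blur preserves mean intensity
    return g2d.expand(chn, 1, ksize, ksize).clone()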
Example #6

import json
import os
import shutil
import sys
from math import ceil
from pathlib import Path
import torch
import torch.optim as optim
import torch.utils.data as uData
# project-local pieces; import paths and the channel constant are assumed
from networks import UNetD, UNetG                                          # hypothetical path
from datasets import BenchmarkTrain, BenchmarkTest, PolyuTrain, FakeTrain  # hypothetical path
from train import train_model                                              # hypothetical path
_C = 3  # channel count; assumed (sRGB images)
def main():
    # set parameters
    with open('./configs/GDANetPlus.json', 'r') as f:
        args = json.load(f)

    # set the available GPUs
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args['gpu_id'])

    # build the network
    net = UNetD(_C, wf=args['wf'], depth=args['depth']).cuda()
    netG = UNetG(_C, wf=args['wf'], depth=args['depth']).cuda()
    # load the generator
    netG.load_state_dict(torch.load(args['pretrain'], map_location='cpu')['G'])

    # optimizer
    optimizer = optim.Adam(net.parameters(), lr=args['lr_D'])
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                               args['milestones'],
                                               gamma=0.5)

    if args['resume']:
        if Path(args['resume']).is_file():
            print('=> Loading checkpoint {:s}'.format(str(Path(
                args['resume']))))
            checkpoint = torch.load(args['resume'])
            args['epoch_start'] = checkpoint['epoch']
            args['step'] = checkpoint['step']
            args['step_img'] = checkpoint['step_img']
            args['clip_normD'] = checkpoint['clip_grad']
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict'])
            net.load_state_dict(checkpoint['model_state_dict'])
            print('=> Loaded checkpoint {:s} (epoch {:d})'.format(
                args['resume'], checkpoint['epoch']))
        else:
            sys.exit('Please provide a correct model path!')
    else:
        args['epoch_start'] = 0
        if Path(args['log_dir']).is_dir():
            shutil.rmtree(args['log_dir'])
        Path(args['log_dir']).mkdir()
        if Path(args['model_dir']).is_dir():
            shutil.rmtree(args['model_dir'])
        Path(args['model_dir']).mkdir()

    for key, value in args.items():
        print('{:<15s}: {:s}'.format(key, str(value)))

    # build the dataset
    num_iters_sidd = 5000
    num_iters_renoir = 2000
    num_iters_poly = 1000
    num_iters_fake = ceil(
        (num_iters_sidd + num_iters_renoir + num_iters_poly) *
        args['fake_ratio'])
    path_list_Poly = sorted(
        [str(x) for x in Path(args['Poly_dir']).glob('*_real.JPG')])
    print('Number of images in Poly Dataset: {:d}'.format(len(path_list_Poly)))
    path_list_fake = sorted(
        [str(x) for x in Path(args['fake_dir']).glob('*/*.jpg')])
    print('Number of images in fake folder: {:d}'.format(len(path_list_fake)))
    datasets_list = [
        BenchmarkTrain(h5_file=args['SIDD_train_h5'],
                       length=num_iters_sidd * args['batch_size'],
                       pch_size=args['patch_size'],
                       mask=True),
        BenchmarkTrain(h5_file=args['Renoir_train_h5'],
                       length=num_iters_renoir * args['batch_size'],
                       pch_size=args['patch_size'],
                       mask=True),
        PolyuTrain(path_list=path_list_Poly,
                   length=num_iters_poly * args['batch_size'],
                   pch_size=args['patch_size'],
                   mask=True),
        FakeTrain(path_list=path_list_fake,
                  length=num_iters_fake * args['batch_size'],
                  pch_size=args['patch_size'])
    ]
    datasets = {
        'train': uData.ConcatDataset(datasets_list),
        'val': BenchmarkTest(args['SIDD_test_h5'])
    }

    # train model
    print('\nBegin training with GPU: ' + str(args['gpu_id']))
    train_model(net, netG, datasets, optimizer, scheduler, args)
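
A hedged sketch of how the concatenated training set would typically be consumed; ConcatDataset simply chains the four datasets, so one epoch draws (5000 + 2000 + 1000 + num_iters_fake) * batch_size patches. The num_workers value is illustrative.

train_loader = uData.DataLoader(datasets['train'],
                                batch_size=args['batch_size'],
                                shuffle=True,
                                num_workers=4,  # illustrative value
                                pin_memory=True)
for batch in train_loader:
    pass  # one optimization step per batch inside train_model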
Example #7

import argparse
import numpy as np
import torch
from scipy.io import loadmat
from skimage import img_as_float32, img_as_ubyte
from skimage.metrics import peak_signal_noise_ratio, structural_similarity
# project-local model; the import path is assumed
from networks import UNetD   # hypothetical path

# The snippet is truncated above; a plausible opening, reconstructed from the
# fragment that survives on the next lines (it mirrors Example #2's parser).
parser = argparse.ArgumentParser()
parser.add_argument('--model',
                    type=str,
                    default='DANet+',
                    help="Model selection: DANet or DANet+, (default:DANet+)")
args = parser.parse_args()

noisy_path = '/ssd1t/SIDD/ValidationNoisyBlocksSrgb.mat'
gt_path = '/ssd1t/SIDD/ValidationGtBlocksSrgb.mat'

noisy_imgs = loadmat(noisy_path)['ValidationNoisyBlocksSrgb']  # uint8 format
gt_imgs = loadmat(gt_path)['ValidationGtBlocksSrgb']

denoised_imgs = np.zeros_like(gt_imgs)
num_img, num_block, _, _, _ = noisy_imgs.shape
total_blocks = num_img * num_block

# load the network
netD = UNetD(3).cuda()
if args.model.lower() == 'danet':
    netD.load_state_dict(
        torch.load('./model_states/DANet.pt', map_location='cpu')['D'])
else:
    netD.load_state_dict(
        torch.load('./model_states/DANetPlus.pt', map_location='cpu'))
netD.eval()

psnr = ssim = 0
counter = 0
for ii in range(num_img):
    for jj in range(num_block):
        noisy_im_iter = img_as_float32(noisy_imgs[ii, jj, ].transpose(
            (2, 0, 1))[np.newaxis, ])
        gt_im_iter = gt_imgs[ii, jj, ]
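The snippet is cut off here; a hedged continuation that denoises each block and accumulates PSNR/SSIM against the ground truth (the residual subtraction mirrors the other examples, and structural_similarity's channel_axis argument requires scikit-image >= 0.19; both are assumptions):

        inputs = torch.from_numpy(noisy_im_iter).cuda()
        with torch.no_grad():
            outputs = inputs - netD(inputs)  # assumed residual prediction
        im_denoise = img_as_ubyte(outputs.clamp_(0.0, 1.0).cpu().numpy()
                                  .squeeze().transpose((1, 2, 0)))
        denoised_imgs[ii, jj, ] = im_denoise
        psnr += peak_signal_noise_ratio(gt_im_iter, im_denoise)
        ssim += structural_similarity(gt_im_iter, im_denoise, channel_axis=2)
        counter += 1

print('Mean PSNR: {:.2f} dB, mean SSIM: {:.4f}'.format(psnr / counter, ssim / counter))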