Example 1
def main():
    # Build model
    print('Loading model ...\n')
    net = DnCNN(channels=1, num_of_layers=opt.num_of_layers)
    device_ids = [0]
    model = nn.DataParallel(net, device_ids=device_ids).cuda()
    model.load_state_dict(torch.load(os.path.join(opt.logdir, 'net.pth')))
    model.eval()
    # make output dirs
    os.makedirs(os.path.join('data', opt.test_data, 'noisy'), exist_ok=True)
    os.makedirs(os.path.join('data', opt.test_data, 'restored'), exist_ok=True)
    # load data info
    print('Loading data info ...\n')
    files_source = glob.glob(os.path.join('data', opt.test_data, '*.png'))
    files_source.sort()
    # process data
    psnr_test = 0
    for f in files_source:
        # image
        Img = cv2.imread(f)
        Img = normalize(np.float32(Img[:, :, 0]))
        Img = np.expand_dims(Img, 0)
        Img = np.expand_dims(Img, 1)
        ISource = torch.Tensor(Img)
        # noise
        noise = torch.FloatTensor(ISource.size()).normal_(mean=0,
                                                          std=opt.test_noiseL /
                                                          255.)
        # noisy image
        INoisy = ISource + noise
        img_noisy = INoisy.data.numpy().reshape(
            (INoisy.shape[2], INoisy.shape[3], 1))
        # clip before the uint8 cast: the added noise can push values outside [0, 1]
        img_noisy = np.clip(img_noisy * 255, 0, 255)
        cv2.imwrite(os.path.join('data', opt.test_data, 'noisy', os.path.basename(f)),
                    img_noisy.astype('uint8'))
        ISource, INoisy = Variable(ISource.cuda()), Variable(INoisy.cuda())
        with torch.no_grad():  # this can save much memory
            Out = torch.clamp(INoisy - model(INoisy), 0., 1.)
            img_res = Out.cpu().data.numpy().reshape(
                (Out.shape[2], Out.shape[3], 1))
            img_res *= 255  # Out is already clamped to [0, 1]
            cv2.imwrite(os.path.join('data', opt.test_data, 'restored', os.path.basename(f)),
                        img_res.astype('uint8'))
        ## if you are using an older version of PyTorch, torch.no_grad() may not be supported
        # ISource, INoisy = Variable(ISource.cuda(),volatile=True), Variable(INoisy.cuda(),volatile=True)
        # Out = torch.clamp(INoisy-model(INoisy), 0., 1.)
        psnr = batch_PSNR(Out, ISource, 1.)
        psnr_test += psnr
        print("%s PSNR %f" % (f, psnr))
    psnr_test /= len(files_source)
    print("\nPSNR on test data %f" % psnr_test)
Example 2
def main():
    # Build model
    print('Loading model ...\n')
    net = DnCNN(channels=1, num_of_layers=opt.num_of_layers)
    device_ids = [0]
    model = nn.DataParallel(net, device_ids=device_ids).cuda()
    model.load_state_dict(torch.load(os.path.join(opt.save_model, 'net.pth')))
    model.eval()
    # load data info
    print('Loading data info ...\n')
    files_source = glob.glob(os.path.join('data', opt.test_data, '*.png'))
    files_source.sort()
    # process data
    psnr_test = 0
    step = 0
    for f in files_source:
        # image
        Img = cv2.imread(f)
        Img = normalize(np.float32(Img[:, :, 0]))
        Img = np.expand_dims(Img, 0)
        Img = np.expand_dims(Img, 1)
        ISource = torch.Tensor(Img)
        # noise
        noise = torch.FloatTensor(ISource.size()).normal_(mean=0,
                                                          std=opt.test_noiseL /
                                                          255.)
        # noisy image
        INoisy = ISource + noise
        ISource, INoisy = Variable(ISource.cuda()), Variable(INoisy.cuda())
        with torch.no_grad():  # this can save much memory
            Out = torch.clamp(INoisy - model(INoisy), 0., 1.)
        ## if you are using an older version of PyTorch, torch.no_grad() may not be supported
        # ISource, INoisy = Variable(ISource.cuda(),volatile=True), Variable(INoisy.cuda(),volatile=True)
        # Out = torch.clamp(INoisy-model(INoisy), 0., 1.)
        psnr = batch_PSNR(Out, ISource, 1.)
        psnr_test += psnr
        print("%s PSNR %f" % (f, psnr))

        os.makedirs(opt.output, exist_ok=True)
        cv2.imwrite(os.path.join(opt.output, "{}_pred.jpg".format(step)),
                    save_image(Out))
        cv2.imwrite(os.path.join(opt.output, "{}_input.jpg".format(step)),
                    save_image(INoisy))
        cv2.imwrite(os.path.join(opt.output, "{}_gt.jpg".format(step)),
                    save_image(ISource))
        step += 1
    psnr_test /= len(files_source)
    print("\nPSNR on test data %f" % psnr_test)
Example 3
def main():
    # Build model
    print('Loading model ...\n')
    net = DnCNN(channels=1, num_of_layers=opt.num_of_layers)
    device_ids = [0]
    model = nn.DataParallel(net, device_ids=device_ids).cuda()
    model.load_state_dict(torch.load(os.path.join(opt.logdir, 'net.pth')))
    model.eval()
    # load data
    print('Loading data ...\n')
    dataset_test = Dataset(root=opt.data, crop_size=None)
    loader_test = DataLoader(dataset=dataset_test,
                             num_workers=4,
                             batch_size=1,
                             shuffle=False)
    # process data
    psnr_test = 0.
    ssim_test = 0.
    for i, (img_lr, img_hr) in enumerate(loader_test, 0):
        # image
        img_lr = img_lr.cuda()
        img_hr = img_hr.cuda()
        with torch.no_grad():  # this can save much memory
            learned_img = torch.clamp(img_lr - model(img_lr), 0., 1.)

        psnr = batch_PSNR(learned_img, img_hr, 1.)
        psnr_test += psnr
        # print("%s PSNR %f" % (f, psnr))

        ssim = batch_SSIM(learned_img, img_hr, 1.)
        ssim_test += ssim

        # save the restored image, mirroring the input directory layout
        learned_img = Image.fromarray(
            (255 * learned_img[0, 0].cpu().data.numpy()).astype(np.uint8))
        filename = os.path.join('./results',
                                dataset_test.at(i).split(opt.data)[1])
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        learned_img.save(filename)

    psnr_test = psnr_test / len(dataset_test)
    print("\nPSNR on test data %f" % psnr_test)

    ssim_test = ssim_test / len(dataset_test)
    print("\nSSIM on test data %f" % ssim_test)
Example 4
def main():
    # Build model
    print('Loading model ...\n')
    net = DnCNN(channels=1, num_of_layers=opt.num_of_layers)
    device_ids = [0]
    model = nn.DataParallel(net, device_ids=device_ids).cuda()
    model.load_state_dict(torch.load(os.path.join(opt.logdir, 'net.pth')))
    model.eval()
    # load data info
    print('Loading data info ...\n')
    files_source = glob.glob(os.path.join('data', opt.test_data, '*.png'))
    files_source.sort()
    # process data
    psnr_test = 0
    for f in files_source:
        # image
        Img = cv2.imread(f)
        Img = normalize(np.float32(Img[:,:,0]))
        Img = np.expand_dims(Img, 0)
        Img = np.expand_dims(Img, 1)
        ISource = torch.Tensor(Img)
        # noise
        noise = torch.FloatTensor(ISource.size()).normal_(mean=0, std=opt.test_noiseL/255.)
        # noisy image
        INoisy = ISource + noise
        ISource, INoisy = Variable(ISource.cuda()), Variable(INoisy.cuda())
        with torch.no_grad(): # this can save much memory
            Out = torch.clamp(INoisy-model(INoisy), 0., 1.)
            fig, ax = plt.subplots(nrows=1, ncols=2)
            # move to the CPU before converting: np.array() fails on CUDA tensors
            ax[0].imshow(Out.cpu().numpy().squeeze(), cmap='gray')
            ax[1].imshow(INoisy.cpu().numpy().squeeze(), cmap='gray')
            plt.show()
        ## if you are using an older version of PyTorch, torch.no_grad() may not be supported
        # ISource, INoisy = Variable(ISource.cuda(),volatile=True), Variable(INoisy.cuda(),volatile=True)
        # Out = torch.clamp(INoisy-model(INoisy), 0., 1.)
        psnr = batch_PSNR(Out, ISource, 1.)
        psnr_test += psnr
        print("%s PSNR %f" % (f, psnr))
    psnr_test /= len(files_source)
    print("\nPSNR on test data %f" % psnr_test)
Example 5
        trainloop(experiment,
                  trainloader,
                  Preprocessing(),
                  log_data=True,
                  validloader=validloader)


if __name__ == '__main__':
    import argparse
    import os
    from utils import utils
    import torch

    parser = argparse.ArgumentParser(
        description='SARCNN for SAR image denoising')
    DnCNN.add_commandline_networkparams(parser, "dncnn", 64, 17, 3, "relu",
                                        True)

    # Optimizer
    parser.add_argument('--optimizer', default="adam",
                        choices=["adam", "sgd"])  # which optimizer to use
    # parameters for Adam
    parser.add_argument("--adam.beta1", type=float, default=0.9)
    parser.add_argument("--adam.beta2", type=float, default=0.999)
    parser.add_argument("--adam.eps", type=float, default=1e-8)
    parser.add_argument("--adam.weightdecay", type=float, default=1e-4)
    parser.add_argument('--adam.lr', type=float, default=0.001)
    # parameters for SGD
    parser.add_argument("--sgd.momentum", type=float, default=0.9)
    parser.add_argument("--sgd.weightdecay", type=float, default=1e-4)
    parser.add_argument('--sgd.lr', type=float, default=0.001)
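
One subtlety in Example 5: argparse derives the attribute name from the option string, so --adam.beta1 is stored under the dest 'adam.beta1', and the literal dot means args.adam.beta1 would look up a nonexistent adam attribute. The values have to be read back with getattr or vars:

args = parser.parse_args()
beta1 = getattr(args, 'adam.beta1')  # the attribute name contains a literal dot
beta2 = vars(args)['adam.beta2']     # equivalent dictionary-style access
betas = (beta1, beta2)
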
Example 6
def main():
    # Load dataset
    print('Loading dataset ...\n')
    dataset_train = Dataset(train=True,
                            data_path_A=opt.A,
                            data_path_B=opt.B,
                            data_path_val_A=opt.val_A,
                            data_path_val_B=opt.val_B,
                            patch_size_dn=30,
                            patch_size_sr=120,
                            stride=5,
                            aug_times=2,
                            if_reseize=True)
    dataset_val = Dataset(train=False,
                          data_path_A=opt.A,
                          data_path_B=opt.B,
                          data_path_val_A=opt.val_A,
                          data_path_val_B=opt.val_B,
                          patch_size_dn=30,
                          patch_size_sr=120,
                          stride=5,
                          aug_times=2,
                          if_reseize=True)
    loader_train = DataLoader(dataset=dataset_train,
                              num_workers=4,
                              batch_size=opt.batchSize,
                              shuffle=True)
    print("# of training samples: %d\n" % int(len(dataset_train)))
    # Build model
    net = DnCNN(channels=1, num_of_layers=opt.num_of_layers)
    #net.apply(weights_init_kaiming)
    criterion = nn.MSELoss(reduction='sum')  # size_average=False is deprecated
    #criterion = L1_Charbonnier_loss()

    # Move to GPU
    device_ids = [opt.device_ids]
    model = nn.DataParallel(net, device_ids=device_ids).cuda()
    criterion.cuda()
    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
    # training
    writer = SummaryWriter(opt.outf)
    step = 0
    for epoch in range(opt.epochs):
        if epoch < opt.milestone:
            current_lr = opt.lr
        else:
            current_lr = opt.lr / 10.
        # set learning rate
        for param_group in optimizer.param_groups:
            param_group["lr"] = current_lr
        print('learning rate %f' % current_lr)
        # train
        for i, data in enumerate(loader_train, 0):

            img_A_train, img_LB_data = Variable(data[0]), Variable(
                data[1])  #, requires_grad=False
            # training step
            model.train()
            model.zero_grad()
            optimizer.zero_grad()

            img_L_train, img_B_train = torch.split(img_LB_data, 1, dim=1)
            difference = img_B_train - img_L_train

            img_A_train, img_L_train, img_B_train = Variable(
                img_A_train.cuda()), Variable(img_L_train.cuda()), Variable(
                    img_B_train.cuda())
            difference = Variable(difference.cuda())

            out_train, s_out_train = model(img_B_train)
            #loss_dn = criterion(out_train, img_L_train) / (img_B_train.size()[0]*2)
            loss_dn = criterion(out_train,
                                difference) / (img_B_train.size()[0] * 2)
            # note: loss_dn.backward() here plus loss.backward() below accumulates
            # the loss_dn gradients twice (see the single-pass variant after this example)
            loss_dn.backward(retain_graph=True)
            loss = loss_dn + 1e-6 * criterion(
                s_out_train, img_A_train) / (img_A_train.size()[0] * 2)
            #loss =  1e-2 * loss + 1e-1 * loss_x4
            loss.backward()
            optimizer.step()
            # results
            model.eval()
            out_train = torch.clamp(out_train, 0., 1.)

            psnr_train = batch_PSNR(out_train, img_L_train, 1.)
            print(
                "[epoch %d][%d/%d] loss: %.4f loss_dn: %.4f PSNR_train: %.4f" %
                (epoch + 1, i + 1, len(loader_train), loss.item(),
                 loss_dn.item(), psnr_train))
            # if you are using an older version of PyTorch, you may need to change loss.item() to loss.data[0]
            if step % 10 == 0:
                # Log the scalar values
                writer.add_scalar('loss', loss.item(), step)
                writer.add_scalar('PSNR on training data', psnr_train, step)
            step += 1
            torch.save(
                model.state_dict(),
                os.path.join(opt.outf, "epoch_%d_net.pth" % (epoch + 1)))
        ## the end of each epoch
        model.eval()
        # validate
        psnr_val = 0
        for k in range(len(dataset_val)):
            img_val_A = torch.unsqueeze(dataset_val[k][0], 0)
            imgn_val_B = torch.unsqueeze(dataset_val[k][1], 0)
            #difference = imgn_val_B - img_val_A
            img_val_A, imgn_val_B = Variable(img_val_A.cuda()), Variable(
                imgn_val_B.cuda())
            out_val, s_out_val = model(imgn_val_B)

            s_out_val = torch.clamp(s_out_val, 0., 1.)
            s_out_val = Variable(s_out_val.cuda(), requires_grad=False)

            out_val = torch.clamp(out_val, 0., 1.)
            out_val = Variable(out_val.cuda(), requires_grad=False)

            psnr_val += batch_PSNR(s_out_val, img_val_A, 1.)
        psnr_val /= len(dataset_val)
        print("\n[epoch %d] PSNR_val: %.4f" % (epoch + 1, psnr_val))
        writer.add_scalar('PSNR on validation data', psnr_val, epoch)

        # log the images
        out_train, s_out_train = model(img_B_train)
        out_train = torch.clamp(img_B_train - out_train, 0., 1.)
        Img_A = utils.make_grid(img_A_train.data,
                                nrow=8,
                                normalize=True,
                                scale_each=True)
        Img_B = utils.make_grid(img_B_train.data,
                                nrow=8,
                                normalize=True,
                                scale_each=True)
        Irecon = utils.make_grid(out_train.data,
                                 nrow=8,
                                 normalize=True,
                                 scale_each=True)
        writer.add_image('clean image', Img_A, epoch)
        writer.add_image('input image', Img_B, epoch)
        writer.add_image('reconstructed image', Irecon, epoch)
        # save model
        torch.save(model.state_dict(), os.path.join(opt.outf, 'net.pth'))

        img_A_save = torch.clamp(out_train, 0., 1.)
        img_A_save = img_A_save[0, :, :].cpu()
        img_A_save = img_A_save[0].detach().numpy().astype(np.float32) * 255
        #print(np.amax(img_A_save))
        cv2.imwrite(os.path.join(opt.outf, "%#04dA.png" % (step)), img_A_save)

        img_B_save = torch.clamp(s_out_train, 0., 1.)
        img_B_save = img_B_save[0, :, :].cpu()
        img_B_save = img_B_save[0].detach().numpy().astype(np.float32) * 255
        #print(np.amax(img_A_save))
        cv2.imwrite(os.path.join(opt.outf, "%#04dB.png" % (step)), img_B_save)
Example 7
def main():
    writer = SummaryWriter(opt.output)
    # Build model
    print('Loading model ...\n')
    net = DnCNN(channels=1, num_of_layers=opt.num_of_layers)
    device_ids = [0]
    model = nn.DataParallel(net, device_ids=device_ids).cuda()
    model.load_state_dict(torch.load(os.path.join(opt.logdir, opt.net)))
    model.eval()
    # load data info
    print('Loading data info ...\n')
    files_source_A = glob.glob(os.path.join('datasets', opt.test_A, '*.*'))
    files_source_B = glob.glob(os.path.join('datasets', opt.test_B, '*.*'))

    files_source_A.sort()
    files_source_B.sort()
    # process data
    psnr_test = 0
    psnr_D_test = 0
    for f in range(len(files_source_A)):
        # image
        Img_A = cv2.imread(files_source_A[f])

        Img_B = cv2.imread(files_source_B[f])
        if opt.mode == 'X':
            h, w, c = Img_A.shape
            # cv2.resize expects (width, height), not (height, width)
            Img_D = cv2.resize(Img_B, (w, h), interpolation=cv2.INTER_CUBIC)
        else:
            # Img_D is used unconditionally below; fall back to the raw input
            # (assumes Img_A and Img_B already share a size in this mode)
            Img_D = Img_B
        Img_A = normalize(np.float32(Img_A[:, :, 0]))
        Img_A = np.expand_dims(Img_A, 0)
        Img_A = np.expand_dims(Img_A, 1)

        Img_B = normalize(np.float32(Img_B[:, :, 0]))
        Img_B = np.expand_dims(Img_B, 0)
        Img_B = np.expand_dims(Img_B, 1)

        Img_D = normalize(np.float32(Img_D[:, :, 0]))
        Img_D = np.expand_dims(Img_D, 0)
        Img_D = np.expand_dims(Img_D, 1)

        I_A = torch.Tensor(Img_A)
        I_B = torch.Tensor(Img_B)
        I_D = torch.Tensor(Img_D)
        I_A, I_B, I_D = Variable(I_A.cuda()), Variable(I_B.cuda()), Variable(
            I_D.cuda())
        with torch.no_grad():  # this can save much memory
            Out, s_Out = model(I_B)
            s_Out = torch.clamp(s_Out, 0., 1.)
        ## if you are using an older version of PyTorch, torch.no_grad() may not be supported

        psnr = batch_PSNR(s_Out, I_A, 1.)
        psnr_test += psnr
        psnr_D = batch_PSNR(I_D, I_A, 1.)
        psnr_D_test += psnr_D
        print("%s output PSNR %f" % (f, psnr))
        print("%s input PSNR %f" % (f, psnr_D))

        Out = Out[0, :, :].cpu()
        Out = Out[0].numpy().astype(np.float32) * 255

        s_Out = s_Out[0, :, :].cpu()
        s_Out = s_Out[0].numpy().astype(np.float32) * 255
        cv2.imwrite(
            os.path.join(opt.output, "%04d.png" % (f + opt.start_index)),
            s_Out)

    psnr_test /= len(files_source_A)
    print("\nPSNR on output data %f" % psnr_test)
    psnr_D_test /= len(files_source_A)
    print("\nPSNR on input data %f" % psnr_D_test)

    I_A = I_A[0, :, :].cpu()
    I_A = I_A[0].numpy().astype(np.float32)
    I_D = I_D[0, :, :].cpu()
    I_D = I_D[0].numpy().astype(np.float32)

    fig = plt.figure()

    ax = plt.subplot(1, 3, 1)
    ax.imshow(I_A, cmap='gray')
    ax.set_title("GT")

    ax = plt.subplot(1, 3, 2)
    ax.imshow(I_D, cmap='gray')
    ax.set_title("Input (with 'realistic' difference & bicubic)")

    ax = plt.subplot(1, 3, 3)
    ax.imshow(s_Out, cmap='gray')
    ax.set_title("Output (sDnCNN)")
    plt.show()
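
All of these examples construct DnCNN(channels=..., num_of_layers=...) without showing the class. The name matches the residual-learning denoiser of Zhang et al. (2017): one Conv+ReLU layer, num_of_layers - 2 Conv+BN+ReLU blocks, and a final Conv that predicts the noise map. A sketch of that canonical PyTorch definition (the repository's file may differ, and Examples 6 and 7 plainly use a two-output variant that this sketch does not cover):

import torch.nn as nn


class DnCNN(nn.Module):
    def __init__(self, channels, num_of_layers=17):
        super(DnCNN, self).__init__()
        kernel_size, padding, features = 3, 1, 64
        layers = [nn.Conv2d(channels, features, kernel_size, padding=padding, bias=False),
                  nn.ReLU(inplace=True)]
        for _ in range(num_of_layers - 2):
            layers += [nn.Conv2d(features, features, kernel_size, padding=padding, bias=False),
                       nn.BatchNorm2d(features),
                       nn.ReLU(inplace=True)]
        layers.append(nn.Conv2d(features, channels, kernel_size, padding=padding, bias=False))
        self.dncnn = nn.Sequential(*layers)

    def forward(self, x):
        # the network predicts the noise residual; callers reconstruct via x - self(x)
        return self.dncnn(x)
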
Example 8
def main():

    # Load dataset
    print('Loading dataset ...\n')
    dataset = Dataset(img_avg=opt.img_avg,
                      patch_size=opt.patch_size,
                      stride=opt.stride)
    loader = DataLoader(dataset=dataset,
                        num_workers=4,
                        batch_size=opt.batch_size,
                        shuffle=True)
    print(f'{len(dataset)} training sample pairs loaded.')

    # Build model
    print(f'** Creating {opt.net} network **\n')
    model_channels = 1

    if opt.net == 'D':
        net = DnCNN(channels=model_channels, num_of_layers=17)
#     elif opt.net == 'DF':
#         net = DnCNN_BUIFD(channels=model_channels, num_of_layers=17)
    elif opt.net == 'M':
        net = MemNet(in_channels=model_channels)
#     elif opt.net == 'MF':
#         net = MemNet_BUIFD(in_channels=model_channels)
    elif opt.net == 'R':
        net = RIDNET(in_channels=model_channels)
    else:
        raise NotImplementedError('Network model not implemented.')  # NotImplemented is not an exception
    net.apply(weights_init_kaiming)

    # Loss metric
    criterion = nn.MSELoss(reduction='sum')  # size_average=False is deprecated

    # Move to GPU
    model = nn.DataParallel(net).cuda()
    criterion.cuda()
    print('Trainable parameters: ',
          sum(p.numel() for p in model.parameters() if p.requires_grad))

    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)

    # Training
    loss_log = np.zeros(opt.epochs)
    loss_batch_log = []

    for epoch in range(opt.epochs):
        start_time = timer()

        # Learning rate
        if epoch < opt.milestone:
            current_lr = opt.lr
        else:
            current_lr = opt.lr / (10.)
        for param_group in optimizer.param_groups:
            param_group["lr"] = current_lr
        print('\nLearning rate = %f' % current_lr)

        # Train
        for idx, (noisy, target) in enumerate(loader):

            model.train()
            model.zero_grad()
            optimizer.zero_grad()

            # Training step
            noise = noisy - target
            target, noisy = Variable(target.cuda()), Variable(noisy.cuda())
            noise = Variable(noise.cuda())

            #             if opt.net[-1] != 'F':
            if opt.net == 'R':
                predicted_noise = noisy - model(noisy)
            else:
                predicted_noise = model(noisy)
            loss_noise = criterion(predicted_noise,
                                   noise) / (noisy.size()[0] * 2)
            loss = loss_noise

            #             else:
            #                 out_train, out_noise_level_train = model(imgn_train)

            #                 loss_img = criterion(out_train, noise) / (imgn_train.size()[0]*2)
            #                 loss_noise_level = criterion(out_noise_level_train, noise_level_train) / (imgn_train.size()[0]*2)
            #                 loss = loss_img + loss_noise_level

            loss.backward()
            optimizer.step()

            loss_batch_log.append(loss.item())
            #             loss_image_log[epoch] += loss_img.item()
            #             loss_noise_level_log[epoch] += loss_noise_level.item()
            loss_log[epoch] += loss.item()

        # Average out over all batches in the epoch
        # loss_image_log[epoch] = loss_image_log[epoch] / len(loader_train)
        # loss_noise_level_log[epoch] = loss_noise_level_log[epoch] / len(loader_train)
        loss_log[epoch] = loss_log[epoch] / len(loader)

        # Save model
        model_name = f'{opt.net}_{opt.img_avg}'
        model_dir = os.path.join('../../net_data/trained_denoisers/',
                                 model_name)
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)
        torch.save(model.state_dict(),
                   os.path.join(model_dir, f'epoch_{epoch}.pth'))

        # Save logs and settings
        if ((epoch + 1) % 10) == 0:
            log_dict = {
                'loss_log': loss_log,
                #'loss_image_log': loss_image_log,
                #'loss_noise_level_log': loss_noise_level_log,
                'loss_batch_log': np.asarray(loss_batch_log)
            }
            fname = os.path.join(model_dir, 'log_dict.pkl')
            with open(fname, 'wb') as f:
                pickle.dump(log_dict, f, protocol=pickle.HIGHEST_PROTOCOL)
            print('wrote', fname)

            settings_dict = {'opt': opt}
            fname = os.path.join(model_dir, 'settings_dict.pkl')
            with open(fname, 'wb') as f:
                pickle.dump(settings_dict, f, protocol=pickle.HIGHEST_PROTOCOL)
            print('wrote', fname)

        # Ending-epoch message
        end_time = timer()
        print(
            f'Epoch {epoch} ({(end_time - start_time)/60.0:.1f} min):    loss={loss_log[epoch]:.4f}'
        )

    print(f'Training {opt.net} complete for all epochs.')
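
weights_init_kaiming, applied through net.apply(...) in Examples 8, 10, and 12, is another repository helper that is never shown. A plausible version following the usual pattern of dispatching on the layer's class name (the BatchNorm initialization here is an assumption; implementations vary):

import torch.nn as nn


def weights_init_kaiming(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('BatchNorm') != -1:
        # assumed: start BatchNorm close to the identity
        nn.init.constant_(m.weight.data, 1.0)
        nn.init.constant_(m.bias.data, 0.0)
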
Example 9
def main():
    writer = SummaryWriter(opt.output)
    #Upsample_4x = nn.Upsample(scale_factor=4, mode='bilinear')
    # Build model
    print('Loading model ...\n')
    net = DnCNN(channels=1, num_of_layers=opt.num_of_layers)
    device_ids = [0]
    model = nn.DataParallel(net, device_ids=device_ids).cuda()
    model.load_state_dict(torch.load(os.path.join(opt.logdir, opt.net)))
    model.eval()

    

    # load data info
    print('Loading data info ...\n')
    files_source_A = glob.glob(os.path.join('datasets', opt.test_A, '*.*'))
    files_source_B = glob.glob(os.path.join('datasets', opt.test_B, '*.*'))

    files_source_A.sort()
    files_source_B.sort()
    # process data
    psnr_predict_avg = 0
    psnr_defect_avg = 0
    for f in range(len(files_source_A)):
        # image
        Img_A = cv2.imread(files_source_A[f])
        Img_B = cv2.imread(files_source_B[f])
        if opt.mode == 'S':
            h, w, c = Img_A.shape
            # cv2.resize expects (width, height), not (height, width)
            Img_B = cv2.resize(Img_B, (w, h), interpolation=cv2.INTER_CUBIC)
        Img_A = normalize(np.float32(Img_A[:,:,0]))
        Img_A = np.expand_dims(Img_A, 0)
        Img_A = np.expand_dims(Img_A, 1)

        Img_B = normalize(np.float32(Img_B[:,:,0]))
        Img_B = np.expand_dims(Img_B, 0)
        Img_B = np.expand_dims(Img_B, 1)

        I_A = torch.Tensor(Img_A)
        I_B = torch.Tensor(Img_B)
        I_A, I_B = Variable(I_A.cuda()), Variable(I_B.cuda())
        with torch.no_grad(): # this can save much memory
            output = model(I_B)
            output = torch.clamp(I_B - output, 0., 1.)
        ## if you are using an older version of PyTorch, torch.no_grad() may not be supported

        
        psnr_predict = batch_PSNR(output, I_A, 1.)
        psnr_predict_avg += psnr_predict
        psnr_defect = batch_PSNR(I_B, I_A, 1.)
        psnr_defect_avg += psnr_defect
        print("%s output psnr_predict %f" % (f, psnr_predict))
        print("%s input psnr_predict %f" % (f, psnr_defect))

        output= output[0,:,:].cpu()
        output= output[0].numpy().astype(np.float32)*255
        cv2.imwrite(os.path.join(opt.output, "%#04d.png" % (f+opt.start_index)), output)
        

    psnr_predict_avg /= len(files_source_A)
    print("\nPSNR on output data %f" % psnr_predict_avg)
    psnr_defect_avg /= len(files_source_A)
    print("\nPSNR on input data %f" % psnr_defect_avg)
    

    I_A = I_A[0, :, :].cpu()
    I_A = I_A[0].numpy().astype(np.float32)
    I_B = I_B[0, :, :].cpu()
    I_B = I_B[0].numpy().astype(np.float32)


    fig = plt.figure()

    ax = plt.subplot("131")
    ax.imshow(I_A, cmap='gray')
    ax.set_title("GT")

    ax = plt.subplot("132")
    ax.imshow(I_B, cmap='gray')
    ax.set_title("defective input")

    ax = plt.subplot("133")
    ax.imshow(output, cmap='gray')
    ax.set_title("Output(DnCNN)")
    plt.show()
Example 10
def main():
    # Load dataset
    print('Loading dataset ...\n')
    dataset_train = Dataset(train=True)
    dataset_val = Dataset(train=False)
    loader_train = DataLoader(dataset=dataset_train, num_workers=4, batch_size=opt.batchSize, shuffle=True)
    print("# of training samples: %d\n" % int(len(dataset_train)))
    # Build model
    net = DnCNN(channels=1, num_of_layers=opt.num_of_layers)
    net.apply(weights_init_kaiming)
    criterion = nn.MSELoss(reduction='sum')  # size_average=False is deprecated
    # Move to GPU
    device_ids = [0]
    model = nn.DataParallel(net, device_ids=device_ids).cuda()
    criterion.cuda()
    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
    # training
    writer = SummaryWriter(opt.outf)
    step = 0
    for epoch in range(opt.epochs):
        if epoch < opt.milestone:
            current_lr = opt.lr
        else:
            current_lr = opt.lr / 10.
        # set learning rate
        for param_group in optimizer.param_groups:
            param_group["lr"] = current_lr
        print('learning rate %f' % current_lr)
        # train
        for i, data in enumerate(loader_train, 0):
            # training step
            model.train()
            model.zero_grad()
            optimizer.zero_grad()
            img_A_train = data[:, 0]
            img_B_train = data[:, 1]

            difference = img_B_train - img_A_train
            # print(difference.size())  # leftover debug output, disabled

            img_A_train, img_B_train = Variable(img_A_train.cuda()), Variable(img_B_train.cuda())
            difference = Variable(difference.cuda())
            out_train = model(img_B_train)
            loss = criterion(out_train, difference) / (img_B_train.size()[0]*2)
            loss.backward()
            optimizer.step()
            # results
            model.eval()
            out_train = torch.clamp(img_B_train-model(img_B_train), 0., 1.)
            psnr_train = batch_PSNR(out_train, img_A_train, 1.)
            print("[epoch %d][%d/%d] loss: %.4f PSNR_train: %.4f" %
                (epoch+1, i+1, len(loader_train), loss.item(), psnr_train))
            # if you are using an older version of PyTorch, you may need to change loss.item() to loss.data[0]
            if step % 10 == 0:
                # Log the scalar values
                writer.add_scalar('loss', loss.item(), step)
                writer.add_scalar('PSNR on training data', psnr_train, step)
            step += 1
            torch.save(model.state_dict(), os.path.join(opt.outf, "epoch_%d_net.pth" % (epoch + 1)))
        ## the end of each epoch
        model.eval()
        # validate
        psnr_val = 0
        
        for k in range(len(dataset_val)):
            img_val_A = torch.unsqueeze(dataset_val[k][0], 0)
            imgn_val_B = torch.unsqueeze(dataset_val[k][1], 0)
            difference = imgn_val_B - img_val_A
            img_val_A, imgn_val_B = Variable(img_val_A.cuda()), Variable(imgn_val_B.cuda())
            out_val = torch.clamp(model(imgn_val_B), 0., 1.)
            psnr_val += batch_PSNR(out_val, img_val_A, 1.)
        psnr_val /= len(dataset_val)
        print("\n[epoch %d] PSNR_val: %.4f" % (epoch+1, psnr_val))
        writer.add_scalar('PSNR on validation data', psnr_val, epoch)
        
        # log the images
        out_train = torch.clamp(img_B_train-model(img_B_train), 0., 1.)
        Img_A = utils.make_grid(img_A_train.data, nrow=8, normalize=True, scale_each=True)
        Img_B = utils.make_grid(img_B_train.data, nrow=8, normalize=True, scale_each=True)
        Irecon = utils.make_grid(out_train.data, nrow=8, normalize=True, scale_each=True)
        writer.add_image('clean image', Img_A, epoch)
        writer.add_image('input image', Img_B, epoch)
        writer.add_image('reconstructed image', Irecon, epoch)
        # save model
        torch.save(model.state_dict(), os.path.join(opt.outf, 'net.pth'))
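
A detail these training scripts share with the test scripts: the checkpoints are saved from an nn.DataParallel wrapper, so every key in the state dict carries a module. prefix, which is why the test examples also wrap the network in nn.DataParallel before calling load_state_dict. To load such a checkpoint on a bare model instead, the prefix has to be stripped; a sketch, assuming the DnCNN class sketched earlier and an illustrative checkpoint path:

import torch

net = DnCNN(channels=1, num_of_layers=17)
state = torch.load('logs/net.pth', map_location='cpu')
# strip the 'module.' prefix that nn.DataParallel adds to every parameter name
state = {k.replace('module.', '', 1): v for k, v in state.items()}
net.load_state_dict(state)
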
Example 11
SHUFFLE_BUFFER_SIZE = 100
TEST_SIZE = 200

#%%
x = load_image_data()

#%%
x = x.unbatch()

x = x.batch(batch_size=10)
# keep the split stable: without reshuffle_each_iteration=False, take()/skip()
# would draw from a different shuffle on every pass and leak train data into test
shuffled_data = x.shuffle(buffer_size=SHUFFLE_BUFFER_SIZE,
                          reshuffle_each_iteration=False)
test = shuffled_data.take(TEST_SIZE).repeat()
train = shuffled_data.skip(TEST_SIZE).repeat()
#%%

model = DnCNN(depth=17)
model.compile(optimizer=keras.optimizers.Adam(),
              loss=dcnn_loss,
              metrics=[psnr])

now = datetime.now()
# use a forward slash in the log path: the original backslash only worked on Windows
tensorboard_callback = keras.callbacks.TensorBoard(
    log_dir='logs/log_from_{}'.format(now.strftime("%Y-%m-%d_at_%H-%M-%S")),
    histogram_freq=1)

model.fit(x=train,
          steps_per_epoch=1000,
          validation_data=test,
          epochs=5,
          validation_steps=50,
          callbacks=[tensorboard_callback])
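
Example 11 compiles with a dcnn_loss function and a psnr metric that are defined elsewhere. Consistent with the residual objective used by the PyTorch examples, plausible definitions would be (names and scaling are assumptions):

import tensorflow as tf


def dcnn_loss(y_true, y_pred):
    # assumed: half sum-of-squared-errors on the predicted residual
    return tf.reduce_sum(tf.square(y_true - y_pred)) / 2.


def psnr(y_true, y_pred):
    # assumed: images scaled to [0, 1]
    return tf.image.psnr(y_true, y_pred, max_val=1.0)
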
Example 12
def main():
    # Load the training set
    print('Loading dataset ...\n')
    dataset_train = Dataset(train=True)
    loader_train = DataLoader(dataset=dataset_train,
                              num_workers=4,
                              batch_size=opt.batchSize,
                              shuffle=True)
    print("# of training samples: %d\n" % int(len(dataset_train)))

    # Build the model
    net = DnCNN(channels=1, num_of_layers=17)
    net.apply(weights_init_kaiming)  # weight initialization

    # Use the GPU
    device_ids = [0]
    model = nn.DataParallel(net, device_ids=device_ids).cuda()
    #     criterion.cuda()

    # Define the loss and optimizer
    criterion = nn.MSELoss(reduction='sum')  # size_average=False is deprecated
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)

    # Visualize training curves and metrics with tensorboardX
    time_now = datetime.now().isoformat()
    if not os.path.exists(opt.log_dir):
        os.mkdir(opt.log_dir)
    writer = SummaryWriter(log_dir=os.path.join(opt.log_dir, time_now))

    step = 0
    for epoch in range(opt.epochs):

        # Set the learning rate
        if epoch < opt.milestone:
            current_lr = opt.lr
        else:
            #             current_lr = opt.lr / 10.
            current_lr = opt.lr
        for param_group in optimizer.param_groups:
            param_group["lr"] = current_lr
        print('learning rate %f' % current_lr)

        # Start training
        total_loss = 0
        psnr_train = 0
        for i, data in enumerate(loader_train, 0):
            # training step
            model.train()
            model.zero_grad()
            optimizer.zero_grad()
            img_train = data

            noise = torch.FloatTensor(img_train.size()).normal_(
                mean=0, std=opt.noiseL / 255.)
            imgn_train = img_train + noise
            #             print(imgn_train.shape)
            img_train, imgn_train = Variable(img_train.cuda()), Variable(
                imgn_train.cuda())
            noise = Variable(noise.cuda())
            out_train = model(imgn_train)
            loss = criterion(out_train, noise) / (imgn_train.size()[0] * 2)
            loss.backward()
            optimizer.step()

            # Accumulate the loss, compute PSNR, and report
            out_train = torch.clamp(imgn_train - out_train, 0., 1.)
            psnr_train += batch_PSNR(out_train, img_train, 1.)
            total_loss += loss.item()
            print("[epoch %d][%d/%d] loss: %.4f PSNR_train: %.4f" %
                  (epoch + 1, i + 1, len(loader_train), total_loss /
                   (i + 1), psnr_train / (i + 1)))
            writer.add_scalar('loss', total_loss / (i + 1), step)
            writer.add_scalar('PSNR on training data', psnr_train / (i + 1),
                              step)

            # Save training images and the model
            step += 1
            if step % 500 == 0:
                if not os.path.exists(opt.image_path):
                    os.mkdir(opt.image_path)
                cv2.imwrite(os.path.join(opt.image_path, "{}_pred.jpg".format(step)),
                            save_image(out_train))
                cv2.imwrite(os.path.join(opt.image_path, "{}_input.jpg".format(step)),
                            save_image(imgn_train))
                cv2.imwrite(os.path.join(opt.image_path, "{}_gt.jpg".format(step)),
                            save_image(img_train))
        if not os.path.exists(opt.save_model):
            os.makedirs(opt.save_model)
        torch.save(model.state_dict(), os.path.join(opt.save_model, 'net.pth'))
Example 13
def main():
    # Build model
    print('Loading model ...\n')
    net = DnCNN(channels=3, num_of_layers=opt.num_of_layers)
    print(net)
    device_ids = [0]
    model = nn.DataParallel(net, device_ids=device_ids).cuda()
    model.load_state_dict(
        torch.load(os.path.join(opt.logdir, 'netB50color.pth')))
    model.eval()
    # load data info
    print('Loading data info ...\n')
    # video input (MP4)
    files_source = glob.glob(os.path.join('data', opt.test_data, '*.MP4'))
    files_source.sort()
    # process data
    psnr_test = 0

    cap = cv2.VideoCapture(files_source[0])
    fourcc = cv2.VideoWriter_fourcc('X', 'V', 'I', 'D')
    out = cv2.VideoWriter('outpy.avi', fourcc, 60, (2 * 1280, 720))
    curFrame = 0
    times = 0  # output frame counter: initialize once, outside the loop
    while cap.isOpened():  # isOpened is a method and must be called
        curFrame = curFrame + 1

        ret, frame = cap.read()
        frame_width = int(cap.get(3))
        frame_height = int(cap.get(4))
        if ret == True:
            Img = frame
            color_flag = 1

            if color_flag == 0:
                # imag  +0e
                Img = normalize(np.float32(Img[:, :, 0]))
                Img = np.expand_dims(Img, 0)
                Img = np.expand_dims(Img, 1)
                ISource = torch.Tensor(Img)
                # noise
                noise = torch.FloatTensor(ISource.size()).normal_(
                    mean=0, std=opt.test_noiseL / 255.)
                # noisy image
                INoisy = ISource + noise
                ISource, INoisy = Variable(ISource.cuda()), Variable(
                    INoisy.cuda())
                with torch.no_grad():  # this can save much memory
                    Out = torch.clamp(INoisy - model(INoisy), 0., 1.)
                ## if you are using an older version of PyTorch, torch.no_grad() may not be supported
                # ISource, INoisy = Variable(ISource.cuda(),volatile=True), Variable(INoisy.cuda(),volatile=True)
                # Out = torch.clamp(INoisy-model(INoisy), 0., 1.)
                psnr = batch_PSNR(Out, ISource, 1.)
                psnr_test += psnr
                print("%s PSNR %f" % (f, psnr))

        #For color images
            else:
                dims = [0, 1, 2]

                #                Img = cv2.cvtColor(Img, cv2.COLOR_BGR2RGB)
                Img2 = Img
                #  print(Img2.shape)
                s1Img = np.zeros(Img.shape, dtype=np.float32)
                s2Img = np.zeros(Img.shape, dtype=np.float32)
                noiseAll = np.zeros(Img.shape, dtype=np.float32)

                resultImg = np.zeros(Img.shape, dtype=np.float32)
                # imag  +0e
                for i in dims:
                    Img = normalize(np.float32(Img2[:, :, i]))

                    s1Img[:, :, i] = Img

                s1Img = s1Img.transpose((2, 0, 1))
                s1Img = np.expand_dims(s1Img, 0)
                ISource = torch.Tensor(s1Img)

                # print(ISource.shape)
                # the frame itself is the noisy input; no synthetic noise is added
                INoisy = ISource
                ISource, INoisy = Variable(ISource.cuda()), Variable(
                    INoisy.cuda())
                with torch.no_grad():  # this can save much memory
                    noise_get = model(INoisy)
                    Out = torch.clamp(INoisy - noise_get, 0., 1.)
                ## if you are using an older version of PyTorch, torch.no_grad() may not be supported
                # ISource, INoisy = Variable(ISource.cuda(),volatile=True), Variable(INoisy.cuda(),volatile=True)
                # Out = torch.clamp(INoisy-model(INoisy), 0., 1.)
                # psnr = batch_PSNR(Out, ISource, 1.)
                # psnr_test += psnr
                # print("%s PSNR %f" % (f, psnr))
                resultImg = Out.cpu().numpy()
                sImg = INoisy.cpu().numpy()
                noiseAll = noise_get.cpu().numpy()
                sImg = np.squeeze(sImg)
                resultImg = np.squeeze(resultImg)
                sImg = sImg.transpose((1, 2, 0))
                resultImg = resultImg.transpose((1, 2, 0))
                a = np.hstack((sImg, resultImg))

                a = np.uint8(a * 255)
                save_result(a,
                            path=os.path.join(opt.save_dir,
                                              str(times) + '.png'))
                out.write(a)
                times = times + 1
                print('---------curFrame: %d' % (curFrame))
        else:
            break  # end of stream: stop reading frames

    cap.release()
    out.release()
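
Finally, save_result in Example 13 is also undefined in the snippet. Given how it is called, it is presumably a thin wrapper around cv2.imwrite; a minimal sketch under that assumption:

import cv2


def save_result(result, path):
    # assumed: write a uint8 image array to the given path
    cv2.imwrite(path, result)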