Code Example #1
def validation(model_path, file_data):
    if torch.cuda.is_available():
        model = NET.VDN_NET(in_channels=3, depth_snet=5).cuda()
        model.load_state_dict(torch.load(model_path))
    else:
        model = NET.VDN_NET(in_channels=3, depth_snet=5)
        model.load_state_dict(torch.load(model_path, map_location='cpu'))
    model.eval()
    avg_psnr_validation = 0.0
    avg_ssim_validation = 0.0
    obj_data = gd.ValidationBenchmark(h5_file_=file_data)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if torch.cuda.is_available():
        torch.backends.cudnn.benchmark = True
    for idx in range(len(obj_data)):
        noisy, image = obj_data[idx]
        ch, ht, wt = noisy.shape
        # move tensors to the model's device so the loop also runs on CPU-only machines
        noisy = noisy.view(1, ch, ht, wt).to(device)
        image = image.to(device)
        model_out, _ = model(noisy)
        noise = noisy - model_out[:, :ch, ].detach().data
        clean_img_pred = noise.view(ch, ht, wt).permute(1, 2, 0).clamp(0, 1)
        image = image.view(ch, ht, wt).permute(1, 2, 0)
        avg_psnr_validation += psnr(image * 255, clean_img_pred * 255)
        avg_ssim_validation += compare_ssim(img_as_ubyte(image.cpu().numpy()),
                                            img_as_ubyte(clean_img_pred.cpu().numpy()),
                                            win_size=11, data_range=255, multichannel=True, gaussian_weights=True)
    print("average validation PSNR = ", avg_psnr_validation / obj_data.__len__())
    print("average validation SSIM = ", avg_ssim_validation / obj_data.__len__())
Code Example #2
def testCase1():
    qp = 15
    step = 16
    im = plt.imread("E:/liumangxuxu/code/PyCodec/modules/lena2.tif").astype(
        int)
    print(im.shape)
    residual_1D, mode_1D = predictImage(im, qp, step)

    i_im = inversePredictImage(residual_1D, mode_1D, qp, im.shape[0],
                               im.shape[1], step)

    plt.figure()
    plt.imshow(i_im, cmap='gray')
    plt.title("Inverse image")

    logging.debug("Inverse Image PSNR:")
    logging.debug(tools.psnr(im, i_im))

    plt.show()
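Examples #2 through #4 report quality via `tools.psnr`, whose implementation is not shown here. A minimal NumPy sketch of such a helper, assuming 8-bit images (the body is an assumption, not the project's actual code):

import numpy as np

def psnr(original, reconstructed, peak=255.0):
    # hypothetical stand-in for tools.psnr: PSNR in dB between two
    # equally-shaped images
    mse = np.mean((np.asarray(original, dtype=np.float64) -
                   np.asarray(reconstructed, dtype=np.float64)) ** 2)
    if mse == 0:
        return float("inf")
    return 10.0 * np.log10(peak ** 2 / mse)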
Code Example #3
def processCheck():
    # 4x4 block test data set
    # N = 4
    # S = np.array([[1, 2, 2, 0],
    #               [0, 1, 3, 1],
    #               [0, 1, 2, 1],
    #               [1, 2, 2, -1]])

    # real 8x8 image test data
    N = 8
    S = np.array([[182, 196, 199, 201, 203, 201, 199, 173],
                  [175, 180, 176, 142, 148, 152, 148, 120],
                  [148, 118, 123, 115, 114, 107, 108, 107],
                  [115, 110, 110, 112, 105, 109, 101, 100],
                  [104, 106, 106, 102, 104, 95, 98, 105],
                  [99, 115, 131, 104, 118, 86, 87, 133],
                  [112, 154, 154, 107, 140, 97, 88, 151],
                  [145, 158, 178, 123, 132, 140, 138, 133]])

    # random test data
    # N = 8   # axis of 1D vector
    # S = np.random.randint(0, 100, size=[N, N])

    print("\nTest 2D %dx%d matrix:" % (N, N))
    print(S)

    # calculate DCT patterns by formula
    temp, S_i = dct_detail(S, N)
    diff = tools.psnr(S, np.round(S_i))
    print("\nPSNR is: ", np.around(diff, 2))

    # check the coefficients by Scipy
    T_scipy = np.round(block2dct(S), 4)
    print("\nthe expansion coefficients using SciPy:")
    print(T_scipy)

    # NOTE: something seems to be wrong with the calculation below
    S_scipy = np.round(dct2block(T_scipy), 4)
    print("\nInverse Result using SciPy:")
    print(S_scipy)
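The `block2dct` and `dct2block` helpers used above are defined elsewhere in the module; a plausible SciPy sketch follows. A round trip only reconstructs S exactly when both directions use the same normalization (`norm='ortho'`), and a scaling mismatch is one common cause of the kind of problem flagged in the comment above.

import numpy as np
from scipy.fftpack import dct, idct

def block2dct(block):
    # 2D DCT-II of one block: transform rows, then columns, with
    # orthonormal scaling so the inverse is a plain DCT-III
    return dct(dct(block, axis=0, norm='ortho'), axis=1, norm='ortho')

def dct2block(coeffs):
    # inverse 2D DCT, undoing block2dct
    return idct(idct(coeffs, axis=0, norm='ortho'), axis=1, norm='ortho')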
Code Example #4
def processWholeImage():
    im = plt.imread("lena2.tif").astype(float)
    print(im.shape)

    BLOCK_WIDTH = 8
    # using Scipy way
    dct = Img2DctUsingScipy(im, BLOCK_WIDTH)
    img_dct = Dct2ImgUsingScipy(dct, BLOCK_WIDTH)

    # using my own calculation way
    #dct, img_dct = ImgDctUsingDetail(im, BLOCK_WIDTH)

    plt.figure()
    plt.imshow(dct, cmap='gray', vmax=np.max(dct) * 0.01, vmin=0)
    plt.title("8x8 DCTs of the image")

    plt.figure()
    plt.imshow(np.hstack((im, img_dct)), cmap='gray')
    plt.title("Comparison between original and DCT compressed images")

    diff = tools.psnr(im, img_dct)
    print("\nPSNR is: ", np.around(diff, 2))

    plt.show()
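`Img2DctUsingScipy` and `Dct2ImgUsingScipy` are also project helpers that are not shown. A minimal sketch of the block-wise version, assuming the image dimensions are multiples of BLOCK_WIDTH and reusing the `block2dct`/`dct2block` sketch from Example #3:

import numpy as np

def Img2DctUsingScipy(img, block_width):
    # apply the 2D DCT to each non-overlapping block_width x block_width block
    out = np.zeros(img.shape, dtype=float)
    for r in range(0, img.shape[0], block_width):
        for c in range(0, img.shape[1], block_width):
            out[r:r + block_width, c:c + block_width] = \
                block2dct(img[r:r + block_width, c:c + block_width])
    return out

def Dct2ImgUsingScipy(coeffs, block_width):
    # invert the block-wise DCT to recover the image
    out = np.zeros(coeffs.shape, dtype=float)
    for r in range(0, coeffs.shape[0], block_width):
        for c in range(0, coeffs.shape[1], block_width):
            out[r:r + block_width, c:c + block_width] = \
                dct2block(coeffs[r:r + block_width, c:c + block_width])
    return out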
Code Example #5
def simulated_noise_test(noise_type,
                         model_path,
                         plot_noise=False,
                         stats=False,
                         plot_images=False,
                         conf1=False):
    test_sets = {"LIVE1": "*.bmp", "CBSD68": "*.png", "Set5": "*.bmp"}
    test_paths = sorted(
        gd.load_data(os.path.join("datasets", "test_data"), test_sets))
    data_obj = gd.TestDataset(test_paths, noise_type=noise_type, iid=True)
    if torch.cuda.is_available():
        if conf1:
            model = NET1.VDN_NET(in_channels=3, depth_snet=5).cuda()
        else:
            model = NET.VDN_NET(in_channels=3, depth_snet=5).cuda()
        model.load_state_dict(torch.load(model_path))
    else:
        model = NET.VDN_NET(in_channels=3, depth_snet=5)
        model.load_state_dict(torch.load(model_path, map_location='cpu'))
    model.eval()
    PSNR = 0.0
    SSIM = 0.0
    for idx in range(len(data_obj)):
        image, noisy = data_obj[idx]
        if torch.cuda.is_available():
            image, noisy = Variable(image.cuda()), Variable(noisy.cuda())
        _, ch, ht, wt = noisy.shape
        model_out, model_out2 = model(noisy)
        alpha = model_out2[:, :ch, ]
        beta = model_out2[:, ch:, ]
        result = beta / (alpha + 1)
        noise = noisy - model_out[:, :ch, ]
        clean_img_pred = noise.view(ch, ht, wt).permute(1, 2, 0).clamp(0, 1)
        image = image.view(ch, ht, wt).permute(1, 2, 0)
        if stats:
            PSNR += psnr(image * 255, clean_img_pred * 255)
            SSIM += compare_ssim(img_as_ubyte(image.cpu().detach().numpy()),
                                 img_as_ubyte(
                                     clean_img_pred.cpu().detach().numpy()),
                                 multichannel=True)
        if plot_noise:
            fig = plt.figure()
            ax = plt.axes(projection='3d')
            x = np.arange(0, ht, 1)
            y = np.arange(0, wt, 1)
            X, Y = np.meshgrid(x, y)
            Z1 = np.exp(img_as_float(result[0, 1, ].detach().cpu().numpy()))
            surf = ax.plot_surface(X[:100, :100],
                                   Y[:100, :100],
                                   Z1[:100, :100],
                                   rstride=1,
                                   cstride=1,
                                   cmap="viridis",
                                   antialiased=True,
                                   edgecolor="none")
            plt.savefig("simulation_results/approx_noise2_" + str(idx) +
                        ".png")
            plt.show()
        if plot_images:
            plt.subplots_adjust(wspace=0.2)
            plt.subplot(131)
            plt.imshow(image[100:250, 100:250, ].cpu())
            plt.title('Groundtruth')
            plt.subplot(132)
            plt.imshow(noisy[0].permute(1, 2, 0)[100:250, 100:250, ].cpu())
            plt.title('Noisy Image')
            plt.subplot(133)
            plt.imshow(
                img_as_ubyte(clean_img_pred.detach().cpu().numpy())[100:250,
                                                                    100:250, ])
            plt.title('Denoised Image')
            plt.savefig("simulation_results/denoised_simlation" + str(idx) +
                        ".png")
            plt.show()
    print("average PSNR = ", PSNR / data_obj.__len__())
    print("average SSIM = ", SSIM / data_obj.__len__())
Code Example #6
def training_model_VDN(arg, milestones):
    logging.basicConfig(filename=arg.log_path, level=logging.INFO)  # log file
    logging.info('Started')
    if not os.path.exists(arg.model_path):
        os.makedirs(arg.model_path)
    # load training data
    train_sets = {
        "BSDS500": "*.jpg",
        "Waterloo": "*.bmp",
        "ImageNet": "*.JPEG"
    }
    if arg.confnet == 1:
        model = VDN_NET_one(in_channels=arg.channels, depth_snet=arg.snet)
    else:
        model = VDN_NET(in_channels=arg.channels, depth_snet=arg.snet)
    model = model.float()
    train_paths = np.array(sorted(gd.load_data("datasets", train_sets)))
    ran = bool(arguments.randomize)
    clipping = bool(arguments.clipping)
    data_obj = gd.TrainDataset(train_paths,
                               randomize=ran,
                               patch_size=arg.patch)
    if torch.cuda.is_available():
        model.cuda()
        torch.backends.cudnn.benchmark = True
        data = DataLoader(dataset=data_obj,
                          drop_last=True,
                          batch_size=arg.batch,
                          shuffle=True,
                          num_workers=arg.workers)
    else:
        data = DataLoader(dataset=data_obj,
                          drop_last=True,
                          batch_size=arg.batch,
                          shuffle=True)

    # network parameters
    epsilon = np.sqrt(5.0e-6)
    p_window = 7
    optimizer = optim.Adam(model.parameters(), lr=2e-4)
    scheduler = MultiStepLR(optimizer, milestones=milestones, gamma=arg.gamma)
    if clipping:
        gadient_clip_Dnet = 1000.0
        gadient_clip_Snet = 50.0
        Dnet_parameters = [
            x for name, x in model.named_parameters()
            if 'dnet' in name.lower()
        ]
        Snet_parameters = [
            x for name, x in model.named_parameters()
            if 'snet' in name.lower()
        ]

    print("Training model simulation now!")

    for epoch in range(arg.epochs):
        if clipping:
            grad_D = 0.0
            grad_S = 0.0
        epoch_avg_loss = 0.0
        mse_avg = 0.0
        psnr_avg = 0.0
        ssim_avg = 0.0
        lr = optimizer.param_groups[0]['lr']
        if lr < arg.learning:
            print("reach min learning rate at epoch" + str(epoch))
        for i, batch_data in enumerate(data):
            if torch.cuda.is_available():
                x_batch, y_batch, sigma_arr = Variable(
                    batch_data[0]).cuda(), Variable(
                        batch_data[1]).cuda(), Variable(batch_data[2]).cuda()
            else:
                x_batch, y_batch, sigma_arr = batch_data[0], batch_data[
                    1], batch_data[2]
            optimizer.zero_grad()
            out_D, out_s = model(y_batch)
            loss, loglikelihood, kl_z, kl_sigma = loss_func.get_loss(
                x_batch, y_batch, sigma_arr, p_window,
                out_D[:, :arg.channels, :, :], out_D[:, arg.channels:, :, :],
                out_s[:, :arg.channels, :, :], out_s[:, arg.channels:, :, :],
                epsilon)
            loss.backward()
            if clipping:
                full_grad_D = nn.utils.clip_grad_norm_(Dnet_parameters,
                                                       gadient_clip_Dnet)
                full_grad_S = nn.utils.clip_grad_norm_(Snet_parameters,
                                                       gadient_clip_Snet)
                grad_D = (grad_D * (i / (i + 1)) + full_grad_D / (i + 1))
                grad_S = (grad_S * (i / (i + 1)) + full_grad_S / (i + 1))
            optimizer.step()
            epoch_avg_loss += loss.detach().item()
            predicted_image = y_batch - out_D[:, :arg.channels, :, :].detach(
            ).data
            predicted_image = predicted_image.clamp(0, 1)
            mse = calc_MSE(predicted_image, x_batch)
            mse_avg += mse
            psnr_avg += psnr(predicted_image * 255, x_batch * 255)
            ssim_avg += calculate_ssim(img_as_ubyte(
                predicted_image.permute(2, 3, 1, 0).cpu().numpy()),
                                       img_as_ubyte(
                                           x_batch.permute(2, 3, 1,
                                                           0).cpu().numpy()),
                                       multichannel=True)
            if i == 0:
                print("First ForwardPAss\n Loss: {}, MSE: {}".format(
                    loss.detach().item(), mse))
            if (i + 1) % 100 == 0:
                print("{} - Loss: {}, MSE:{}, epoch:{}".format(
                    i + 1, loss.item(), mse, epoch + 1))
            if i >= 5000:
                break
        if clipping:
            gadient_clip_Dnet = min(gadient_clip_Dnet, grad_D)
            gadient_clip_Snet = min(gadient_clip_Snet, grad_S)
        print("----------------------------------------------------------")
        print(
            "Epoch: {},  Avg MSE:{},  Avg Epoch Loss:{},  Avg PSNR:{}, Avg SSIM : {}, LR:{}"
            .format(epoch + 1, mse_avg / (i + 1), epoch_avg_loss / (i + 1),
                    psnr_avg / (i + 1), ssim_avg / (i + 1), lr))
        logging.info("av loss: {}, epoch: {}".format(epoch_avg_loss / (i + 1),
                                                     epoch + 1))
        scheduler.step()
        if epoch % arguments.epoch_save == 0:
            torch.save(
                model.state_dict(),
                os.path.join(arg.model_path,
                             "model_" + str(epoch) + "_epochs.pth"))
            print("saved model as" + arg.model_path)
    print("Finished Training...\n Saving model now.....\n")
    torch.save(model.state_dict(),
               os.path.join(arg.model_path, "final_model.pth"))
    print("saved model as" + os.path.join(arg.model_path, "final_model.pth"))
Code Example #7
def training_benchmark(arg, milestones):
    logging.basicConfig(filename=arg.log_path, level=logging.INFO)  # log file
    logging.info('Started')
    if not os.path.exists(arg.model_path):
        os.makedirs(arg.model_path)
    model = VDN_NET(in_channels=arg.channels, depth_snet=arg.snet)
    model = model.float()
    clipping = bool(arguments.clipping)

    # Load training data
    obj_data = gd.TrainBenchmark(h5_file_=arg.train_data,
                                 patch_size=arg.patch,
                                 window=11,
                                 radius=5)
    if torch.cuda.is_available():
        model.cuda()
        torch.backends.cudnn.benchmark = True
        data = DataLoader(obj_data,
                          batch_size=arg.batch,
                          shuffle=True,
                          num_workers=arg.workers,
                          pin_memory=True)
    else:
        data = DataLoader(obj_data, batch_size=arg.batch, shuffle=True)

    # network parameters
    epsilon = np.sqrt(1.0e-6)
    p_window = 7
    if clipping:
        gadient_clip_Dnet = 1000.0
        gadient_clip_Snet = 50.0
        Dnet_parameters = [
            x for name, x in model.named_parameters()
            if 'dnet' in name.lower()
        ]
        Snet_parameters = [
            x for name, x in model.named_parameters()
            if 'snet' in name.lower()
        ]
    optimizer = optim.Adam(model.parameters(), lr=2e-4)
    scheduler = MultiStepLR(optimizer, milestones=milestones, gamma=arg.gamma)

    print("Training model Benchmark now!")

    for epoch in range(arg.epochs):
        tic = time.time()
        if clipping:
            grad_D = 0.0
            grad_S = 0.0
        epoch_avg_loss = 0.0
        mse_avg = 0.0
        psnr_avg = 0.0
        ssim_avg = 0.0
        lr = optimizer.param_groups[0]['lr']
        if lr < arg.learning:
            print("reach min learning rate at epoch" + str(epoch))
        model.train()
        for i, batch_data in enumerate(data):
            if torch.cuda.is_available():
                y_batch, x_batch, sigma_arr = Variable(
                    batch_data[0]).cuda(), Variable(
                        batch_data[1]).cuda(), Variable(batch_data[2]).cuda()
            else:
                y_batch, x_batch, sigma_arr = batch_data[0], batch_data[
                    1], batch_data[2]
            optimizer.zero_grad()
            out_D, out_s = model(y_batch)
            loss, loglikelihood, kl_z, kl_sigma = loss_func.get_loss(
                x_batch, y_batch, sigma_arr, p_window,
                out_D[:, :arg.channels, :, :], out_D[:, arg.channels:, :, :],
                out_s[:, :arg.channels, :, :], out_s[:, arg.channels:, :, :],
                epsilon)
            loss.backward()
            if clipping:
                full_grad_D = nn.utils.clip_grad_norm_(Dnet_parameters,
                                                       gadient_clip_Dnet)
                full_grad_S = nn.utils.clip_grad_norm_(Snet_parameters,
                                                       gadient_clip_Snet)
                grad_D = (grad_D * (i / (i + 1)) + full_grad_D / (i + 1))
                grad_S = (grad_S * (i / (i + 1)) + full_grad_S / (i + 1))
            optimizer.step()
            epoch_avg_loss += loss.detach().item()
            predicted_image = y_batch - out_D[:, :arg.channels, :, :].detach(
            ).data
            predicted_image = predicted_image.clamp(0, 1)
            mse = calc_MSE(predicted_image, x_batch)
            mse_avg += mse
            psnr_avg += psnr(predicted_image * 255, x_batch * 255)
            ssim_avg += calculate_ssim(img_as_ubyte(
                predicted_image.permute(2, 3, 1, 0).cpu().numpy()),
                                       img_as_ubyte(
                                           x_batch.permute(2, 3, 1,
                                                           0).cpu().numpy()),
                                       multichannel=True)
            if i == 0:
                print("First ForwardPAss\n Loss: {}, MSE: {}".format(
                    loss.detach().item(), mse))
            if (i + 1) % 100 == 0:
                print("{} - Loss: {}, MSE:{}, epoch:{}".format(
                    i + 1, loss.item(), mse, epoch + 1))
            if i >= 5000:
                break
        if clipping:
            gadient_clip_Dnet = min(gadient_clip_Dnet, grad_D)
            gadient_clip_Snet = min(gadient_clip_Snet, grad_S)
        print("----------------------------------------------------------")
        print(
            "Epoch: {},  Avg MSE:{},  Avg Epoch Loss:{},  Avg PSNR:{}, Avg SSIM : {}, LR:{}"
            .format(epoch + 1, mse_avg / (i + 1), epoch_avg_loss / (i + 1),
                    psnr_avg / (i + 1), ssim_avg / (i + 1), lr))
        logging.info("av loss: {}, epoch: {}".format(epoch_avg_loss / (i + 1),
                                                     epoch + 1))
        # --------------- here comes the validation!  ---------------
        model.eval()
        avg_psnr_validation = 0.0
        avg_ssim_validation = 0.0
        obj_data = gd.ValidationBenchmark(h5_file_=arg.val_data)
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        for idx in range(len(obj_data)):
            noisy, image = obj_data[idx]
            ch, ht, wt = noisy.shape
            # keep validation tensors on the same device as the model
            noisy = noisy.view(1, ch, ht, wt).to(device)
            image = image.to(device)
            model_out, _ = model(noisy)
            noise = noisy - model_out[:, :ch, ].detach().data
            clean_img_pred = noise.view(ch, ht, wt).permute(1, 2,
                                                            0).clamp(0, 1)
            image = image.view(ch, ht, wt).permute(1, 2, 0)
            avg_psnr_validation += psnr(image * 255, clean_img_pred * 255)
            avg_ssim_validation += compare_ssim(
                img_as_ubyte(image.cpu().numpy()),
                img_as_ubyte(clean_img_pred.cpu().numpy()),
                win_size=11,
                data_range=255,
                multichannel=True,
                gaussian_weights=True)
        print("average validation PSNR = ",
              avg_psnr_validation / obj_data.__len__())
        print("average validation SSIM = ",
              avg_ssim_validation / obj_data.__len__())

        # -------------- finish validation ---------------------------------
        scheduler.step()
        toc = time.time()
        print('Time for this epoch: {:.2f}'.format(toc - tic))
        if epoch % arguments.epoch_save == 0:
            torch.save(
                model.state_dict(),
                os.path.join(arg.model_path,
                             "model_" + str(epoch) + "_epochs.pth"))
            print("saved model as" + arg.model_path)
    print("Finished Training...\n Saving model now.....\n")
    torch.save(model.state_dict(),
               os.path.join(arg.model_path, "final_model.pth"))
    print("saved model as" + os.path.join(arg.model_path, "final_model.pth"))