Example #1
def compareWatermark(wOriginal, wExtracted, imgMode):
    # Compare the extracted watermark with the original: returns whether the
    # watermark is detected (correlation above 0.7) and the PSNR between them.
    wOriginal = im.loadImage(wOriginal)
    if imgMode == "GRAYSCALE":
        wOriginal = im.grayscale(wOriginal)
    else:
        wOriginal = im.binarization(wOriginal)
    wExtracted = im.loadImage(wExtracted)
    #print(im.imgSize(wOriginal), im.imgSize(wExtracted))
    if im.imgSize(wOriginal) != im.imgSize(wExtracted):
        wExtracted = fixSizeImg(wOriginal, wExtracted, imgMode)
    wOriginal = ImageToFlattedArray(wOriginal)
    wExtracted = ImageToFlattedArray(wExtracted)
    #print(len(wOriginal), len(wExtracted))
    p = m.correlationIndex(wOriginal, wExtracted)
    psnr = m.PSNR(wOriginal, wExtracted)
    return m.binaryDetection(p, 0.7), psnr
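
The metrics module m is not shown in this example; the following is a rough, hypothetical sketch of what such helpers typically compute (standard PSNR, Pearson correlation, and a threshold test), not the project's actual implementation:

import numpy as np

# Hypothetical stand-ins for the m.* helpers used above; they follow the
# standard definitions, not the project's actual code.
def PSNR(a, b, max_value=255.0):
    a = np.asarray(a, dtype=np.float64)
    b = np.asarray(b, dtype=np.float64)
    mse = np.mean((a - b) ** 2)
    if mse == 0:
        return float("inf")  # identical inputs
    return 10.0 * np.log10(max_value ** 2 / mse)

def correlationIndex(a, b):
    a = np.asarray(a, dtype=np.float64).ravel()
    b = np.asarray(b, dtype=np.float64).ravel()
    return np.corrcoef(a, b)[0, 1]  # Pearson correlation in [-1, 1]

def binaryDetection(p, threshold):
    return p >= threshold  # watermark counted as detected above the threshold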
Example #2
def test(batch_size):
    # Run the generator G over the test set, save the generated images and
    # accumulate PSNR/SSIM sums (the caller averages them over the batches).
    avgPSNR = 0.0
    avgSSIM = 0.0
    counter = 0
    data_loader = CreateDataLoader(batch_size)
    dataset = data_loader.load_data()
    for i, data in enumerate(dataset):
        counter += 1
        images_X = data['A']
        images_Y = data['B']

        #G.eval()
        images_X = images_X.to(device)
        generated = G(images_X)
        # generated[generated < 0] = 0

        generated = generated.cpu().detach().numpy()
        x_test = images_X.cpu().float().numpy()
        y_test = images_Y.cpu().float().numpy()

        # generated = output.cpu().detach().numpy()
        # images_X = images_X.cpu().detach().numpy()
        # images_Y = images_Y.detach().numpy()
        for j in range(generated.shape[0]):
            y = y_test[j, :, :, :]  # original sharp
            x = x_test[j, :, :, :]  # blurred
            img = generated[j, :, :, :]  # generated

            out = np.concatenate((y, x, img), axis=1)  # side-by-side composite (unused below)
            img = (np.transpose(img, (1, 2, 0)) + 1) / 2.0 * 255.0
            img = img.astype(np.uint8)
            y = (np.transpose(y, (1, 2, 0)) + 1) / 2.0 * 255.0
            y = y.astype(np.uint8)
            psnr = metrics.PSNR(img, y)
            avgPSNR += psnr / 4  # hard-coded batch size of 4: each batch adds its mean PSNR
            ssim = metrics.SSIM_my(img, y)
            avgSSIM += ssim / 4
            z = i * 10 + j  # file index for saving (assumes at most 10 images per batch)

            im = Image.fromarray(img)
            im.save("results100{}.png".format(z))
            #im1 = Image.fromarray(y)
            #im1.save("original{}.png".format(z))
    return avgPSNR, avgSSIM, counter
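
Because each batch contributes its mean PSNR/SSIM (the hard-coded "/ 4" above), a caller obtains test-set averages by dividing the returned sums by the batch counter. A minimal usage sketch, assuming a batch size of 4 to match that divisor:

# Hypothetical usage: divide the accumulated sums by the number of batches.
sum_psnr, sum_ssim, n_batches = test(batch_size=4)
print("average PSNR: {:.2f}".format(sum_psnr / n_batches))
print("average SSIM: {:.4f}".format(sum_ssim / n_batches))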
Example #3
def main():
    # paths to the models
    model_paths = [
        os.path.join("..", "models", "SRDense-Type-3_ep80.h5"),
        os.path.join("..", "models", "srdense-norm.h5"),
        os.path.join("..", "models", "srresnet85.h5"),
        os.path.join("..", "models", "gen_model90.h5"),
        os.path.join("..", "models", "srgan60.h5"),
        os.path.join("..", "models", "srgan-mse-20.h5"), "Nearest"
    ]

    # corresponding names of the models
    model_names = [
        "SRDense", "SRDense-norm", "SRResNet", "SRGAN-from-scratch",
        "SRGAN-percept.-loss", "SRGAN-mse", "NearestNeighbor"
    ]

    # corresponding tile shapes
    tile_shapes = [((168, 168), (42, 42)), ((168, 168), (42, 42)),
                   ((504, 504), (126, 126)), ((336, 336), (84, 84)),
                   ((504, 504), (126, 126)), ((504, 504), (126, 126)),
                   ((336, 336), (84, 84))]

    # used to load the models with custom loss functions
    loss = VGG_LOSS((504, 504, 3))
    custom_objects = [{}, {"tf": tf}, {"tf": tf}, {"tf": tf},
                      {"tf": tf}, {"tf": tf}, {}]

    # creating a list of test images
    # [(lr, hr)]
    DOWN_SCALING_FACTOR = 4
    INTERPOLATION = cv2.INTER_CUBIC

    test_images = []
    root = os.path.join("..", "DSIDS", "test")
    # iterating over all files in the test folder
    for img in os.listdir(root):
        # checking if the file is an image
        if ".jpg" not in img:
            continue
        hr = Utils.crop_into_lr_shape(cv2.cvtColor(
            cv2.imread(os.path.join(root, img), cv2.IMREAD_COLOR),
            cv2.COLOR_BGR2RGB),
                                      shape=(3024, 4032))
        lr = cv2.resize(hr, (0, 0),
                        fx=1 / DOWN_SCALING_FACTOR,
                        fy=1 / DOWN_SCALING_FACTOR,
                        interpolation=INTERPOLATION)
        test_images.append((lr, hr))

    if TILES:
        '''
        First calculating performance metrics on single image tiles
        '''

        tile_performance = {}
        for i, mp in tqdm(enumerate(model_paths)):
            keras.backend.clear_session()
            # first step: load the model
            if i < 6:
                model = load_model(mp, custom_objects=custom_objects[i])

            mse = []
            psnr = []
            ssim = []
            mssim = []
            # second step: iterate over the test images
            for test_pair in tqdm(test_images):
                # third step: tile the test image
                lr_tiles = Utils.tile_image(test_pair[0],
                                            shape=tile_shapes[i][1])
                hr_tiles = Utils.tile_image(test_pair[1],
                                            shape=tile_shapes[i][0])

                m = []
                p = []
                s = []
                ms = []

                # fourth step: iterate over the tiles
                for lr, hr in zip(lr_tiles, hr_tiles):
                    # fifth step: calculate the sr tile
                    if i < 2:
                        if i == 1:
                            lr = lr.astype(np.float64)
                            lr = lr / 255
                        tmp = np.squeeze(
                            model.predict(np.expand_dims(lr, axis=0)))
                        if i == 1:
                            tmp = tmp * 255
                        tmp[tmp < 0] = 0
                        tmp[tmp > 255] = 255
                        sr = tmp.astype(np.uint8)
                    elif i < 6:
                        sr = Utils.denormalize(
                            np.squeeze(model.predict(
                                np.expand_dims(rescale_imgs_to_neg1_1(lr),
                                               axis=0)),
                                       axis=0))
                    else:
                        sr = cv2.resize(lr, (0, 0),
                                        fx=4,
                                        fy=4,
                                        interpolation=cv2.INTER_NEAREST)

                    # sixth step: append the calculated metric
                    m.append(metrics.MSE(hr, sr))
                    p.append(metrics.PSNR(hr, sr))
                    s.append(metrics.SSIM(hr, sr))
                    ms.append(metrics.MSSIM(hr, sr))

                # seventh step: append the mean metric for this image
                mse.append(np.mean(m))
                psnr.append(np.mean(p))
                ssim.append(np.mean(s))
                mssim.append(np.mean(ms))

            # eighth step: append the mean metric for this model
            tile_performance[model_names[i]] = (np.mean(mse), np.mean(psnr),
                                                np.mean(ssim), np.mean(mssim))

        # final output
        print("Performance on single tiles:")
        f = open("tile_performance.txt", "w")
        for key in tile_performance:
            print(key + ":   MSE = " + str(tile_performance[key][0]) +
                  ", PSNR = " + str(tile_performance[key][1]) + ", SSIM = " +
                  str(tile_performance[key][2]) + ", MSSIM = " +
                  str(tile_performance[key][3]))
            f.write(key + " " + str(tile_performance[key][0]) + " " +
                    str(tile_performance[key][1]) + " " +
                    str(tile_performance[key][2]) + " " +
                    str(tile_performance[key][3]) + "\n")
        f.close()

    if WHOLE_LR:
        '''
        Second calculating performance metrics on a single upscaled image
        '''

        img_performance = {}
        for i, mp in tqdm(enumerate(model_paths)):
            keras.backend.clear_session()
            # first step: load the model
            if i < 6:
                model = load_model(mp, custom_objects=custom_objects[i])

                # second step: changing the input layer
                _in = Input(shape=test_images[0][0].shape)
                _out = model(_in)
                _model = Model(_in, _out)

            mse = []
            psnr = []
            ssim = []
            mssim = []
            # third step: iterate over the test images
            for test_pair in tqdm(test_images):
                # fourth step: calculate the sr image
                try:
                    if i < 2:
                        if i == 1:
                            lr = test_pair[0].astype(np.float64)
                            lr = lr / 255
                        else:
                            lr = test_pair[0]
                        tmp = np.squeeze(
                            _model.predict(np.expand_dims(lr, axis=0)))
                        if i == 1:
                            tmp = tmp * 255
                        tmp[tmp < 0] = 0
                        tmp[tmp > 255] = 255
                        sr = tmp.astype(np.uint8)
                    elif i < 6:
                        sr = Utils.denormalize(
                            np.squeeze(_model.predict(
                                np.expand_dims(rescale_imgs_to_neg1_1(
                                    test_pair[0]),
                                               axis=0)),
                                       axis=0))
                    else:
                        sr = cv2.resize(test_pair[0], (0, 0),
                                        fx=4,
                                        fy=4,
                                        interpolation=cv2.INTER_NEAREST)

                    # fifth step: append the metric for this image
                    mse.append(metrics.MSE(test_pair[1], sr))
                    psnr.append(metrics.PSNR(test_pair[1], sr))
                    ssim.append(metrics.SSIM(test_pair[1], sr))
                    mssim.append(metrics.MSSIM(test_pair[1], sr))
                except Exception:  # e.g. the model cannot process the full-resolution input
                    mse.append("err")
                    psnr.append("err")
                    ssim.append("err")
                    mssim.append("err")

            # sixth step: append the mean metric for this model
            try:
                img_performance[model_names[i]] = (np.mean(mse), np.mean(psnr),
                                                   np.mean(ssim),
                                                   np.mean(mssim))
            except Exception:  # np.mean fails when the list contains "err" entries
                img_performance[model_names[i]] = ("err", "err", "err", "err")

        # final output
        print("Performance on whole lr:")
        f = open("whole_lr_performance.txt", "w")
        for key in img_performance:
            print(key + ":   MSE = " + str(img_performance[key][0]) +
                  ", PSNR = " + str(img_performance[key][1]) + ", SSIM = " +
                  str(img_performance[key][2]) + ", MSSIM = " +
                  str(img_performance[key][3]))
            f.write(key + " " + str(img_performance[key][0]) + " " +
                    str(img_performance[key][1]) + " " +
                    str(img_performance[key][2]) + " " +
                    str(img_performance[key][3]) + "\n")
        f.close()

    if STITCHED:
        '''
        Third calculating performance metrics on a stitched image
        '''

        stitch_performance = {}
        for i, mp in tqdm(enumerate(model_paths)):
            keras.backend.clear_session()
            # first step: load the model
            if i < 6:
                model = load_model(mp, custom_objects=custom_objects[i])

            mse = []
            psnr = []
            ssim = []
            mssim = []

            o_mse = []
            o_psnr = []
            o_ssim = []
            o_mssim = []
            # second step: iterate over the test images
            for test_pair in tqdm(test_images):
                # third step: tile the test image
                lr_tiles = Utils.tile_image(test_pair[0],
                                            shape=tile_shapes[i][1])
                lr_tiles_overlap = Utils.tile_image(test_pair[0],
                                                    shape=tile_shapes[i][1],
                                                    overlap=True)

                sr_tiles = []
                sr_tiles_overlap = []
                # fourth step: iterate over the tiles
                for lr in lr_tiles:
                    # fifth step: calculate the sr tiles
                    if i < 2:
                        if i == 1:
                            lr = lr.astype(np.float64)
                            lr = lr / 255
                        tmp = np.squeeze(
                            model.predict(np.expand_dims(lr, axis=0)))
                        if i == 1:
                            tmp = tmp * 255
                        tmp[tmp < 0] = 0
                        tmp[tmp > 255] = 255
                        sr = tmp.astype(np.uint8)
                        sr_tiles.append(sr)
                    elif i < 6:
                        sr_tiles.append(
                            Utils.denormalize(
                                np.squeeze(model.predict(
                                    np.expand_dims(rescale_imgs_to_neg1_1(lr),
                                                   axis=0)),
                                           axis=0)))
                    else:
                        sr_tiles.append(
                            cv2.resize(lr, (0, 0),
                                       fx=4,
                                       fy=4,
                                       interpolation=cv2.INTER_NEAREST))

                for lr in lr_tiles_overlap:
                    # fifth step: calculate the sr tiles
                    if i < 2:
                        if i == 1:
                            lr = lr.astype(np.float64)
                            lr = lr / 255
                        tmp = np.squeeze(
                            model.predict(np.expand_dims(lr, axis=0)))
                        if i == 1:
                            tmp = tmp * 255
                        tmp[tmp < 0] = 0
                        tmp[tmp > 255] = 255
                        sr = tmp.astype(np.uint8)
                        sr_tiles_overlap.append(sr)
                    elif i < 6:
                        sr_tiles_overlap.append(
                            Utils.denormalize(
                                np.squeeze(model.predict(
                                    np.expand_dims(rescale_imgs_to_neg1_1(lr),
                                                   axis=0)),
                                           axis=0)))
                    else:
                        sr_tiles_overlap.append(
                            cv2.resize(lr, (0, 0),
                                       fx=4,
                                       fy=4,
                                       interpolation=cv2.INTER_NEAREST))

                # sixth step: stitch the image
                sr_simple = ImageStitching.stitch_images(
                    sr_tiles, test_pair[1].shape[1], test_pair[1].shape[0],
                    sr_tiles[0].shape[1], sr_tiles[0].shape[0],
                    test_pair[1].shape[1] // sr_tiles[0].shape[1],
                    test_pair[1].shape[0] // sr_tiles[0].shape[0])
                sr_advanced = ImageStitching.stitching(
                    sr_tiles_overlap,
                    LR=None,
                    image_size=(test_pair[1].shape[0], test_pair[1].shape[1]),
                    adjustRGB=False,
                    overlap=True)

                # seventh step: append the mean metric for this image
                mse.append(metrics.MSE(test_pair[1], sr_simple))
                psnr.append(metrics.PSNR(test_pair[1], sr_simple))
                ssim.append(metrics.SSIM(test_pair[1], sr_simple))
                mssim.append(metrics.MSSIM(test_pair[1], sr_simple))

                o_mse.append(metrics.MSE(test_pair[1], sr_advanced))
                o_psnr.append(metrics.PSNR(test_pair[1], sr_advanced))
                o_ssim.append(metrics.SSIM(test_pair[1], sr_advanced))
                o_mssim.append(metrics.MSSIM(test_pair[1], sr_advanced))

            # eighth step: append the mean metric for this model
            stitch_performance[model_names[i]] = [
                (np.mean(mse), np.mean(psnr), np.mean(ssim), np.mean(mssim)),
                (np.mean(o_mse), np.mean(o_psnr), np.mean(o_ssim),
                 np.mean(o_mssim))
            ]

        # final output
        print("Performance on stitched images:")
        f = open("stitch_performance.txt", "w")
        for key in stitch_performance:
            print("simple stitch:  " + key + ":   MSE = " +
                  str(stitch_performance[key][0][0]) + ", PSNR = " +
                  str(stitch_performance[key][0][1]) + ", SSIM = " +
                  str(stitch_performance[key][0][2]) + ", MSSIM = " +
                  str(stitch_performance[key][0][3]))
            print("advanced stitch:  " + key + ":   MSE = " +
                  str(stitch_performance[key][1][0]) + ", PSNR = " +
                  str(stitch_performance[key][1][1]) + ", SSIM = " +
                  str(stitch_performance[key][1][2]) + ", MSSIM = " +
                  str(stitch_performance[key][1][3]))
            f.write(key + " " + str(stitch_performance[key][0][0]) + " " +
                    str(stitch_performance[key][0][1]) + " " +
                    str(stitch_performance[key][0][2]) + " " +
                    str(stitch_performance[key][0][3]) + "\n")
            f.write(key + " " + str(stitch_performance[key][1][0]) + " " +
                    str(stitch_performance[key][1][1]) + " " +
                    str(stitch_performance[key][1][2]) + " " +
                    str(stitch_performance[key][1][3]) + "\n")
        f.close()
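
Utils.tile_image is not shown here; below is a minimal, hypothetical sketch of a non-overlapping tiler consistent with how it is called above (it ignores the overlap=True variant used for the advanced stitching):

import numpy as np

# Hypothetical non-overlapping tiler: cuts an H x W x C image into equally
# sized tiles of shape (tile_h, tile_w), scanning row by row.
def tile_image(img, shape):
    tile_h, tile_w = shape
    tiles = []
    for y in range(0, img.shape[0] - tile_h + 1, tile_h):
        for x in range(0, img.shape[1] - tile_w + 1, tile_w):
            tiles.append(img[y:y + tile_h, x:x + tile_w])
    return tiles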
Example #4
    netG_C2B = eval(checkB[0])(1, 3).to(opt.device)
    # load check point
    netG_A2C.load_state_dict(torch.load(args.netGA))
    netG_C2B.load_state_dict(torch.load(args.netGB))
    netG_A2C.eval()
    netG_C2B.eval()
    print("Starting visualization Loop...")
    # setup data loader
    data_loader = DataLoader(
        testset,
        opt.batch_size,
        num_workers=opt.num_works,
        shuffle=False,
        pin_memory=True,
    )
    evaluator = metrics.PSNR()
    for idx, sample in enumerate(data_loader):
        realA = sample['src'].to(opt.device)
        # realA -= 0.5
        realB = sample['tar'].to(opt.device)
        # realB -= 0.5
        # Y = 0.2125 R + 0.7154 G + 0.0721 B [RGB2Gray, 3=>1 ch]
        realBC = 0.2125 * realB[:, :1, :, :] + \
                 0.7154 * realB[:, 1:2, :, :] + \
                 0.0721 * realB[:, 2:3, :, :]
        sf = int(checkA[2][1])
        realBA = nn.functional.interpolate(realBC, scale_factor=1. / sf)
        # realBA = nn.functional.interpolate(realBA, scale_factor=sf)
        realAA = nn.functional.interpolate(realA, scale_factor=1. / sf)
        fake_AC = netG_A2C(realAA)
        fake_AB = netG_C2B(fake_AC)
Example #5
    # create the output directories if they do not exist yet (checked independently)
    os.makedirs(save_dirA, exist_ok=True)
    os.makedirs(save_dirB, exist_ok=True)
    ### Build model
    netG_A2C = eval(checkA[0].replace('@G2LAB', ''))(1, 1, int(checkA[2][1])).to(opt.device)
    netG_C2B = eval(checkB[0].replace('@G2LAB', ''))(1, 2).to(opt.device)
    # load check point
    netG_A2C.load_state_dict(torch.load(os.path.join(Check_DIR, os.path.basename(args.netGA))))
    netG_C2B.load_state_dict(torch.load(os.path.join(Check_DIR, os.path.basename(args.netGB))))
    netG_A2C.eval()
    netG_C2B.eval()
    print("Starting test Loop...")
    # setup data loader
    data_loader = DataLoader(testset, opt.batch_size, num_workers=opt.num_works,
                             shuffle=False, pin_memory=True, )
    evaluators = [metrics.MSE(), metrics.PSNR(), metrics.AE(), metrics.SSIM()]
    performs = [[] for i in range(len(evaluators))]
    for idx, sample in enumerate(data_loader):
        realA = sample['src'].to(opt.device)
        # realA -= 0.5
        realB = sample['tar'].to(opt.device)
        # realB -= 0.5
        # Y = 0.2125 R + 0.7154 G + 0.0721 B [RGB2Gray, 3=>1 ch]
        realBC = realB[:, :1, :, :]
        sf = int(checkA[2][1])
        realBA = nn.functional.interpolate(realBC, scale_factor=1. / sf, mode='bilinear')
        realBA = nn.functional.interpolate(realBA, scale_factor=sf, mode='bilinear')
        # realAA = nn.functional.interpolate(realA, scale_factor=1. / sf)
        realAA = realA
        fake_AC = netG_A2C(realAA)
        fake_AB = netG_C2B(fake_AC)
Example #6
def train(D, G, curr_lr, lr, n_epoch, beta1, beta2, bs):
    # WGAN-GP style training loop: the critic D is updated several times per
    # generator update, with a gradient penalty on interpolated samples.
    data_loader = CreateDataLoader(batchSize=bs)
    dataset = data_loader.load_data()

    # Create optimizers for the generators and discriminators
    optimizer_G = torch.optim.Adam(G.parameters(), lr=lr, betas=(beta1, beta2))
    optimizer_D = torch.optim.Adam(D.parameters(), lr=lr, betas=(beta1, beta2))

    res_d = []
    res_g = []
    total_steps = 0
    for epoch in range(1, n_epoch + 1):
        print("Running epoch:", epoch)
        start_time_epoch = time.time()
        sum_d = 0
        sum_g = 0
        for i, data in enumerate(dataset):
            total_steps += c.batchSize

            images_X = data['A']
            images_Y = data['B']

            # move images to GPU if available (otherwise stay on CPU)
            # train discriminator on real
            real_A = Variable(images_X.to(device))
            fake_B = G.forward(real_A)
            real_B = Variable(images_Y.to(device))

            # =======================Train the discriminator=======================#
            for d_iter in range(5):  # train the critic 5 times per generator step
                optimizer_D.zero_grad()

                # Real images
                D_real = D.forward(real_B)
                # Fake images
                D_fake = D.forward(fake_B.detach())
                # Gradient penalty
                gradient_penalty = calc_gradient_penalty(D, real_B.data, fake_B.data)
                d_loss = D_fake.mean() - D_real.mean() + gradient_penalty
                d_loss.backward(retain_graph=True)

                optimizer_D.step()
                if d_iter == 4:
                    sum_d += d_loss.item()

            #========================Train the generator===========================#
            optimizer_G.zero_grad()

            fake_B = G.forward(real_A)
            D_fake = D.forward(fake_B)
            g_loss = -D_fake.mean()
            g_contentloss = perceptual_loss(fake_B, real_B) * 100
            g_total_loss = g_loss + g_contentloss
            g_total_loss.backward()

            optimizer_G.step()

            # printing PSNR & SSIM metrics at a certain frequency
            if total_steps % c.display_freq == 4:
                image_res = util.get_visuals(real_A, fake_B, real_B)
                psnr = metrics.PSNR(image_res['Restored_Train'], image_res['Sharp_Train'])
                print('PSNR on Train (at epoch {0}) = {1}'.format(epoch, psnr))
                ssim = metrics.SSIM_my(image_res['Restored_Train'], image_res['Sharp_Train'])
                print('SSIM_my on Train (at epoch {0}) = {1}'.format(epoch, ssim))

            # print losses & errors
            # if total_steps % c.print_freq == 0:
            #     err = util.get_errors(g_loss, g_contentloss, d_loss)
            #     t = (time.time() - start_time_epoch) / c.batchSize
            #     util.print_errors(epoch, i, err, t)

            # accumulate the generator loss over all batches in this epoch
            sum_g += g_total_loss.item()

        # decaying learning rate
        if epoch > 150:
            lrd = 0.0001 / 150
            new_lr = curr_lr - lrd

            for param_group in optimizer_D.param_groups:
                param_group['lr'] = new_lr
            for param_group in optimizer_G.param_groups:
                param_group['lr'] = new_lr
            print('Update learning rate: %f -> %f' % (curr_lr, new_lr))
            curr_lr = new_lr

        # saving the model every c.save_freq epochs
        if epoch % c.save_freq == 0:
            torch.save(G.state_dict(), 'model_G_' + str(epoch) + '.pt')
            torch.save(D.state_dict(), 'model_D_' + str(epoch) + '.pt')
        res_d.append(sum_d)  # per-epoch loss totals (not averaged over batches)
        res_g.append(sum_g)
        end_time_epoch = time.time()

        print("Time for epoch {0}: {1} | Disc loss: {2}  | Gen loss: {3}".format(epoch, (end_time_epoch - start_time_epoch), res_d[epoch-1], res_g[epoch-1]))

    torch.save(G.state_dict(), 'model_G_last.pt')
    torch.save(D.state_dict(), 'model_D_last.pt')
    print("Model Saved!")
    util.plotter(res_d, res_g)
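
calc_gradient_penalty is not defined in this snippet; the standard WGAN-GP penalty (Gulrajani et al., 2017) that it presumably implements looks roughly like the sketch below. This is an assumption about its behavior, not the original helper, and lambda_gp = 10 is the value from the paper:

import torch
from torch import autograd

# Sketch of the standard WGAN-GP gradient penalty (assumed, not the project's code).
def calc_gradient_penalty(D, real_data, fake_data, lambda_gp=10.0):
    alpha = torch.rand(real_data.size(0), 1, 1, 1, device=real_data.device)
    # random points on the straight line between each real/fake pair
    interpolates = (alpha * real_data + (1 - alpha) * fake_data).requires_grad_(True)
    d_out = D(interpolates)
    grads = autograd.grad(outputs=d_out, inputs=interpolates,
                          grad_outputs=torch.ones_like(d_out),
                          create_graph=True, retain_graph=True)[0]
    grads = grads.view(grads.size(0), -1)
    return lambda_gp * ((grads.norm(2, dim=1) - 1) ** 2).mean()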
Example #7
    D = models.PatchDiscriminator().to(config.DEVICE)

    #set optimizer
    G_optim = optim.Adam(G.parameters(),
                         lr=config.LEARNING_RATE,
                         betas=config.BETAS)
    D_optim = optim.Adam(D.parameters(),
                         lr=config.LEARNING_RATE,
                         betas=config.BETAS)

    #set criterion
    G_criterion = losses.GLoss()
    D_criterion = losses.DLoss()

    #set meter
    PSNR_meter = metrics.PSNR()

    #train
    G.train()
    D.train()
    for epoch in range(config.EPOCHES):
        with tqdm(total=len(train_data), ncols=80) as t:
            t.set_description('epoch: {}/{}'.format(epoch + 1, config.EPOCHES))

            for input_img, real_img in train_data_loader:
                fake_img = G(input_img)
                # print(fake_img)  # debug output
                real_pred = D(input_img, real_img)
                fake_pred = D(input_img, fake_img)

                G_optim.zero_grad()