Example #1
import numpy as np
import scipy.misc as sm  # imresize/face are available in older SciPy releases
import utils  # project-local helpers providing rgb2ycc and gaussian_kernel


def main_boilerplate():
    # Load the demo image, upscale it 4x, and keep the luma (Y) channel
    img = sm.imresize(sm.face(), 4.0, interp='bicubic')
    x = utils.rgb2ycc(img.astype(np.float32) / 255.)[:, :, 0]
    # Gaussian blur kernel plus additive Gaussian noise (std 0.01)
    k = utils.gaussian_kernel(2, 3.5)
    noise = np.random.normal(0., 0.01, x.shape).astype(np.float32)
    return img, x, k, noise
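
For reference, the project-local utils.rgb2ycc is not shown on this page. Below is a minimal sketch of what such a helper typically looks like, assuming the standard BT.601 full-range conversion on an (H, W, 3) float array in [0, 1]; it is an illustration, not this project's implementation:

import numpy as np

def rgb2ycc(rgb):
    # Hypothetical BT.601 full-range RGB -> YCbCr for an (H, W, 3) array in [0, 1]
    m = np.array([[ 0.299,     0.587,     0.114   ],
                  [-0.168736, -0.331264,  0.5     ],
                  [ 0.5,      -0.418688, -0.081312]], dtype=rgb.dtype)
    ycc = rgb @ m.T
    ycc[..., 1:] += 0.5  # center the chroma channels around mid-range
    return ycc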
Example #2
File: isgan.py  Project: pingcsu/isgan
    def draw_images(self, nb_images=1):
        # Select random images from the dataset
        cover_idx = np.random.randint(0, self.images_lfw.shape[0], nb_images)
        secret_idx = np.random.randint(0, self.images_lfw.shape[0], nb_images)
        imgs_cover = self.images_lfw[cover_idx]
        imgs_secret = self.images_lfw[secret_idx]

        images_ycc = np.zeros(imgs_cover.shape)
        secret_gray = np.zeros((imgs_secret.shape[0], 1, imgs_cover.shape[2], imgs_cover.shape[3]))

        # Convert covers to YCbCr and secrets to grayscale
        for k in range(nb_images):
            images_ycc[k, :, :, :] = rgb2ycc(imgs_cover[k, :, :, :])
            secret_gray[k, 0, :, :] = rgb2gray(imgs_secret[k, :, :, :])

        # Rescale to [-1, 1]
        X_test_ycc = (images_ycc.astype(np.float32) - 127.5) / 127.5
        X_test_gray = (secret_gray.astype(np.float32) - 127.5) / 127.5
        
        imgs_stego, imgs_recstr = self.base_model.predict([X_test_ycc, X_test_gray])

        # Unnormalize stego and reconstructed images
        imgs_stego = imgs_stego.astype(np.float32) * 127.5 + 127.5
        imgs_recstr = imgs_recstr.astype(np.float32) * 127.5 + 127.5

        # Transpose color images to channels-last and squeeze grayscale to 2-D
        imgs_cover = imgs_cover.transpose((0, 2, 3, 1))
        imgs_stego = imgs_stego.transpose((0, 2, 3, 1))
        secret_gray = np.reshape(secret_gray, (nb_images, 256, 256))
        imgs_recstr = np.reshape(imgs_recstr, (nb_images, 256, 256))


        # Save cover/stego (RGB) and secret/reconstruction (grayscale) images
        for k in range(nb_images):
            scipy.misc.imsave('images/{}_cover.png'.format(k), imgs_cover[k, :, :, :])
            plt.imsave('images/{}_secret.png'.format(k), secret_gray[k, :, :], cmap='gray')
            scipy.misc.imsave('images/{}_stego.png'.format(k), imgs_stego[k, :, :, :])
            plt.imsave('images/{}_recstr.png'.format(k), imgs_recstr[k, :, :], cmap='gray')
        
        print("Images drawn.")
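
This example applies rgb2ycc and rgb2gray to channel-first (3, H, W) arrays with values in [0, 255]. A plausible sketch of those helpers under that assumption, using BT.601 weights (the names match the snippet, but the bodies are illustrative guesses):

import numpy as np

def rgb2gray(rgb):
    # (3, H, W) in [0, 255] -> (H, W) luma via BT.601 weights
    r, g, b = rgb[0], rgb[1], rgb[2]
    return 0.299 * r + 0.587 * g + 0.114 * b

def rgb2ycc(rgb):
    # (3, H, W) in [0, 255] -> (3, H, W) YCbCr with chroma offset 128
    r, g, b = rgb[0], rgb[1], rgb[2]
    y  =  0.299 * r + 0.587 * g + 0.114 * b
    cb = -0.168736 * r - 0.331264 * g + 0.5 * b + 128.
    cr =  0.5 * r - 0.418688 * g - 0.081312 * b + 128.
    return np.stack([y, cb, cr])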
Example #3
            'diffDetail': diffDetail,
            'clamp': clamp,
            'heavy': heavy,
            'HUE': HUE
        }
        json.dump(config, f)
else:
    # Load the saved parameters and decode them into the namespace
    with open(rootFolder + "/parameter.json", 'r') as f:
        config = json.load(f)
        locals().update(config)

# Preprocess to integer-valued tensors: keep RGB when HUE is set,
# otherwise convert to YCbCr first
if HUE:
    lambd = lambda x: (x * 255).byte().to(torch.float32).to(device)
else:
    lambd = lambda x: utils.rgb2ycc(
        (x * 255).byte().float(), True).to(torch.float32).to(device)

# Building the target dataset
if target == "CIFAR":
    # Define dimensions
    targetSize = [3, 32, 32]
    dimensional = 2
    channel = targetSize[0]
    blockLength = targetSize[-1]

    # Define the decimal normalization and rounding scheme
    decimal = flow.ScalingNshifting(256, -128)
    rounding = utils.roundingWidentityGradient

    # Building train & test datasets
    trainsetTransform = torchvision.transforms.Compose([
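
The snippet above is cut off inside the Compose call. A hedged sketch of how the lambd preprocessing defined earlier might be attached to a CIFAR-10 pipeline (the actual transform list and dataset arguments are this project's and are not shown here):

trainsetTransform = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),     # HWC uint8 -> CHW float in [0, 1]
    torchvision.transforms.Lambda(lambd),  # quantize and (optionally) convert to YCbCr
])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=trainsetTransform)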
Example #4
def testBPD(loader, earlyStop=-1):
    actualBPD = []
    theoryBPD = []
    ERR = []

    if not HUE:
        yccERR = []

    count = 0
    with torch.no_grad():
        for RGBsamples, _ in loader:
            if HUE:
                samples = RGBsamples
            else:
                samples = utils.rgb2ycc(RGBsamples, True, True)

            count += 1
            z, _ = f.inverse(samples)

            zparts = divide(z)

            CDF = calCDF(samples.shape[0])

            state = []

            # rANS-encode each sample's latent symbols, last dimension first
            for i in range(samples.shape[0]):
                symbols = zparts[i]
                s = rans.x_init
                for j in reversed(range(symbols.shape[-1])):
                    cdf = CDF[:, i, j]
                    s = coder.encoder(cdf,
                                      symbols[j],
                                      s,
                                      precision=args.precision)
                state.append(rans.flatten(s))
            actualBPD.append(32 / (np.prod(samples.shape[1:])) *
                             np.mean([s.shape[0] for s in state]))
            theoryBPD.append(
                (-f.logProbability(samples).mean() /
                 (np.prod(samples.shape[1:]) * np.log(2.))).detach().item())

            # rANS-decode the bitstreams back to symbols to verify the round trip
            rcnParts = []
            for i in range(samples.shape[0]):
                s = rans.unflatten(state[i])
                symbols = []
                for j in range(np.prod(targetSize)):
                    cdf = CDF[:, i, j]
                    s, rcnSymbol = coder.decoder(cdf,
                                                 s,
                                                 precision=args.precision)
                    symbols.append(rcnSymbol)
                rcnParts.append(torch.tensor(symbols).reshape(1, -1))
            rcnParts = torch.cat(rcnParts, 0)

            rcnZ = join(rcnParts)

            rcnSamples, _ = f.forward(rcnZ.float())

            if not HUE:
                yccERR.append(
                    torch.abs(RGBsamples - utils.ycc2rgb(
                        rcnSamples, True, True).contiguous()).mean().item())
                ERR.append(
                    torch.abs(samples.contiguous() - rcnSamples).sum().item())
            else:
                ERR.append(torch.abs(samples - rcnSamples).sum().item())

            if count >= earlyStop and earlyStop > 0:
                break

    actualBPD = np.array(actualBPD)
    theoryBPD = np.array(theoryBPD)
    ERR = np.array(ERR)

    if not HUE:
        yccERR = np.array(yccERR)

    print("===========================SUMMARY==================================")
    if HUE:
        print("Actual Mean BPD:", actualBPD.mean(), "Theory Mean BPD:",
              theoryBPD.mean(), "Mean Error:", ERR.mean())
    else:
        print("Actual Mean BPD:", actualBPD.mean(), "Theory Mean BPD:",
              theoryBPD.mean(), "Mean Error:", ERR.mean(), "ycc Mean Error:",
              yccERR.mean())

    return actualBPD, theoryBPD, ERR
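
For reference, the actual bits-per-dimension above comes from the flattened rANS state being an array of 32-bit words, so one sample costs 32 * len(state) bits. A minimal restatement of that accounting (shapes assumed to follow targetSize):

import numpy as np

def bits_per_dim(state_len, sample_shape):
    # state_len: number of 32-bit words in a flattened rANS state
    # sample_shape: per-sample dimensions, e.g. (3, 32, 32) for CIFAR
    return 32 * state_len / np.prod(sample_shape)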
Example #5
    set_up_logging()

    y = misc.imread(img_name)
    #y = y[:,:,0]
    logger.info("Image '{0}' has shape {1} => {2} pixels".format(
        img_name, y.shape, y.shape[0] * y.shape[1]))
    if len(y.shape) == 2:  # Detect gray scale
        z = image_processing(y)[0]
        display_or_save('input.png', y, cmap='gray')
        display_or_save('output.png',
                        z.astype(np.uint8, copy=False),
                        cmap='gray')
        residuals(y, z)
    else:
        y = rgb2ycc(y)
        y_ycc = y[:, :, 0]
        y_cr = y[:, :, 1]
        y_cb = y[:, :, 2]

        z_ycc, z_cr, z_cb = image_processing(y_ycc, y_cr, y_cb)

        z = np.empty(y.shape)
        z[:, :, 0] = z_ycc
        z[:, :, 1] = z_cr
        z[:, :, 2] = z_cb

        y = ycc2rgb(y)
        z = ycc2rgb(z)

        display_or_save('y_ycc.png', y_ycc, cmap='gray')
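
Examples #4 and #5 also call the inverse ycc2rgb. A minimal sketch, assuming the BT.601 inverse on an (H, W, 3) array with chroma centered at 128 (illustrative only; each project's actual signature differs):

import numpy as np

def ycc2rgb(ycc):
    # Hypothetical inverse BT.601 conversion; expects chroma offset by 128
    y, cb, cr = ycc[..., 0], ycc[..., 1] - 128., ycc[..., 2] - 128.
    r = y + 1.402 * cr
    g = y - 0.344136 * cb - 0.714136 * cr
    b = y + 1.772 * cb
    return np.stack([r, g, b], axis=-1)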
Example #6
File: isgan.py  Project: pingcsu/isgan
    def train(self, epochs, batch_size=4):
        # Load the LFW dataset
        print("Loading the dataset: this step can take a few minutes.")
        # Complete LFW dataset
        # lfw_people = fetch_lfw_people(color=True, resize=1.0, \
        #                               slice_=(slice(0, 250), slice(0, 250)))

        # Smaller dataset used for implementation evaluation
        lfw_people = fetch_lfw_people(color=True, resize=1.0,
                                      slice_=(slice(0, 250), slice(0, 250)),
                                      min_faces_per_person=3)

        images_rgb = lfw_people.images
        images_rgb = np.moveaxis(images_rgb, -1, 1)

        # Zero pad them to get 256 x 256 inputs
        images_rgb = np.pad(images_rgb, ((0,0), (0,0), (3,3), (3,3)), 'constant')
        self.images_lfw = images_rgb

        # Convert images from RGB to YCbCr and from RGB to grayscale
        images_ycc = np.zeros(images_rgb.shape)
        secret_gray = np.zeros((images_rgb.shape[0], 1, images_rgb.shape[2], images_rgb.shape[3]))
        for k in range(images_rgb.shape[0]):
            images_ycc[k, :, :, :] = rgb2ycc(images_rgb[k, :, :, :])
            secret_gray[k, 0, :, :] = rgb2gray(images_rgb[k, :, :, :])

        # Rescale to [-1, 1]
        X_train_ycc = (images_ycc.astype(np.float32) - 127.5) / 127.5
        X_train_gray = (secret_gray.astype(np.float32) - 127.5) / 127.5

        # Adversarial ground truths
        original = np.ones((batch_size, 1))
        encrypted = np.zeros((batch_size, 1))

        for epoch in range(epochs):
            # Select a random batch of cover images
            idx = np.random.randint(0, X_train_ycc.shape[0], batch_size)
            imgs_cover = X_train_ycc[idx]

            # Same for the secret images
            idx = np.random.randint(0, X_train_ycc.shape[0], batch_size)
            imgs_gray = X_train_gray[idx]

            # Predict the generator output for these images
            imgs_stego, _ = self.base_model.predict([imgs_cover, imgs_gray])
            # imgs_stego, _, _ = self.adversarial.predict([imgs_cover, imgs_gray])

            # Train the discriminator
            d_loss_real = self.discriminator.train_on_batch(imgs_cover, original)
            d_loss_encrypted = self.discriminator.train_on_batch(imgs_stego, encrypted)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_encrypted)

            # Train the generator
            g_loss = self.adversarial.train_on_batch([imgs_cover, imgs_gray], [imgs_cover, imgs_gray, original])

            # Print the progress
            print("{} [D loss: {}] [G loss: {}]".format(epoch, d_loss, g_loss[0]))

            # Checkpoint the models after every epoch
            self.adversarial.save('adversarial.h5')
            self.discriminator.save('discriminator.h5')
            self.base_model.save('base_model.h5')
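
A hedged usage sketch for this trainer (the class name ISGAN and its constructor are assumptions; they are not shown on this page):

model = ISGAN()                        # hypothetical constructor
model.train(epochs=100, batch_size=4)  # trains and checkpoints every epoch
model.draw_images(nb_images=5)         # writes cover/secret/stego/recstr images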