Ejemplo n.º 1
0
    def on_prepare(self):
        """
        Preparation stage:
        1. Load the VGG16 network used for the style loss.
        2. Load the style image and precompute its per-layer Gram features.
        """

        # Parse the callback arguments supplied through the config.
        args_parser = ArgsParser(self.config_loader.callback_args)
        # Location of the style image.
        style_path = args_parser.get("style_image")

        # Load the style image; change_scale=False keeps values in [0, 255],
        # scaled to [0, 1] just below.
        self.style_image = train_tool.read_image(
            style_path,
            self.config_loader.image_width,
            self.config_loader.image_height,
            change_scale=False)
        print("load style image {}....".format(style_path))

        self.style_image = self.style_image / 255.0

        # Load VGG16 pretrained on ImageNet without the dense head;
        # the weights are downloaded automatically if not cached.
        vgg_16 = tf.keras.applications.vgg16.VGG16(weights='imagenet',
                                                   include_top=False)
        print("load VGG 16....")

        # Indices of the VGG16 activation layers used for the style loss.
        activate_ids = [3, 6, 10, 14, 18]
        self.activate_list = []
        # Build one sub-model per activation layer.
        for layer_id in activate_ids:
            model = tf.keras.Model(inputs=vgg_16.input,
                                   outputs=vgg_16.layers[layer_id].output)
            # Freeze the feature extractor. Strictly required only when
            # compiling with Keras; with a custom TF training loop it is
            # enough not to pass these trainable variables to the optimizer,
            # but freezing is the safe default.
            model.trainable = False

            self.activate_list.append(model)

        print("VGG16 activation layers done....")

        # The style image is fixed, so each layer's features — and hence the
        # Gram matrices — can be computed once here instead of every step.
        self.style_feature_list = [
            gram(activate(self.style_image))
            for activate in self.activate_list
        ]
        print("Gram feature done....")

        # Delegate the rest of the preparation to the parent class.
        super(StyleContainer, self).on_prepare()
Ejemplo n.º 2
0
    def on_prepare(self):
        """
        Preparation stage: load the dataset, build the three sub-models,
        register them under a shared optimizer, and preload the style batch.
        """

        # Load the image dataset.
        self.prepare_image_dataset()

        # Main network plus the hide/reveal pair; all take 128x128 inputs
        # with 6 channels (two stacked RGB images).
        self.model = pixel.UNet(
            input_shape=[128, 128, 6],
            high_performance_enable=self.config_loader.high_performance)
        self.hide_model = ExtractInvisible([128, 128, 6])
        self.hide_model.summary()

        self.reveal_model = pixel.UNet(
            input_shape=[128, 128, 6],
            high_performance_enable=self.config_loader.high_performance)

        # One optimizer is shared by all three models.
        optimizer = tf.keras.optimizers.Adam(1e-4)

        # Register models and optimizer so the parent container can
        # checkpoint them automatically.
        self.register_model_and_optimizer(
            optimizer, {
                "model": self.model,
                "hide_model": self.hide_model,
                "reveal_model": self.reveal_model
            }, "optimizer")
        print("Initial model and optimizer....")

        # Style image, rescaled from [0, 255] to [-1, 1].
        style_image = train_tool.read_image("./style.jpg",
                                            128,
                                            128,
                                            change_scale=False)
        style_image = tf.cast(style_image, tf.float32)
        style_image = style_image / 127.5 - 1

        # Broadcast the single style image across the whole batch in one
        # assignment instead of copying it slice by slice.
        style_batch = np.zeros([self.config_loader.batch_size, 128, 128, 3])
        style_batch[:] = style_image[0]

        self.style_batch = style_batch

        # Loss names to record and display during training.
        self.register_display_loss(["loss", "s", "h1", "h2", "r"])

        # Delegate the rest of the preparation to the parent class.
        super(HideContainer, self).on_prepare()
Ejemplo n.º 3
0
    def on_prepare(self):
        """
        Build the generator and the watermark extractor, register both under
        one optimizer, then preload the watermark image and a noise buffer.
        """

        # Dataset first — the models below are sized from its configuration.
        self.on_prepare_dataset()

        # Generator: a U-Net whose input shape comes from the dataset config.
        self.generator = UNet(
            input_shape=self.config_loader.config["dataset"]["image_size"]
            ["value"],
            high_performance_enable=self.config_loader.high_performance)
        print("Initial generator....")

        # Network that recovers the invisible watermark.
        self.extractor = ExtractInvisible()
        print("Initial extractor....")

        # Adam with beta_1=0.5, a common setting for GAN-style training.
        gen_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
        print("Initial optimizer....")

        # Register both models under the shared optimizer so the parent
        # container checkpoints them automatically.
        self.register_model_and_optimizer(gen_optimizer, {
            "generator": self.generator,
            "extractor": self.extractor
        }, "opt")
        print("register generator and optimizer....")

        # Loss names shown while training.
        self.register_display_loss(["total_loss", "gen_loss", "wm_loss"])

        # NOTE(review): machine-specific absolute path — this should come
        # from configuration rather than being hard-coded.
        self.wm = train_tool.read_image(
            "C:\\Users\\happy\\Desktop\\Evil\\Eidolon-Tensorflow\\wm.png",
            128,
            128,
            change_scale=True)

        # Zero noise buffer matching a single 128x128 RGB image.
        self.noise = np.zeros([1, 128, 128, 3])

        super(WMContainer, self).on_prepare()
Ejemplo n.º 4
0
def eval_key_sensitive(key_path,
                       input_path,
                       target_path,
                       model_path,
                       visual_result_path=None,
                       key_bits=1024):
    """
    Evaluate key sensitivity: flip an increasing prefix of the secret key's
    entries and measure the PSNR of the image decoded with the corrupted key.

    :param key_path: path to the secret key stored as a numpy ``.npy`` file.
    :param input_path: path to the encoded image to decode.
    :param target_path: path to the ground-truth target image.
    :param model_path: path to the encoder-decoder model.
    :param visual_result_path: currently unused; kept for interface
        compatibility with callers.
    :param key_bits: total key length; prefixes of 0..key_bits bits are flipped.
    :return: list of PSNR values, one per number of flipped bits.
    """
    # load model (key-enabled variant)
    model = model_use.EncoderDecoder(model_path, key_enable=True)
    print("initial model....")

    # load key and flatten to a column so individual entries are easy to flip
    secret_key = np.load(key_path)
    secret_key = np.reshape(secret_key, [1024, 1])
    print("load model....")

    target_image = train_tool.read_image(target_path,
                                         32,
                                         32,
                                         change_scale=True)
    print("load target....")

    result_list = []

    temp_key_path = "~temp_key.npy"
    try:
        # flip the first i entries of the key and measure sensitivity
        for i in range(0, key_bits + 1):

            # copy so each iteration starts from the pristine key
            temp_key = secret_key.copy()

            # negate (flip) the leading i entries in one vectorized step
            temp_key[:i, 0] = -temp_key[:i, 0]

            temp_key = np.reshape(temp_key, [1, 32, 32, 1])

            # the decode API takes a file path, so round-trip through disk
            np.save(temp_key_path, temp_key)

            # decode with the corrupted key
            decode_image = model.decode_from_image(
                encode_image_path=input_path, secret_key_path=temp_key_path)

            # rescale decoded image from [0, 1] to [-1, 1] to match the target
            decode_image = decode_image * 2 - 1
            decode_image = tf.reshape(decode_image, [1, 32, 32, 3])

            psnr = eval_util.evaluate(decode_image,
                                      target_image,
                                      psnr_enable=True,
                                      ssim_enable=False,
                                      ber_enable=False)

            result_list.append(psnr["psnr"])

            # \r keeps the progress line updating in place
            print("\r" + "eval {}..., psnr={}".format(i, psnr["psnr"]),
                  end="",
                  flush=True)

        print()
    finally:
        # always remove the temporary key file, even if evaluation fails
        if os.path.exists(temp_key_path):
            os.remove(temp_key_path)

    return result_list
Ejemplo n.º 5
0
def eval_all(data_path,
             model_path,
             visual_result_dir=None,
             watermark_path=None,
             wm_width=64,
             wm_height=64,
             watermark_binary=False,
             attack_test_func=None):
    """
    Evaluate a model's PSNR and SSIM over a dataset; if a watermark is
    supplied, also evaluate the extracted watermark (BER when the watermark
    is binary, PSNR otherwise).
    @since 2019.11.27
    @author anomymity
    :param data_path: path to the evaluation dataset.
    :param model_path: model directory; generator and extractor are expected
        as generator.h5 and extractor.h5.
    :param visual_result_dir: optional directory for visual results,
        created if missing.
    :param watermark_path: optional path of the target watermark image.
    :param wm_width: watermark width in pixels.
    :param wm_height: watermark height in pixels.
    :param watermark_binary: if True the watermark error is BER, else PSNR.
    :param attack_test_func: optional attack applied during generation.
    :return: (result_set, eval_report) where result_set holds "mean_value"
        and the per-image "value_list".
    :raises ValueError: if the dataset yields no images.
    """

    # create empty config, this is used when loading data
    configs = config.ConfigLoader()
    # load data
    data_loader = loader.ImageLoader(data_path, is_training=False)
    dataset = data_loader.load(configs)
    print("load data....")

    # load watermark
    watermark_enable = watermark_path is not None
    wm_target = None
    if watermark_enable:
        """
        @author anomymity
        @update 2019.11.28
        change_scale must be True so the watermark is read in [-1, 1];
        reading it in [0, 1] broke the bit-error-rate computation.
        """
        wm_target = train_tool.read_image(watermark_path,
                                          wm_width,
                                          wm_height,
                                          binary=watermark_binary,
                                          change_scale=True)
        print("load watermark....")

    generator_path = os.path.join(model_path, "generator.h5")
    extractor_path = None
    if watermark_enable:
        extractor_path = os.path.join(model_path, "extractor.h5")

    # load model
    print("load model....")
    model = model_use.GeneratorModel(generator_path=generator_path,
                                     wm_width=wm_width,
                                     wm_height=wm_height,
                                     watermark_enable=watermark_enable,
                                     extractor_path=extractor_path,
                                     binary=watermark_binary)

    if visual_result_dir is not None and not os.path.exists(visual_result_dir):
        os.mkdir(visual_result_dir)

    # start eval
    print("start eval....")
    """
    The result set is a key-value dict, which saves
    1. mean_value, 2. value_list (the mean value is calculated from this)
    """
    result_set = {}
    # the value list saves the per-image results
    value_list = []
    # running totals
    image_mean_psnr = 0
    image_mean_ssim = 0
    wm_mean_error = 0
    # image count
    count = 0
    # for each image
    for input_image, ground_truth in dataset:
        # per-image results
        result_each = {}

        # generate
        output_tensor, wm_tensor, wm_feature = model.generate(
            input_image, attack_test_func=attack_test_func)

        # eval image
        image_result_each = eval_util.evaluate(output_tensor,
                                               ground_truth,
                                               psnr_enable=True,
                                               ssim_enable=True,
                                               ber_enable=False)

        # save results
        result_each["image_psnr"] = image_result_each["psnr"]
        result_each["image_ssim"] = image_result_each["ssim"]

        # accumulate totals
        image_mean_psnr = image_mean_psnr + result_each["image_psnr"]
        image_mean_ssim = image_mean_ssim + result_each["image_ssim"]

        if watermark_enable:
            # eval watermark
            wm_result_each = eval_util.evaluate(
                wm_tensor,
                wm_target,
                psnr_enable=(not watermark_binary),
                ssim_enable=False,
                ber_enable=watermark_binary)

            # calculate watermark error
            if watermark_binary:
                result_each["wm_ber"] = wm_result_each["ber"]
                wm_mean_error = wm_mean_error + result_each["wm_ber"]
            else:
                result_each["wm_psnr"] = wm_result_each["psnr"]
                wm_mean_error = wm_mean_error + result_each["wm_psnr"]

        # append
        value_list.append(result_each)

        # save visual results
        if visual_result_dir is not None:

            # basic
            image_list = [input_image, ground_truth, output_tensor]
            title_list = ["IN", "GT", "PR"]

            # wm
            if watermark_enable:
                image_list.append(wm_tensor)
                image_list.append(wm_feature)
                title_list.append("WM")
                title_list.append("WF")

            # save image
            train_tool.save_images(image_list, title_list, visual_result_dir,
                                   count + 1)

        # one image test finished
        count = count + 1
        # this print will flush at the same place
        print("\r" + "testing image {} ...".format(count), end='', flush=True)

    # change line now
    print("")

    # an empty dataset would otherwise divide by zero below
    if count == 0:
        raise ValueError("no images found in dataset: {}".format(data_path))

    # calculate image mean values
    mean_value = {}
    image_mean_psnr = image_mean_psnr / count
    mean_value["psnr"] = image_mean_psnr
    image_mean_ssim = image_mean_ssim / count
    mean_value["ssim"] = image_mean_ssim

    # mean watermark error
    if watermark_enable:
        wm_mean_error = wm_mean_error / count

        if watermark_binary:
            mean_value["wm_ber"] = wm_mean_error
        else:
            mean_value["wm_psnr"] = wm_mean_error

    # save all
    result_set["mean_value"] = mean_value
    result_set["value_list"] = value_list

    # generate report
    eval_report = "The evaluating report comes here:\n'image psnr' = {}, 'image ssim' = {}".format(
        image_mean_psnr, image_mean_ssim)

    if watermark_enable:
        eval_report_wm = ", 'watermark {}' = {}"
        if watermark_binary:
            wm_format = "ber"
        else:
            wm_format = "psnr"
        eval_report_wm = eval_report_wm.format("wm_" + wm_format,
                                               mean_value["wm_" + wm_format])
        eval_report = eval_report + eval_report_wm

    return result_set, eval_report
Ejemplo n.º 6
0
    def on_prepare(self):
        """
        Preparation stage:
        1. Create the watermark extraction network.
        2. Load the decoder (legacy pipelines only).
        3. Invoke the parent's on_prepare, which:
            1. loads the dataset,
            2. creates the networks and optimizers,
            3. registers them with the parent for automatic saving,
            4. calls the grandparent on_prepare.
        """
        print("This container is for training the invisible watermark.")

        # loss weights for the positive / negative watermark terms
        self.lambda_wm_positive = self.config_loader.lambda_array[0]
        self.lambda_wm_negitive = self.config_loader.lambda_array[1]
        # log the lambdas for reproducibility
        print("lp={}, ln={}".format(
            self.lambda_wm_positive, self.lambda_wm_negitive))

        # parse all callback arguments
        args_parser = ArgsParser(self.config_loader.callback_args)

        # decoder path; the sentinel "new" selects the new encoding pipeline
        self.decoder_path = args_parser.get("decoder")

        # watermark image path
        self.wm_path = args_parser.get("wm_path")

        # watermark size
        self.wm_width = int(args_parser.get("wm_width"))

        # training-time noise attack toggle
        self.noise_attack = args_parser.get("noise") in ("True", "true")
        print("noise attack:{}".format(self.noise_attack))

        # random-crop augmentation to improve robustness to crop attacks
        self.crop_attack = args_parser.get("crop") in ("True", "true")
        print("crop attack:{}".format(self.crop_attack))

        # init extract network
        print("initial WM extract network")
        self.extractor = Extractor(
            self.config_loader.config["dataset"]["image_size"]["value"])
        # register so the parent saves it automatically
        self.model_map["extractor"] = self.extractor

        # load watermark: legacy path reads an image directly, the new
        # pipeline encodes the watermark from the image
        if self.decoder_path != "new":
            self.watermark_target = train_tool.read_image(
                self.wm_path, self.config_loader.image_width,
                self.config_loader.image_height, change_scale=True)
        else:
            self.watermark_target = encode_watermark_from_image(
                self.wm_path, self.config_loader.image_width,
                self.config_loader.image_height)
        print("load watermark successfully...")

        # negative target: an all-zero "no watermark" tensor
        # NOTE(review): shape is [1, width, height, 3] — confirm the
        # width/height ordering matches the images used elsewhere
        self.negitive_target = tf.zeros(
            shape=[1, self.config_loader.image_width,
                   self.config_loader.image_height, 3])
        print("create negative watermark successfully...")

        # the pretrained encoder-decoder model
        # @update 2019.11.27
        # @author anomymity
        # fixed a relative-path bug that prevented the model from loading;
        # the new pipeline does not need a pretrained decoder
        if self.decoder_path != "new":
            self.decoder_model = EncoderDecoder(self.decoder_path)
            print("load decoder successfully...")

        # delegate the rest of the preparation to the parent class
        super(WMContainer, self).on_prepare()