# Exemplo n.º 1
# 0
    def compute_loss_function(self, each_batch, extra):
        """
        Compute the training losses for one batch.

        The ``extra`` argument is unused by this training routine.

        Returns a pair of dicts: the loss fed to the optimizer, and the
        individual loss terms for display/logging.
        """
        source, label = each_batch

        # Run the generator on the input image.
        generated = self.generator(source, training=True)

        # Extract the watermark from the generated (positive) image
        # and from the clean target (negative) image.
        positive_mark = self.extractor(generated, training=True)
        negative_mark = self.extractor(label, training=True)

        # Mean absolute error between generated image and target.
        image_loss = loss_util.pixel_loss(generated, label)

        # Positive samples should yield the watermark; negatives the noise.
        mark_loss = (loss_util.pixel_loss(positive_mark, self.wm)
                     + loss_util.pixel_loss(negative_mark, self.noise))

        combined = image_loss + mark_loss

        display_map = {
            "total_loss": combined,
            "gen_loss": image_loss,
            "wm_loss": mark_loss,
        }
        return {"opt": combined}, display_map
    def compute_loss_function(self, task_batch, secret_batch):
        """
        Compute the stego-network and task-network losses for one batch.

        Returns a pair of dicts: the combined loss for the optimizer and
        the individual loss terms for display.
        """
        cover, task_target = task_batch

        # Stego network: encode the secret into the cover image.
        encoder_input = tf.concat([cover, secret_batch], axis=-1)
        stego_image = self.encoder(encoder_input, training=True)

        # Decode the secret back out of the stego image.
        revealed = self.decoder(stego_image, training=True)

        # The stego image should look like the cover ...
        encoder_loss = loss_util.pixel_loss(stego_image, cover)
        # ... and the revealed secret should match the original secret.
        decoder_loss = loss_util.pixel_loss(revealed, secret_batch)

        # Task network: with a zero "secret", the encoder should still
        # perform the original image-to-image task.
        task_input = tf.concat([cover, tf.zeros_like(cover)], axis=-1)
        task_output = self.encoder(task_input, training=True)
        task_loss = loss_util.pixel_loss(task_target, task_output)

        total = encoder_loss + decoder_loss + task_loss

        # Result sets: optimizer loss and display losses.
        return ({"optimizer": total},
                {"encoder loss": encoder_loss,
                 "decoder loss": decoder_loss,
                 "task loss": task_loss})
# Exemplo n.º 3
# 0
    def compute_loss(self, input_image, target):
        """
        Compute the loss set, extending the parent implementation with
        watermark extraction losses.

        Arguments:
            input_image: generator input batch.
            target: ground-truth output batch.

        Returns the parent's result_set with the watermark loss folded into
        ``result_set["loss_set"]["total_gen_loss"]``.
        """
        # Task losses come from the parent class.
        result_set = super(WMContainer, self).compute_loss(input_image, target)

        # Watermark losses start from the generator output (no attack).
        gen_output = result_set["gen_output"]
        ext_input = gen_output

        # Attack pipeline: optionally corrupt the stego image before
        # extraction so the extractor learns to be robust.
        # (Truthiness test instead of `== True`, per PEP 8.)
        if self.noise_attack:
            # Gaussian noise with sigma drawn uniformly from [0, 0.4).
            sigma = random.random() * 0.4
            # NOTE(review): the noise shape [128, 128, 3] is hard-coded and
            # assumes that image size — confirm it matches the model output.
            normal_noise = np.random.normal(0, scale=sigma, size=[128, 128, 3])
            ext_input = gen_output + normal_noise

        if self.crop_attack:
            # Zero out a random strip of 0-49 pixel columns on the left;
            # masked pixels end up at -1.
            crop_mask = np.ones([1, 128, 128, 3], dtype=np.float32)
            crop_width = np.random.randint(0, 50)
            crop_mask[:, :, 0:crop_width, :] = 0
            # NOTE(review): this starts from gen_output, not ext_input, so a
            # preceding noise attack is discarded when both flags are set —
            # confirm that is intended.
            ext_input = tf.multiply(gen_output, crop_mask) + crop_mask - 1

        # Extract from the watermarked output (positive sample) ...
        extract_watermark = self.extractor(ext_input, training=True)
        # ... and from the clean target (negative sample).
        extract_negitive = self.extractor(target, training=True)

        # Positive extraction should match the watermark target.
        watermark_possitive_loss = loss_util.pixel_loss(
            self.watermark_target, extract_watermark)

        # Negative extraction should match the "no watermark" target.
        watermark_negitive_loss = loss_util.pixel_loss(
            self.negitive_target, extract_negitive)

        watermark_total_loss = (
            self.lambda_wm_positive * watermark_possitive_loss
            + self.lambda_wm_negitive * watermark_negitive_loss)

        # Total loss: add the watermark loss on top of the original one.
        result_set["loss_set"]["total_gen_loss"] = (
            result_set["loss_set"]["total_gen_loss"] + watermark_total_loss)

        return result_set
# Exemplo n.º 4
# 0
    def compute_loss_function(self, each_batch, extra_batch_data):
        """
        Compute style-transfer, hiding, and reveal losses for one batch.

        Arguments:
            each_batch: (inputs, labels) pair for the style task.
            extra_batch_data: the message batch to hide.

        Returns a pair of dicts: the combined loss for the optimizer and
        the individual loss terms for display.
        """
        # Split the batch; the extra data carries the message to hide.
        inputs, labels = each_batch
        message = extra_batch_data

        # Plain task path: style the inputs using the style batch.
        outputs = self.model(
            tf.concat([inputs, self.style_batch], axis=-1), training=True)

        # Hiding path: embed the message, then style with the hidden message.
        hidden_message = self.hide_model(
            tf.concat([message, self.style_batch], axis=-1))
        stego = self.model(
            tf.concat([inputs, hidden_message], axis=-1), training=True)

        # Reveal path: recover the message from the stego image.
        message_output = self.reveal_model(
            tf.concat([stego, tf.zeros_like(stego)], axis=-1), training=True)

        # Task loss.
        style_loss = loss_util.pixel_loss(outputs, labels)

        # Hiding losses: the hidden message should mimic the style batch and
        # the stego image should still match the task labels.
        hide1_loss = loss_util.pixel_loss(hidden_message, self.style_batch)
        hide2_loss = loss_util.pixel_loss(stego, labels)

        # Reveal loss.
        reveal_loss = loss_util.pixel_loss(message_output, message)

        # Combined loss.
        loss = style_loss + hide1_loss + hide2_loss + reveal_loss

        display = {
            "loss": loss,
            "s": style_loss,
            "h1": hide1_loss,
            "h2": hide2_loss,
            "r": reveal_loss,
        }
        return {"optimizer": loss}, display
    def on_train_batch(self, input_image, target):
        """
        Run one optimization step: encode, randomly crop, decode, and apply
        gradients of the pixel loss to both networks.

        Returns a result set containing the training loss.
        """
        # The tape records operations so we can differentiate the loss.
        with tf.GradientTape() as tape:
            # Encode, then crop at random so decoding learns to be robust.
            encoded = self.encoder(input_image, training=True)
            encoded = crop_image(encoded, self.mask_list)

            # Decode and compare against the target.
            decoded = self.decoder(encoded, training=True)
            loss = loss_util.pixel_loss(target, decoded)

        # Optimize encoder and decoder jointly.
        # TODO: verify this is the right way to gather variables in TF 2.0.
        variables = (self.encoder.trainable_variables
                     + self.decoder.trainable_variables)
        gradients = tape.gradient(target=loss, sources=variables)
        self.optimizer.apply_gradients(
            grads_and_vars=zip(gradients, variables))

        # Package the result.
        return {"loss_set": {"train_loss": loss}}
# Exemplo n.º 6
# 0
    def compute_loss_function(self, cover_batch, secret_batch):
        """
        Compute encoder/decoder losses for a basic steganography pair.

        Returns a pair of dicts: the combined loss for the optimizer and
        the individual loss terms for display.
        """
        # Encode the secret into the cover image.
        stego = self.encoder(
            tf.concat([cover_batch, secret_batch], axis=-1), training=True)

        # Decode the secret back out of the stego image.
        revealed = self.decoder(stego, training=True)

        # Stego should look like the cover; revealed should match the secret.
        encoder_loss = loss_util.pixel_loss(stego, cover_batch)
        decoder_loss = loss_util.pixel_loss(revealed, secret_batch)

        total = encoder_loss + decoder_loss

        # Result sets: optimizer loss and display losses.
        display = {"encoder loss": encoder_loss, "decoder loss": decoder_loss}
        return {"optimizer": total}, display
# Exemplo n.º 7
# 0
    def compute_loss_function(self, each_batch, extra):
        """
        Compute the generator loss, plus the discriminator loss when a
        discriminator is configured.

        The ``extra`` argument is unused by this training routine.

        Returns a pair of dicts: the optimizer losses and the display losses.
        """
        source, label = each_batch

        # Generator forward pass.
        generated = self.generator(source, training=True)

        # Mean absolute error between generated image and target.
        l1_loss = loss_util.pixel_loss(generated, label)

        use_discriminator = self.config_loader.discriminator != "no"

        if use_discriminator:
            # Discriminator on the real pair ...
            disc_real_output = self.discriminator([source, label],
                                                  training=True)
            # ... and on the generated pair.
            disc_generated_output = self.discriminator([source, generated],
                                                       training=True)

            # Adversarial losses for both networks.
            gen_loss, disc_loss = loss_util.gan_loss(disc_real_output,
                                                     disc_generated_output)

            # Total generator loss: adversarial term + weighted L1 term.
            total_gen_loss = gen_loss + (100 * l1_loss)
        else:
            total_gen_loss = l1_loss

        # Assemble the result sets.
        loss_map = {"generator_opt": total_gen_loss}
        display_map = {"gen_loss": total_gen_loss}

        # The discriminator loss is only recorded when it exists.
        if use_discriminator:
            loss_map["discriminator_opt"] = disc_loss
            display_map["disc_loss"] = disc_loss

        # Return the losses.
        return loss_map, display_map
    def on_test_epoch(self, current_epoch, loss_set):
        """
        Evaluate on the test set and save losses/visualizations every
        ``save_period`` epochs, then delegate to the parent implementation.

        Arguments:
            current_epoch: the epoch number just finished.
            loss_set: dict of losses; the mean test loss is added under
                ``"test_loss"`` before saving.
        """
        # save test result
        if current_epoch % self.config_loader.save_period == 0:
            # Accumulate the pixel loss over the whole test set.
            # NOTE(review): training=True is used here even though this is
            # evaluation — confirm that is intended (e.g. for batch norm).
            test_loss = 0
            image_num = -1  # guards against an empty test set below
            for image_num, (input_image) in self.test_dataset.enumerate():
                # NOTE(review): this loop uses the raw dataset element while
                # the visualization loop below unpacks (image, label) pairs —
                # confirm the dataset element structure.
                encoder_output = self.encoder(input_image, training=True)
                # decoder
                decoder_output = self.decoder(encoder_output, training=True)
                # calculate loss
                test_loss = test_loss + \
                    loss_util.pixel_loss(input_image, decoder_output)

            # Mean over the number of batches. enumerate() is zero-based, so
            # the batch count is the last index plus one. (The original
            # divided by image_num, which over-weighted the mean and raised
            # ZeroDivisionError on a single-batch test set.)
            test_loss = test_loss / float(image_num + 1)

            loss_set["test_loss"] = test_loss
            # Persist the losses.
            self.log_tool.save_loss(loss_set)

            # Save a visual sample: input, encoded (cropped), decoded.
            for test_image, _ in self.test_dataset.take(1):

                # show encoder output
                encoder_output = self.encoder(test_image, training=True)
                # crop the encoder output as during training
                encoder_output = crop_image(encoder_output, self.mask_list)
                # decoder
                decoder_output = self.decoder(encoder_output, training=True)
                titles = ["IN", "EN", "DE"]
                image_list = [
                    test_image,
                    tf.reshape(encoder_output,
                               [1, self.encode_width, self.encode_width, 3]),
                    decoder_output
                ]
                self.log_tool.save_image_list(image_list=image_list,
                                              title_list=titles)

        # Delegate to the parent class.
        super(DecoderContainer, self).on_test_epoch(current_epoch, loss_set)