Example #1
    def naive_discriminator(self, image, y=None, reuse=False):
        with tf.variable_scope("discriminator") as scope:

            # image is 128 x 128 x (input_c_dim + output_c_dim)
            if reuse:
                tf.get_variable_scope().reuse_variables()
            else:
                assert tf.get_variable_scope().reuse == False

            h0 = lrelu(conv2d(image, self.df_dim, name='adv_d_h0_conv'))
            # h0 is (64 x 64 x self.df_dim)
            h1 = lrelu(
                layer_norm((conv2d(h0, self.df_dim * 2, name='adv_d_h1_conv')),
                           name="adv_d_ln1"))
            # h1 is (32 x 32 x self.df_dim*2)
            h2 = lrelu(
                layer_norm(conv2d(h1, self.df_dim * 4, name='adv_d_h2_conv'),
                           name="adv_d_ln2"))
            # h2 is (16 x 16 x self.df_dim*4)
            h3 = lrelu(
                layer_norm(conv2d(h2,
                                  self.df_dim * 8,
                                  d_h=1,
                                  d_w=1,
                                  name='adv_d_h3_conv'),
                           name="adv_d_ln3"))
            # h3 is (16 x 16 x self.df_dim*8)
            h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1,
                        'adv_d_h3_lin')

            return tf.nn.sigmoid(h4), h4
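
The example above relies on DCGAN-style helper ops (lrelu, conv2d, linear, layer_norm) defined elsewhere in the project. The following is a rough sketch of what those helpers might look like in TensorFlow 1.x, assuming the usual 5x5 kernel / stride-2 defaults from the common DCGAN ops file; it is not the project's actual ops.py:

import tensorflow as tf

def lrelu(x, leak=0.2, name='lrelu'):
    # Leaky ReLU: max(x, leak * x).
    return tf.maximum(x, leak * x, name=name)

def conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
           name='conv2d'):
    # Assumed helper: strided convolution; the default stride of 2 halves H and W.
    with tf.variable_scope(name):
        w = tf.get_variable('w',
                            [k_h, k_w, input_.get_shape().as_list()[-1], output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
        b = tf.get_variable('biases', [output_dim],
                            initializer=tf.constant_initializer(0.0))
        return tf.nn.bias_add(conv, b)

def linear(input_, output_size, scope='linear'):
    # Assumed helper: fully connected layer, input_ @ w + b.
    with tf.variable_scope(scope):
        w = tf.get_variable('w', [input_.get_shape().as_list()[-1], output_size],
                            initializer=tf.random_normal_initializer(stddev=0.02))
        b = tf.get_variable('b', [output_size],
                            initializer=tf.constant_initializer(0.0))
        return tf.matmul(input_, w) + b

def layer_norm(x, name='layer_norm'):
    # Assumed thin wrapper so each call site gets its own variable scope.
    return tf.contrib.layers.layer_norm(x, scope=name)

Under these assumptions each stride-2 conv halves the spatial size, which is what the corrected shape comments above reflect.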
Example #2
    def _residual_block(self, x, n_out, name='residual'):
        with tf.variable_scope(name):
            with tf.variable_scope('shortcut'):
                x1 = downsample2x(x)
                x1 = conv2d(x1, n_out, self.k_size, 1, 'SAME')
            with tf.variable_scope('normal'):
                x2 = layer_norm(x, name='layer_norm_0')
                x2 = tf.nn.relu(x2)
                x2 = conv2d(x2, n_out, self.k_size, 1, 'SAME', name='conv2d_0')
                x2 = layer_norm(x2, name='layer_norm_1')
                x2 = tf.nn.relu(x2)
                x2 = downsample2x(x2)
                x2 = conv2d(x2, n_out, self.k_size, 1, 'SAME', name='conv2d_1')
            return x1 + x2
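
Both the shortcut and the residual path call downsample2x, which is not shown. A plausible definition, assuming it is plain 2x average pooling (a common choice in pre-activation residual blocks; this project's actual helper may differ):

import tensorflow as tf

def downsample2x(x, name='downsample2x'):
    # Assumed helper: 2x2 average pooling with stride 2 halves H and W.
    return tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME', name=name)

With that, both paths end at the same spatial size and channel count, so the final x1 + x2 is well defined.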
Example #3
File: model.py  Project: liuaishan/AdvPGAN
    def naive_discriminator(self, image, y=None, reuse=False):
        with tf.variable_scope("discriminator") as scope:

            # image is 128 x 128 x (input_c_dim + output_c_dim)
            if reuse:
                tf.get_variable_scope().reuse_variables()
            else:
                assert tf.get_variable_scope().reuse == False

            h0 = lrelu(conv2d(image, self.df_dim, name='adv_d_h0_conv'))
            # h0 is (64 x 64 x self.df_dim)
            h1 = lrelu(layer_norm(conv2d(h0, self.df_dim * 2, name='adv_d_h1_conv'), name="adv_d_ln1"))
            # h1 is (32 x 32 x self.df_dim*2)
            h2 = lrelu(layer_norm(conv2d(h1, self.df_dim * 4, name='adv_d_h2_conv'), name="adv_d_ln2"))
            # h2 is (16 x 16 x self.df_dim*4)
            h3 = lrelu(layer_norm(conv2d(h2, self.df_dim * 8, d_h=1, d_w=1, name='adv_d_h3_conv'), name="adv_d_ln3"))
            # h3 is (16 x 16 x self.df_dim*8)
            h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'adv_d_h3_lin')

            return tf.nn.sigmoid(h4), h4
Example #4
File: model.py  Project: liuaishan/ps_face
    def generator_pix2pix(self, image, reuse=False):
        output_size = self.patch_size
        s = math.ceil(output_size/16.0)*16
        s2, s4, s8, s16 = int(s/2), int(s/4), int(s/8), int(s/16)
        # gf_dim = 16 # Dimension of gen filters in first conv layer.
        with tf.variable_scope("generator") as scope:

            # image is 128 x 128 x (input_c_dim + output_c_dim)
            if reuse:
                tf.get_variable_scope().reuse_variables()
            else:
                assert tf.get_variable_scope().reuse == False
            # do we need to rescale the input here?
            #image = image / 255.0
            # liuas 2018.5.9
            # trick: using lrelu instead of relu

            ngf = 16 # number of generator filters in first conv layer
            # encoder_1: [batch, 16, 16, 3] => [batch, 8, 8, ngf]
            conv1 = conv2d(image, ngf, k_h=4, k_w=4, name='adv_g_enc1')
            conv2 = layer_norm(conv2d(lrelu(conv1, 0.2), ngf*2, k_h=4, k_w=4, name='adv_g_enc2'), name='adv_g_enc2ln')
            conv3 = layer_norm(conv2d(lrelu(conv2, 0.2), ngf*4, k_h=4, k_w=4, name='adv_g_enc3'), name='adv_g_enc3ln')
            conv4 = layer_norm(conv2d(lrelu(conv3, 0.2), ngf*8, k_h=4, k_w=4, name='adv_g_enc4'), name='adv_g_enc4ln')
            deconv1, _, _ = deconv2d(tf.nn.relu(conv4), [self.batch_size, s8, s8, ngf*4], k_h=4, k_w=4, name='adv_g_dec1', with_w=True)
            deconv1 = layer_norm(deconv1, name="adv_g_dec1ln")
            input = tf.concat([deconv1, conv3], axis=3)
            deconv2, _, _ = deconv2d(tf.nn.relu(input), [self.batch_size, s4, s4, ngf*2], k_h=4, k_w=4, name='adv_g_dec2', with_w=True)
            deconv2 = layer_norm(deconv2, name="adv_g_dec2ln")
            input = tf.concat([deconv2, conv2], axis=3)
            deconv3, _, _ = deconv2d(tf.nn.relu(input), [self.batch_size, s2, s2, ngf], k_h=4, k_w=4, name='adv_g_dec3', with_w=True)
            deconv3 = layer_norm(deconv3, name="adv_g_dec3ln")
            input = tf.concat([deconv3, conv1], axis=3)
            deconv4, _, _ = deconv2d(tf.nn.relu(input), [self.batch_size, output_size, output_size, 3], k_h=4, k_w=4, name='adv_g_dec4', with_w=True)

            return tf.tanh(deconv4)
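
The decoder half calls a deconv2d helper with with_w=True, so it returns the output tensor together with its filter and bias variables. A sketch of the assumed signature, modeled on the common DCGAN ops file (an assumption, not this project's actual helper):

import tensorflow as tf

def deconv2d(input_, output_shape, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
             name='deconv2d', with_w=False):
    # Assumed helper: transposed convolution to the requested output shape.
    with tf.variable_scope(name):
        # Filter shape is [height, width, output_channels, input_channels].
        w = tf.get_variable('w',
                            [k_h, k_w, output_shape[-1], input_.get_shape().as_list()[-1]],
                            initializer=tf.random_normal_initializer(stddev=stddev))
        deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape,
                                        strides=[1, d_h, d_w, 1])
        biases = tf.get_variable('biases', [output_shape[-1]],
                                 initializer=tf.constant_initializer(0.0))
        deconv = tf.nn.bias_add(deconv, biases)
        if with_w:
            return deconv, w, biases
        return deconv

The tf.concat of each decoder output with the matching encoder output supplies the skip connections that give this pix2pix-style generator its U-Net structure.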
Example #5
File: model.py  Project: liuaishan/AdvPGAN
    def generator_pix2pix(self, image, reuse=False):
        output_size = self.patch_size
        s = math.ceil(output_size/16.0)*16
        s2, s4, s8, s16 = int(s/2), int(s/4), int(s/8), int(s/16)
        # gf_dim = 16 # Dimension of gen filters in first conv layer.
        with tf.variable_scope("generator") as scope:

            # image is 128 x 128 x (input_c_dim + output_c_dim)
            if reuse:
                tf.get_variable_scope().reuse_variables()
            else:
                assert tf.get_variable_scope().reuse == False
            # do we need to rescale the input here?
            #image = image / 255.0
            # liuas 2018.5.9
            # trick: using lrelu instead of relu

            ngf = 16 # number of generator filters in first conv layer
            # encoder_1: [batch, 16, 16, 3] => [batch, 8, 8, ngf]
            conv1 = conv2d(image, ngf, k_h=4, k_w=4, name='adv_g_enc1')
            conv2 = layer_norm(conv2d(lrelu(conv1, 0.2), ngf*2, k_h=4, k_w=4, name='adv_g_enc2'), name='adv_g_enc2ln')
            conv3 = layer_norm(conv2d(lrelu(conv2, 0.2), ngf*4, k_h=4, k_w=4, name='adv_g_enc3'), name='adv_g_enc3ln')
            conv4 = layer_norm(conv2d(lrelu(conv3, 0.2), ngf*8, k_h=4, k_w=4, name='adv_g_enc4'), name='adv_g_enc4ln')
            deconv1, _, _ = deconv2d(tf.nn.relu(conv4), [self.batch_size, s8, s8, ngf*4], k_h=4, k_w=4, name='adv_g_dec1', with_w=True)
            deconv1 = layer_norm(deconv1, name="adv_g_dec1ln")
            input = tf.concat([deconv1, conv3], axis=3)
            deconv2, _, _ = deconv2d(tf.nn.relu(input), [self.batch_size, s4, s4, ngf*2], k_h=4, k_w=4, name='adv_g_dec2', with_w=True)
            deconv2 = layer_norm(deconv2, name="adv_g_dec2ln")
            input = tf.concat([deconv2, conv2], axis=3)
            deconv3, _, _ = deconv2d(tf.nn.relu(input), [self.batch_size, s2, s2, ngf], k_h=4, k_w=4, name='adv_g_dec3', with_w=True)
            deconv3 = layer_norm(deconv3, name="adv_g_dec3ln")
            input = tf.concat([deconv3, conv1], axis=3)
            deconv4, _, _ = deconv2d(tf.nn.relu(input), [self.batch_size, output_size, output_size, 3], k_h=4, k_w=4, name='adv_g_dec4', with_w=True)

            return tf.tanh(deconv4)
Example #6
File: model.py  Project: liuaishan/ps_face
    def naive_discriminator(self, image, y=None, reuse=False):
        with tf.variable_scope("discriminator") as scope:

            # image is 128 x 128 x (input_c_dim + output_c_dim)
            if reuse:
                tf.get_variable_scope().reuse_variables()
            else:
                assert tf.get_variable_scope().reuse == False

            h0 = lrelu(conv2d(image, self.df_dim, name='adv_d_h0_conv'))
            # h0 is (64 x 64 x self.df_dim)
            h1 = lrelu(layer_norm(conv2d(h0, self.df_dim * 2, name='adv_d_h1_conv'), name="adv_d_ln1"))
            # h1 is (32 x 32 x self.df_dim*2)
            h2 = lrelu(layer_norm(conv2d(h1, self.df_dim * 4, name='adv_d_h2_conv'), name="adv_d_ln2"))
            # h2 is (16 x 16 x self.df_dim*4)
            h3 = lrelu(layer_norm(conv2d(h2, self.df_dim * 8, d_h=1, d_w=1, name='adv_d_h3_conv'), name="adv_d_ln3"))
            # h3 is (16 x 16 x self.df_dim*8)
            h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'adv_d_h3_lin')

            return tf.nn.sigmoid(h4), h4

    # build cGAN model
    def build_model(self):
        # adversarial patch generated by G
        self.fake_patch = self.generator_pix2pix(self.real_patch)

        # overlay adversarial patch on image to generate adversarial example
        self.fake_image = self.pad_patch_on_image(image=self.real_image, patch=self.fake_patch, if_random=False)

        # classification result from target model
        self.fake_logits_f, self.fake_prob_f = self.target_model_discriminator(self.fake_image)

        # fake image result from naive D
        self.fake_logits_d, self.fake_prob_d = self.naive_discriminator(self.fake_image)

        # real image result from naive D
        self.real_logits_d, self.real_prob_d = self.naive_discriminator(self.real_image, reuse=True)

        # targeted attack: one-hot target label, tiled into a batch
        self.target = 40
        self.target_hat = tf.one_hot(self.target, self.class_num)
        self.target_hat_batch = tf.tile(tf.expand_dims(self.target_hat, 0),
                                        [self.batch_size, 1])

        # The loss of AdvPGAN consists of: GAN loss, patch loss and adversarial example loss
        # 1. GAN loss for D and G
        self.loss_d_adv = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.real_logits_d,
                                                    labels=tf.ones_like(self.real_prob_d))
            + tf.nn.sigmoid_cross_entropy_with_logits(logits=self.fake_logits_d,
                                                      labels=tf.zeros_like(self.fake_prob_d)))

        self.loss_g_adv = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.fake_logits_d,
                                                    labels=tf.ones_like(self.fake_prob_d)))

        # 2. patch loss: squared L2 distance between real and generated patch
        # (tf.nn.l2_loss(t) computes sum(t ** 2) / 2)
        self.patch_loss = tf.nn.l2_loss(self.real_patch - self.fake_patch)

        # 3. adversarial example loss: cross-entropy of the target model's
        # logits on the adversarial example against the labels
        self.ae_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=self.fake_logits_f,
                                                    labels=self.y))

        # 4. similarity loss between patch and padded place on traffic sign
        self.pad_sim_loss = self.rho * tf.nn.l2_loss(self.real_image - self.fake_image)

        # 5. gradient penalty (WGAN-GP) on interpolates between real and fake
        self.real_data = tf.reshape(self.real_image, [self.batch_size, -1])
        self.fake_data = tf.reshape(self.fake_image, [self.batch_size, -1])
        self.LAMBDA = 10
        self.gra_pen_alpha = tf.random_uniform([self.batch_size, 1], minval=0., maxval=1.)
        self.differences = self.fake_data - self.real_data
        self.interpolates = self.real_data + (self.gra_pen_alpha * self.differences)
        self.interpolates_reshaped = tf.reshape(self.interpolates,
                                                [self.batch_size, self.image_size,
                                                 self.image_size, self.image_channel])
        _, self.results = self.naive_discriminator(self.interpolates_reshaped, reuse=True)
        self.gradients = tf.gradients(self.results, [self.interpolates])[0]
        # slope = L2 norm of D's gradient at each interpolated sample
        self.slopes = tf.sqrt(tf.reduce_sum(tf.square(self.gradients), axis=[1]))
        self.gradient_penalty = self.LAMBDA * tf.reduce_mean((self.slopes - 1.) ** 2)

        # overall loss for D and G
        self.g_loss = self.alpha * self.loss_g_adv + self.beta * self.patch_loss \
                      + self.gamma * self.ae_loss + self.delta * self.pad_sim_loss
        self.d_loss = self.loss_d_adv + self.gradient_penalty

        # accuracy: classification rate of the target model on adversarial examples
        self.predictions = tf.argmax(self.fake_prob_f, 1)
        self.real_label = tf.argmax(self.y, 1)
        self.accuracy = tf.reduce_mean(tf.cast(tf.equal(self.predictions, self.real_label), tf.float32))

        # get all trainable variables for G and D, respectively
        t_vars = tf.trainable_variables()
        self.d_vars = [var for var in t_vars if 'adv_d_' in var.name]
        self.g_vars = [var for var in t_vars if 'adv_g_' in var.name]

        # initialize a saver
        self.saver = tf.train.Saver()
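
build_model only defines the graph; the d_vars and g_vars lists are what keep the two optimizers separate during training. A minimal sketch of the training ops, assuming the usual Adam settings for GANs (the project's actual train() method is not shown here):

import tensorflow as tf

def make_train_ops(d_loss, g_loss, d_vars, g_vars, lr=2e-4, beta1=0.5):
    # One optimizer per network, each restricted to its own variable list,
    # so a discriminator step never touches generator weights and vice versa.
    d_optim = tf.train.AdamOptimizer(lr, beta1=beta1).minimize(d_loss, var_list=d_vars)
    g_optim = tf.train.AdamOptimizer(lr, beta1=beta1).minimize(g_loss, var_list=g_vars)
    return d_optim, g_optim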
Example #7
def resnet_block(inputs, in_channels, out_channels, scale, block_scope,
                 is_training, reuse, discriminator_normalization,
                 is_gen_block):
    assert scale in ["up", "down", "none"]
    # if inputs.get_shape().as_list()[-1] != in_channels:
    #   raise ValueError("Unexpected number of input channels.")

    # Per the SN-GAN paper: the generator upscales in its first conv, while the
    # discriminator downsamples after its second conv.
    if is_gen_block:
        # Generator block
        scale1 = scale  # "up" or "none"
        scale2 = "none"
    else:
        # Discriminator block.
        scale1 = "none"
        scale2 = scale  # "down" or "none"

    print("resnet_block, in=%d out=%d, scale=%s, scope=%s normalizer=%s" %
          (in_channels, out_channels, scale, block_scope,
           discriminator_normalization))
    print("INPUTS: ", inputs.get_shape())
    with tf.variable_scope(block_scope, values=[inputs], reuse=reuse):
        output = inputs
        use_sn = discriminator_normalization == consts.SPECTRAL_NORM

        # Define the skip connection; make sure 'conv' is in the suffix,
        # otherwise the weights will not be regularized.

        shortcut = get_conv(output,
                            in_channels,
                            out_channels,
                            scale,
                            suffix="conv_shortcut",
                            use_sn=use_sn)
        print("SHORTCUT: ", shortcut.get_shape())

        # Apply batch norm in discriminator only if enabled.
        if is_gen_block or discriminator_normalization == consts.BATCH_NORM:
            output = batch_norm_resnet(output,
                                       is_training=is_training,
                                       scope="bn1")
        elif discriminator_normalization == consts.LAYER_NORM:
            output = ops.layer_norm(output,
                                    is_training=is_training,
                                    scope="ln1")

        output = tf.nn.relu(output)
        output = get_conv(output,
                          in_channels,
                          out_channels,
                          scale1,
                          suffix="conv1",
                          use_sn=use_sn)
        print("OUTPUT CONV1: ", output.get_shape())

        # Apply batch norm in discriminator only if enabled.
        if is_gen_block or discriminator_normalization == consts.BATCH_NORM:
            output = batch_norm_resnet(output,
                                       is_training=is_training,
                                       scope="bn2")
        elif discriminator_normalization == consts.LAYER_NORM:
            output = ops.layer_norm(output,
                                    is_training=is_training,
                                    scope="ln2")

        output = tf.nn.relu(output)
        output = get_conv(output,
                          out_channels,
                          out_channels,
                          scale2,
                          suffix="conv2",
                          use_sn=use_sn)
        print("OUTPUT CONV2: ", output.get_shape())

        # Combine skip-connection with the convolved part.
        output += shortcut

        return output
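
get_conv is defined elsewhere; per the comments above, "up" blocks rescale in the first conv and "down" blocks after the second. A simplified sketch of what it might do, assuming nearest-neighbour upsampling before the conv, average pooling after it, and spectral norm (use_sn) omitted for brevity; this is a hypothetical stand-in, not the library's implementation:

import tensorflow as tf

def get_conv(inputs, in_channels, out_channels, scale, suffix, use_sn=False):
    # Hypothetical sketch of the conv-with-rescaling helper.
    del in_channels, use_sn  # unused in this simplified version
    output = inputs
    if scale == "up":
        h, w = output.get_shape().as_list()[1:3]
        output = tf.image.resize_nearest_neighbor(output, [2 * h, 2 * w])
    # The suffix already contains 'conv', so the weights get regularized.
    output = tf.layers.conv2d(output, out_channels, 3, padding='same',
                              name=suffix)
    if scale == "down":
        output = tf.nn.avg_pool(output, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')
    return output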