def __init__(self, model_VarList, model, epsilon, num_steps, step_size,
                 random_start, dataset_type, config):
        """Build an iterative FGSM (PGD-style) attack graph over `model`.

        Args:
            model_VarList: tuple of (x_adv, x_adv_attack, y_input, is_training)
                placeholders/tensors, unpacked onto the instance.
            model: object exposing `_encoder(...)`; its second output is the
                scalar loss the attack ascends (a_Axent_temp below).
            epsilon: L-inf perturbation budget, in the same units as the input.
            num_steps: number of signed-gradient ascent steps to unroll.
            step_size: per-step perturbation magnitude.
            random_start: stored on the instance; not otherwise used here.
            dataset_type: dataset name; selects the valid pixel range.
            config: dict; only 'mask_effective_attack' is read here.
        """
        self.x_Aadv, self.x_Aadv_attack, self.y_Ainput, self.is_training = model_VarList
        self.random_start = random_start
        self.epsilon = epsilon
        # Image datasets are assumed stored in [0, 255]; anything else is
        # assumed normalized to [0, 1].
        self.upper = 255 if dataset_type in ('cifar10', 'cifar100', 'imagenet') else 1

        # Valid perturbation box: the epsilon-ball around the clean input,
        # intersected with the valid pixel range [0, upper].
        x_max = tf.clip_by_value(self.x_Aadv + epsilon, 0, self.upper)
        x_min = tf.clip_by_value(self.x_Aadv - epsilon, 0, self.upper)
        print("the attack upper", self.upper, "the epsilon", epsilon,
              "step size", step_size)

        new_adv_x = self.x_Aadv_attack

        # Unrolled attack: one sign-gradient ascent step per iteration,
        # projected back into the epsilon-box after each step.
        for _ in range(num_steps):
            _, a_Axent_temp, _, _, _, _, _, _ = \
                                    model._encoder(new_adv_x, self.y_Ainput, self.is_training,
                                                   mask_effective_attack=config['mask_effective_attack'])
            fsm_grad = tf.sign(tf.gradients(a_Axent_temp, new_adv_x)[0])
            new_adv_x = new_adv_x + fsm_grad * step_size
            new_adv_x = tf.clip_by_value(new_adv_x, x_min, x_max)
            # Treat each iterate as a constant so backprop does not flow
            # through the whole unrolled attack chain.
            new_adv_x = tf.stop_gradient(new_adv_x)

        self.final_new_adv_attack = new_adv_x
        # Re-encode the final adversarial example to expose its embedding.
        layer_values_Ap_emb, _, _, _, _, _, _, _ = \
            model._encoder(self.final_new_adv_attack, self.y_Ainput, self.is_training,
                           mask_effective_attack=config['mask_effective_attack'])

        self.Ap_emb = reshape_cal_len(layer_values_Ap_emb['x4'])[0]
        self.invariance_generator = InvarianceGenerator()
    def forward_(self, fea_input, recos_target, is_training):
        """Decode a feature embedding into 32x32x3 images and score them.

        The embedding is flattened, passed through two 1024-unit dense
        layers (each followed by batch-norm and ReLU) and a final linear
        projection, then reshaped to NHWC images. The loss is the mean
        squared error against the normalized reconstruction target.

        Args:
            fea_input: feature tensor to reconstruct from; flattened by
                `reshape_cal_len`.
            recos_target: reconstruction target, scaled by 1/255 and shifted
                by -0.5 here (presumably pixels in [0, 255] — confirm at caller).
            is_training: training-phase flag, stored on the instance.

        Returns:
            Tuple of (loss, output, recos_target_norm): scalar MSE loss,
            reconstructed images of shape [-1, 32, 32, 3], and the
            normalized target tensor.
        """
        with tf.variable_scope('decoder', reuse=tf.AUTO_REUSE):
            self.fea_input = fea_input

            self.is_training = is_training

            print('self.fea_input', self.fea_input)
            flat, feat_dim = reshape_cal_len(self.fea_input)

            # Dense layer 1: feat_dim -> 1024, batch-norm, ReLU.
            w_fc1 = self._weight_variable([feat_dim, 1024], scope='w1')
            b_fc1 = self._bias_variable([1024], scope='b1')
            hidden = tf.nn.relu(self._batch_norm('bn11', tf.nn.xw_plus_b(flat, w_fc1, b_fc1)))

            # Dense layer 2: 1024 -> 1024, batch-norm, ReLU.
            w_fc2 = self._weight_variable([1024, 1024], scope='conv_w11')
            b_fc2 = self._bias_variable([1024], scope='b11')
            hidden = tf.nn.relu(self._batch_norm('bn12', tf.nn.xw_plus_b(hidden, w_fc2, b_fc2)))

            # Linear output head: 1024 -> 3*32*32 pixel values (no activation).
            w_out = self._weight_variable([1024, 3*32*32], scope='fcw2')
            b_out = self._bias_variable([3*32*32], scope='fcb2')

            flat_pixels = tf.matmul(hidden, w_out) + b_out

            output = tf.reshape(flat_pixels, [-1, 32, 32, 3])

            print('recos_target shape', recos_target.shape)

            # Match the target's range to the decoder output before the loss.
            recos_target_norm = recos_target/255-0.5
            loss = tf.reduce_mean((output - recos_target_norm) ** 2)

        return loss, output, recos_target_norm
# NOTE(review): scraper artifact removed — "Пример #3" (Russian for "Example #3")
# and a vote count "0" were pasted in by the code-sample aggregator; they are
# not Python. The following fragment (through the dangling `})`) belongs to a
# different, incomplete example.
            'layer_values_A1': layer_values_A1,
            'n_Amean_xent_d': n_Amean_xent_d,
            'n_Aaccuracy_d': n_Aaccuracy_d
        })

### Calculate triplet loss
# One container per loss term; each is keyed by layer name in the loop below.
triplet_loss_data_A_Ap_B = {}
mse_loss_A_Ap = {}
mse_loss_A_B = {}
triplet_loss_data_A1_Ap_B_list = [{} for _ in range(A1_Ap_B_num)]
# mse_loss_data_A1_Ap_B_list = [dict() for _ in range(A1_Ap_B_num)]

triplet_loss_data_B_Bp_A = {}

# Natural-example embedding, built from the 'x4' layer activations.
f_x4_nat = reshape_cal_len(layer_values_A['x4'])[0]

for layer_name in triplet_loss_layers:
    if Use_A_Ap_B:
        if switch_an_neg:
            anchor = layer_values_A[
                layer_name]  # TODO: already switched anchor and pos
            pos = layer_values_Ap[layer_name]
        else:
            pos = layer_values_A[
                layer_name]  #TODO: already switched anchor and pos
            anchor = layer_values_Ap[layer_name]
        if Use_B_Bp_A:
            neg = layer_values_Bp[layer_name]
        else:
            neg = layer_values_B[layer_name]