    def transform_from_internal_poly(self, content):
        """Reconstruct the content features under a learnable convex
        combination of several stored (mean, sigma) statistics, then decode."""

        # encode the content image and normalize its features
        enc_c, enc_c_layers = self.encode(content)
        self.normalized, self.meanC, self.sigmaC = normalize(enc_c)

        INTERPOLATE_NUM = settings.config["INTERPOLATE_NUM"]
        BATCH_SIZE = settings.config["BATCH_SIZE"]
        DIM = settings.config["DECODER_DIM"]
        STORE_SHAPE = [BATCH_SIZE] + DIM

        # buffer for the normalized content features, plus placeholders for the
        # INTERPOLATE_NUM per-style statistics and the interpolation coefficients
        self.store_var = tf.Variable(
            tf.zeros(STORE_SHAPE), dtype=tf.float32, trainable=False)
        self.internal_sigma = tf.placeholder(
            tf.float32, shape=(BATCH_SIZE, INTERPOLATE_NUM, 1, 1, DIM[2]))
        self.internal_mean = tf.placeholder(
            tf.float32, shape=(BATCH_SIZE, INTERPOLATE_NUM, 1, 1, DIM[2]))

        self.coef_ph = tf.placeholder(
            tf.float32, shape=[BATCH_SIZE, INTERPOLATE_NUM])
        
        with tf.variable_scope("transform"):
            self.coef = tf.get_variable("coef", shape=[BATCH_SIZE, INTERPOLATE_NUM],
                                        initializer=tf.ones_initializer())
            self.coef_asgn = tf.assign(self.coef, self.coef_ph)
            method = "relu"
            if method == "relu":
                # clip to non-negative weights, then normalize them to sum to 1
                positive_coef = tf.nn.relu(self.coef)
                sum_coef = tf.reduce_sum(positive_coef, axis=1, keepdims=True)
                coef_poss = positive_coef / (sum_coef + 1e-7)
                self.regulate = tf.assign(self.coef, coef_poss)
                coef_poss = tf.reshape(
                    coef_poss, shape=[BATCH_SIZE, INTERPOLATE_NUM, 1, 1, 1])
            elif method == "softmax":
                coef = self.coef * 2  # scale the logits to control the gradient magnitude
                coef_poss = tf.nn.softmax(coef, axis=-1)
                coef_poss = tf.reshape(
                    coef_poss, shape=[BATCH_SIZE, INTERPOLATE_NUM, 1, 1, 1])
                self.regulate = []

        self.store_normalize = [tf.assign(self.store_var, self.normalized),
                                self.coef.initializer]
        # interpolated statistics: a convex combination over the stored styles
        self.sigma_poly = tf.reduce_sum(self.internal_sigma * coef_poss, axis=1)
        self.mean_poly = tf.reduce_sum(self.internal_mean * coef_poss, axis=1)
        self.set_stat(self.mean_poly, self.sigma_poly)
        # de-normalize the stored features with the interpolated statistics
        self.restored_internal = self.store_var * self.sigma_poly + self.mean_poly
        self.target_features = self.restored_internal

        self.loss_l1 = tf.reduce_sum(tf.abs(enc_c - self.target_features))

        generated_adv_img = self.decode(self.target_features)
        generated_img = self.decode(enc_c)

        return generated_img, generated_adv_img
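
A minimal usage sketch of the graph built above (all names here are
assumptions, not part of the original code: `sess` is a tf.Session, `model` is
the enclosing object, `content_ph`/`content_batch` are the input placeholder
and an image batch, and `means`/`sigmas` are precomputed per-style statistics
of shape (BATCH_SIZE, INTERPOLATE_NUM, 1, 1, C)):

    import numpy as np

    gen, gen_adv = model.transform_from_internal_poly(content_ph)
    sess.run(tf.global_variables_initializer())
    # snapshot the normalized content features and reset the coefficients
    sess.run(model.store_normalize, feed_dict={content_ph: content_batch})
    # start from uniform interpolation weights over the stored styles
    uniform = np.full((BATCH_SIZE, INTERPOLATE_NUM), 1.0 / INTERPOLATE_NUM)
    sess.run(model.coef_asgn, feed_dict={model.coef_ph: uniform})
    adv_images = sess.run(gen_adv, feed_dict={model.internal_mean: means,
                                              model.internal_sigma: sigmas})
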
    def transform_from_internal(self, content, store_var, sigma, mean):
        """Re-style the content features with externally supplied
        (mean, sigma) statistics, then decode the result."""

        # encode the content image and normalize its features
        enc_c, enc_c_layers = self.encode(content)
        self.normalized, self.meanC, self.sigmaC = normalize(enc_c)

        self.store_normalize = tf.assign(store_var, self.normalized)
        self.set_stat(mean, sigma)
        # de-normalize the stored features with the target statistics
        self.restored_internal = store_var * sigma + mean
        self.target_features = self.restored_internal

        self.loss_l1 = tf.reduce_sum(tf.abs(enc_c - self.target_features))

        generated_adv_img = self.decode(self.target_features)
        generated_img = self.decode(enc_c)

        return generated_img, generated_adv_img
    def transform_from_internal(self, content, store_var, sigma, mean):
        """Variant that uses an external encoder/decoder pair and converts
        between RGB and the encoder's BGR input convention."""
        content = tf.reverse(content, axis=[-1])  # RGB -> BGR
        #style = tf.reverse(style, axis=[-1])

        # preprocess image
        content = self.encoder.preprocess(content)
        #style = self.encoder.preprocess(style)

        # encode the content image and normalize its features
        enc_c, enc_c_layers = self.encoder.encode(content)
        self.normalized, self.meanC, self.sigmaC = normalize(enc_c)

        self.store_normalize = tf.assign(store_var, self.normalized)
        # de-normalize the stored features with the target statistics
        self.restored_internal = store_var * sigma + mean
        self.target_features = self.restored_internal

        self.loss_l1 = tf.reduce_sum(tf.abs(enc_c - self.target_features))

        generated_adv_img = self.deocde_to_real(
            self.decoder.decode(self.target_features))
        generated_img = self.deocde_to_real(self.decoder.decode(enc_c))

        return generated_img, generated_adv_img
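
The normalize helper these methods rely on is not shown in the snippet. A
minimal sketch of what it is assumed to compute, AdaIN-style per-channel
statistics over the spatial axes (an assumption, not the repository's code):

    import tensorflow as tf

    def normalize(x, eps=1e-5):
        # per-channel mean/std over H and W; returns the standardized
        # features together with the statistics used
        mean = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
        var = tf.reduce_mean(tf.square(x - mean), axis=[1, 2], keepdims=True)
        sigma = tf.sqrt(var + eps)
        return (x - mean) / sigma, mean, sigma

Under this convention, `store_var * sigma + mean` above re-applies the target
statistics to the stored standardized features.
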
    # content loss in encoder feature space
    content_loss = tf.reduce_sum(
        tf.reduce_mean(tf.square(enc_gen_adv - target_features), axis=[1, 2]))
    #content_loss += tf.reduce_sum(tf.reduce_mean(
    #    tf.square(enc_gen - stn.norm_features), axis=[1, 2]))

    # compute the style loss
    style_layer_loss = []

    # compute the total loss: content loss plus the weighted adversarial loss
    loss = content_loss + tf.reduce_sum(
        adv_loss * BATCH_SIZE * adv_weight)  # + style_weight * style_loss

    # mean L2 distance between the normalized embeddings
    l2_embed = normalize(enc_gen)[0] - normalize(stn.norm_features)[0]
    l2_embed = tf.reduce_mean(
        tf.sqrt(tf.reduce_sum(l2_embed * l2_embed, axis=[1, 2, 3])))
    if data_set == "cifar10":
        classifier_vars = get_scope_var("model")
    decoder_vars = get_scope_var("decoder")
    # Training step
    global_step = tf.Variable(0, trainable=False)
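    # inverse time decay: lr(t) = LEARNING_RATE / (1 + LR_DECAY_RATE * t / DECAY_STEPS)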
    learning_rate = tf.train.inverse_time_decay(LEARNING_RATE, global_step,
                                                DECAY_STEPS, LR_DECAY_RATE)
    #train_op = tf.train.AdamOptimizer(learning_rate).minimize(
    #    loss, var_list=stn_vars, global_step=global_step)
    # (alternatives tried: tf.train.MomentumOptimizer(learning_rate, momentum=0.9),
    #  tf.train.GradientDescentOptimizer(learning_rate))
# Example 5
        # classifier on the adversarial image; the attack objective is the
        # negated classifier target loss
        classifier = build_imagenet_model(adv_img_bgr, label, conf=1)
        adv_loss = -classifier.target_loss5
        adv_acc = classifier.accuracy
        adv_acc_y = classifier.acc_y
        adv_acc_y_5 = classifier.acc_y_5

        # classifier on the clean content image, reusing the same weights
        content_bgr = tf.reverse(content, axis=[-1])  # switch RGB to BGR
        classifier = build_imagenet_model(content_bgr, label, reuse=True)
        normal_loss = -classifier.target_loss5
        norm_acc = classifier.accuracy
        acc_y = classifier.acc_y
        acc_y_5 = classifier.acc_y_5

        # classifier on the plain decoded (non-adversarial) image
        classifier = build_imagenet_model(img_bgr, label, reuse=True)
        decode_acc_y = classifier.acc_y
        decode_acc_y_5 = classifier.acc_y_5
    # per-sample L2 distance between the normalized embeddings
    l2_embed_d = normalize(enc_gen_adv)[0] - normalize(stn.norm_features)[0]
    l2_embed = tf.sqrt(tf.reduce_sum(l2_embed_d * l2_embed_d, axis=[1, 2, 3]))

    # compute the content loss
    bar = 3000 / 64 / 128
    # per-sample content loss (spatial mean, then sum over channels)
    content_loss_y = tf.reduce_sum(
        tf.reduce_mean(tf.square(enc_gen_adv - target_features), axis=[1, 2]),
        axis=-1)
    content_loss = tf.reduce_sum(
        tf.reduce_mean(tf.square(enc_gen_adv - target_features), axis=[1, 2]))
    #content_loss += tf.reduce_sum(tf.reduce_mean(
    #    tf.square(enc_gen - stn.norm_features), axis=[1, 2]))
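
    # `bar` above looks like a per-sample threshold on content_loss_y
    # (3000 normalized by 64 * 128); a hypothetical use, not in the original:
    within_budget = tf.cast(content_loss_y < bar, tf.float32)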