Example #1
# cost_Classifier and the mmd module are defined elsewhere in this project.
import tensorflow as tf

import mmd


def cost_slim(repShE_S, repShE_T, logits, labels, shape_source, shape_target):
    # loss_similarity = cost_Similarity(repShE_S, repShE_T)  # summary done in cost_Similarity()
    loss_similarity = mmd.rbf_mmd2(repShE_S, repShE_T, shape_source,
                                   shape_target)
    loss_classifier = cost_Classifier(logits, labels)
    tf.summary.scalar("Classifier loss", loss_classifier)
    tf.add_to_collection('losses', loss_similarity)
    tf.add_to_collection('losses', loss_classifier)
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
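Across these examples, mmd is a project-local module and the rbf_mmd2 signature varies (tensor shapes above, a bandwidth sigma in the later examples). For reference only, here is a minimal NumPy sketch of the biased RBF-MMD^2 statistic such a function typically estimates; the name and signature below are illustrative, not the project's API:

import numpy as np

def rbf_mmd2_sketch(X, Y, sigma=1.0):
    # RBF kernel k(a, b) = exp(-||a - b||^2 / (2 * sigma^2))
    def rbf_kernel(A, B):
        sq = (np.sum(A**2, axis=1)[:, None]
              + np.sum(B**2, axis=1)[None, :]
              - 2.0 * A @ B.T)
        return np.exp(-sq / (2.0 * sigma**2))

    # biased estimate: E[k(x, x')] + E[k(y, y')] - 2 E[k(x, y)]
    return (rbf_kernel(X, X).mean()
            + rbf_kernel(Y, Y).mean()
            - 2.0 * rbf_kernel(X, Y).mean())

The value is near zero when X and Y are drawn from the same distribution and grows as the two samples diverge.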
Example #2
import numpy as np
import theano
import theano.tensor as T
import ot  # POT (Python Optimal Transport)
import matplotlib.pyplot as plt

import mmd  # project module providing rbf_mmd2

nt = 20  # number of target points

nb_tr = 3  # number of trials for averaging

num_src = 5  # number of source domains

# variables to store the results
lambdaWa = []
Wdista = []
MMDa = []
true_errora = []

# variables used in Theano for the MMD calculation
Xth, Yth = T.matrices('X', 'Y')
sigmath = T.scalar('sigma')
fn = theano.function([Xth, Yth, sigmath], mmd.rbf_mmd2(Xth, Yth,
                                                       sigma=sigmath))

# ns (number of source points), theta_range, and plot_moons are set earlier
# in the original script; this excerpt begins partway through it.
a, b = np.ones((ns, )) / ns, np.ones(
    (nt, )) / nt  # empirical distributions for source and target domains
reg = 1e-1  # entropic regularization for \lambda computation
Mb = ot.utils.dist0(ns)  # cost matrix on bins
Mb /= Mb.max()  # normalization

if plot_moons:  # to plot the data (avoid when len(theta_range)>5)
    fig, axes = plt.subplots(len(theta_range), num_src, figsize=(21, 16))

for j, it in enumerate(theta_range):
    # per-angle accumulators; the rest of the loop body is truncated here
    lambdaW = []
    Wdist = []
    MMD = []
    true_error = []
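Inside that loop, the compiled Theano function fn is evaluated on concrete NumPy arrays. A hypothetical call, with made-up feature matrices and assuming Theano's default floatX='float64':

# Hypothetical call to the compiled Theano MMD function:
Xs = np.random.randn(30, 2)   # stand-in source features
Xt = np.random.randn(nt, 2)   # stand-in target features
mmd_value = fn(Xs, Xt, 1.0)   # scalar MMD^2 with bandwidth sigma = 1.0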
Example #3
def mmd_score(res_i, res_j, sigma, biased=True):
    # reshape each 1-D result array into an (n, 1) sample matrix
    res_i = res_i.reshape(-1, 1)
    res_j = res_j.reshape(-1, 1)
    return mmd.rbf_mmd2(res_i, res_j, sigma, biased)
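A hypothetical call, assuming res_i and res_j are 1-D NumPy arrays of per-sample results:

scores_a = np.random.randn(200)          # made-up results, sample A
scores_b = np.random.randn(200) + 0.5    # made-up results, sample B
print(mmd_score(scores_a, scores_b, sigma=1.0))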
Example #4
    def build_model(self):
        # some parameters
        image_dims = [self.input_height, self.input_width, self.c_dim]
        bs = self.batch_size

        """ Graph Input """
        # images
        self.inputs = tf.placeholder(tf.float32, [bs] + image_dims,
                                     name='real_images')

        # labels
        self.y = tf.placeholder(tf.float32, [bs, self.y_dim], name='y')

        # noises
        self.z = tf.placeholder(tf.float32, [bs, self.z_dim], name='z')

        """ Loss Function """

        # output of D for real images
        D_real, D_real_logits, _ = self.discriminator(
            self.inputs, self.y, is_training=True, reuse=False)

        # output of D for fake images
        G = self.generator(self.z, self.y, is_training=True, reuse=False)
        D_fake, D_fake_logits, _ = \
            self.discriminator(G, self.y, is_training=True, reuse=True)

        # get loss for discriminator
        d_loss_real = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=D_real_logits, labels=tf.ones_like(D_real)))
        d_loss_fake = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=D_fake_logits, labels=tf.zeros_like(D_fake)))

        self.d_loss = d_loss_real + d_loss_fake

        # get loss for generator
        self.g_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=D_fake_logits, labels=tf.ones_like(D_fake)))

        """ Training """
        # divide trainable variables into a group for D and a group for G
        t_vars = tf.trainable_variables()
        d_vars = [var for var in t_vars if 'd_' in var.name]
        g_vars = [var for var in t_vars if 'g_' in var.name]

        # optimizers
        with tf.control_dependencies(
                tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            self.d_optim = \
                tf.train.AdamOptimizer(self.learning_rate, beta1=self.beta1) \
                .minimize(self.d_loss, var_list=d_vars)
            # note: the generator is trained with a 5x larger learning rate here
            self.g_optim = \
                tf.train.AdamOptimizer(self.learning_rate*5, beta1=self.beta1)\
                .minimize(self.g_loss, var_list=g_vars)

        """" Testing """
        # for test
        self.fake_images = \
            self.generator(self.z, self.y, is_training=False, reuse=True)

        """ Summary """
        d_loss_real_sum = tf.summary.scalar("d_loss_real", d_loss_real)
        d_loss_fake_sum = tf.summary.scalar("d_loss_fake", d_loss_fake)
        d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
        g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)

        # final summary operations
        self.g_sum = tf.summary.merge([d_loss_fake_sum, g_loss_sum])
        self.d_sum = tf.summary.merge([d_loss_real_sum, d_loss_sum])

        """ MMD """
        self.generated_samples = tf.placeholder(tf.float32,
                                                [None, self.output_height,
                                                 self.output_width,
                                                 self.c_dim],
                                                name="mmd_generatedsamples")
        self.training_data = tf.placeholder(tf.float32,
                                            [None, self.input_height,
                                             self.input_width, self.c_dim],
                                            name="mmd_trainingdata")

        aux_1 = tf.reshape(self.generated_samples,
                           [-1, self.output_width * self.output_height *
                            self.c_dim])

        aux_2 = tf.reshape(self.training_data,
                           [-1, self.input_width * self.input_height *
                            self.c_dim])

        self.log_mmd = tf.log(mmd.rbf_mmd2(aux_1, aux_2))
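At evaluation time the two MMD placeholders are fed with matching batches of generated and real images. A hypothetical call from the training loop; sess, gen_batch, and real_batch are assumed names outside this class, not part of the original code:

# Hypothetical: evaluate log-MMD^2 between generated and real batches.
log_mmd_val = sess.run(self.log_mmd,
                       feed_dict={self.generated_samples: gen_batch,
                                  self.training_data: real_batch})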