Example #1
import numpy as np
import tensorflow as tf
# nn_distance is assumed to be the point-cloud chamfer op (e.g. tf_nndistance
# from the latent_3d_points structural losses), already built and imported.


def get_chamfer_permut(data, permut):
    """Chamfer distance between a batch of point clouds and a permuted copy of itself."""
    assert len(permut) == len(data)
    permut = np.array(permut)
    c1, _, c2, _ = nn_distance(data, data[permut])
    sess = tf.Session()
    c1, c2 = sess.run([c1, c2])
    dist = np.mean(c1) + np.mean(c2)
    return dist
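
A minimal usage sketch, assuming the nn_distance op is built and the function above is in scope; the shapes and names below are illustrative only:

import numpy as np

data = np.random.rand(8, 1024, 3).astype(np.float32)   # 8 clouds of 1024 points
permut = np.random.permutation(len(data))
print('chamfer under permutation:', get_chamfer_permut(data, permut))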
Example #2
def get_avg_chamfer_own(data):
    """Average chamfer distance from each cloud in `data` to every other cloud in `data`."""
    l_data = len(data)
    l_fake = l_data
    data_fake = data
    sess = tf.Session()
    dist_list = list()
    for j in range(l_fake):
        fake_data = np.tile(data_fake[j], (l_data - 1, 1, 1))
        c1, _, c2, _ = nn_distance(
            fake_data, np.concatenate((data[:j], data[j + 1:]), axis=0))
        c1, c2 = sess.run([c1, c2])
        dist = np.mean(c1, axis=1) + np.mean(c2, axis=1)
        dist = dist.mean()
        dist_list.append(dist)
        print('compared to %s: %s' % (j, np.mean(dist_list)))
    return dist_list
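
The same kind of call works for this intra-set variant; a small, hedged sketch (set size and point count are illustrative):

import numpy as np

clouds = np.random.rand(16, 1024, 3).astype(np.float32)
dists = get_avg_chamfer_own(clouds)
print('average intra-set chamfer:', np.mean(dists))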
Example #3
from collections import defaultdict


def get_nn_chamfer(data, data_fake):
    """For each fake cloud, find the chamfer-nearest true cloud and record the match."""
    l_data = len(data)
    l_fake = len(data_fake)
    sess = tf.Session()
    dist_list = list()
    # fake index -> (nearest true index, distance); true index -> list of (fake index, distance)
    fake_to_true = defaultdict(list)
    true_to_fake = defaultdict(list)
    for j in range(l_fake):
        fake_data = np.tile(data_fake[j], (l_data, 1, 1))
        c1, _, c2, _ = nn_distance(fake_data, data)
        c1, c2 = sess.run([c1, c2])
        dist = np.mean(c1, axis=1) + np.mean(c2, axis=1)
        code = dist.argmin()
        dist = dist.min()
        fake_to_true[j] = (code, dist)
        true_to_fake[code].append((j, dist))
        dist_list.append(dist)
        print('compared to %s: %s' % (j, np.mean(dist_list)))
    return fake_to_true, true_to_fake, dist_list
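
A sketch of how the matching output might be inspected, assuming `data` holds reference clouds and `data_fake` generated ones (shapes illustrative):

import numpy as np

data = np.random.rand(32, 1024, 3).astype(np.float32)       # reference set
data_fake = np.random.rand(8, 1024, 3).astype(np.float32)   # generated set

fake_to_true, true_to_fake, dist_list = get_nn_chamfer(data, data_fake)
for j, (idx, d) in fake_to_true.items():
    print('fake %d -> true %d (chamfer %.4f)' % (j, idx, d))
# true_to_fake lists, for each reference cloud, the fakes that picked it as nearest.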
Example #4

    def _create_loss(self):

        c = self.configuration
        disc_kwargs = {}
        # Prior samples for the latent GAN: Gaussian noise, scaled down by 10.
        self.noise = tf.random_normal(
            [c.batch_size, self.z.get_shape().as_list()[1]]) / 10
        # flag == 1 would use a sliced (random-projection) matching loss between
        # latents and noise instead of the WGAN-GP critic below; it is hard-coded off.
        self.flag = 0
        if self.flag:
            # Project latents and noise onto 50 random directions and match the
            # sorted projections (a sliced-Wasserstein-style loss).
            theta = tf.random_normal([50, self.z.get_shape().as_list()[1]])
            projae = tf.matmul(self.z, tf.transpose(theta))
            projn = tf.matmul(self.noise, tf.transpose(theta))
            self.loss_d = tf.reduce_mean(
                (tf.nn.top_k(tf.transpose(projae), k=c.batch_size).values -
                 tf.nn.top_k(tf.transpose(projn), k=c.batch_size).values) ** 2)
            self.loss_g = self.loss_d
            train_vars = tf.trainable_variables()

            g_params = [
                v for v in train_vars if '/discriminator/' not in v.name
            ]
            g_0 = g_params[0]
            self.g_gradients = tf.gradients(self.loss_g, g_0)[0]
        else:
            with tf.variable_scope("discriminator") as scope:
                _, self.disc_z = self.discriminator(self.z,
                                                    scope=scope,
                                                    **disc_kwargs)
                self.noise = tf.random_normal(
                    [c.batch_size,
                     self.z.get_shape().as_list()[1]]) / 10

                _, self.disc_n = self.discriminator(self.noise,
                                                    reuse=True,
                                                    scope=scope,
                                                    **disc_kwargs)
            # WGAN-style critic objective on latent codes vs. the Gaussian prior.
            self.loss_d = tf.reduce_mean(self.disc_n) - tf.reduce_mean(
                self.disc_z)
            self.loss_g = tf.reduce_mean(self.disc_z)
            train_vars = tf.trainable_variables()

            g_params = [
                v for v in train_vars if '/discriminator/' not in v.name
            ]
            g_0 = g_params[0]
            self.g_gradients = tf.gradients(self.loss_g, g_0)[0]
            # WGAN-GP gradient penalty: evaluate the critic on random interpolates
            # between noise and latents and penalise gradient norms away from 1.
            epsilon = tf.random_uniform([], 0.0, 1.0)
            x_hat = self.noise * epsilon + (1 - epsilon) * self.z
            with tf.variable_scope('discriminator') as scope:
                self.d_hat_prob, self.d_hat = self.discriminator(x_hat,
                                                                 reuse=True,
                                                                 scope=scope)
            gradients = tf.gradients(self.d_hat, x_hat)[0]
            slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1]))
            gradient_penalty = 10 * tf.reduce_mean((slopes - 1.0) ** 2)
            self.loss_d += gradient_penalty

        # Reconstruction loss between the decoded clouds and the ground truth.
        if c.loss == 'chamfer':
            cost_p1_p2, _, cost_p2_p1, _ = nn_distance(self.x_reconstr,
                                                       self.gt)
            self.loss = tf.reduce_mean(cost_p1_p2) + tf.reduce_mean(cost_p2_p1)
        elif c.loss == 'emd':
            match = approx_match(self.x_reconstr, self.gt)
            self.loss = tf.reduce_mean(
                match_cost(self.x_reconstr, self.gt, match))
        if c.adv_ae:
            # Fold a small adversarial term into the reconstruction objective.
            self.loss_g_gradients = tf.gradients(self.loss, g_0)[0]
            self.loss += (self.loss_g / 100)
        reg_losses = self.graph.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        if c.exists_and_is_not_none('w_reg_alpha'):
            w_reg_alpha = c.w_reg_alpha
        else:
            w_reg_alpha = 1.0

        for rl in reg_losses:
            self.loss += (w_reg_alpha * rl)
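
The method above only defines the loss tensors; below is a minimal, hedged sketch (not from the original code) of how they might be attached to optimizers, assuming `model` is an instance exposing these attributes and reusing the '/discriminator/' naming convention from _create_loss. Optimizer choice and learning rates are illustrative guesses.

import tensorflow as tf

train_vars = tf.trainable_variables()
d_params = [v for v in train_vars if '/discriminator/' in v.name]
g_params = [v for v in train_vars if '/discriminator/' not in v.name]

# Critic step on loss_d (which includes the gradient penalty); autoencoder step on
# the reconstruction loss, which already carries loss_g / 100 when c.adv_ae is set.
opt_d = tf.train.AdamOptimizer(1e-4, beta1=0.5).minimize(model.loss_d, var_list=d_params)
opt_ae = tf.train.AdamOptimizer(1e-4, beta1=0.5).minimize(model.loss, var_list=g_params)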