# Example #1
# 0
    def fairness(self, vecs):
        r"""Build the fairness critic network.

        Structurally mirrors :meth:`discriminator`: the input vectors are
        concatenated and passed through ``self.num_dis_layers`` fully
        connected layers, each followed by mini-batch diversity
        concatenation, batch normalization, dropout and a LeakyReLU
        activation. A final fully connected layer projects to one logit
        per sample.

        NOTE(review): unlike :meth:`discriminator`, this method uses
        ``BatchNorm`` (not ``LayerNorm``) and does not wrap its layers in
        an outer ``tf.variable_scope`` — presumably intentional, but worth
        confirming against the training code.

        Args:
            vecs(list[tensorflow.Tensor]): List of tensors matching the spec
                of :meth:`inputs`.

        Returns:
            tensorflow.Tensor: a (b, 1) logits tensor.
        """
        logits = tf.concat(vecs, axis=1)
        for layer in range(self.num_dis_layers):
            with tf.variable_scope('fair_fc{}'.format(layer)):
                # Only the first layer gets the explicit truncated-normal
                # initializer; subsequent layers keep the framework default.
                fc_kwargs = {}
                if layer == 0:
                    fc_kwargs['kernel_initializer'] = \
                        tf.truncated_normal_initializer(stddev=0.1)
                logits = FullyConnected(
                    'fc',
                    logits,
                    self.num_dis_hidden,
                    nl=tf.identity,
                    **fc_kwargs)

                # Append the mini-batch diversity features before
                # normalization, dropout and activation.
                logits = tf.concat(
                    [logits, self.batch_diversity(logits)], axis=1)
                logits = BatchNorm('bn', logits, center=True, scale=False)
                logits = Dropout(logits)
                logits = tf.nn.leaky_relu(logits)

        return FullyConnected('fair_fc_top', logits, 1, nl=tf.identity)
    def discriminator(self, vecs):
        r"""Build discriminator.

        We use a :math:`l`-layer fully connected neural network as the discriminator.
        We concatenate :math:`v_{1:n_c}`, :math:`u_{1:n_c}` and :math:`d_{1:n_d}` together as the
        input. We compute the internal layers as

        .. math::
            \begin{aligned}

            f^{(D)}_{1} &= \textrm{LeakyReLU}(\textrm{BN}(W^{(D)}_{1}(v_{1:n_c} \oplus u_{1:n_c}
                \oplus d_{1:n_d})))

            f^{(D)}_{i} &= \textrm{LeakyReLU}(\textrm{BN}(W^{(D)}_{i}(f^{(D)}_{i−1} \oplus
                \textrm{diversity}(f^{(D)}_{i−1})))), i = 2:l

            \end{aligned}

        where :math:`\oplus` is the concatenation operation. :math:`\textrm{diversity}(·)` is the
        mini-batch discrimination vector [42]. Each dimension of the diversity vector is the total
        distance between one sample and all other samples in the mini-batch using some learned
        distance metric. :math:`\textrm{BN}(·)` is batch normalization, and
        :math:`\textrm{LeakyReLU}(·)` is the leaky rectified linear activation function. We further
        compute the output of discriminator as :math:`W^{(D)}(f^{(D)}_{l} \oplus \textrm{diversity}
        (f^{(D)}_{l}))` which is a scalar.

        Args:
            vecs(list[tensorflow.Tensor]): List of tensors matching the spec of :meth:`inputs`

        Returns:
            tensorpack.FullyConnected: a (b, 1) logits

        """
        logits = tf.concat(vecs, axis=1)
        with tf.variable_scope('discrim'):
            for layer in range(self.num_dis_layers):
                with tf.variable_scope('dis_fc{}'.format(layer)):
                    # Only the first layer gets the explicit truncated-normal
                    # initializer; subsequent layers keep the framework default.
                    fc_kwargs = {}
                    if layer == 0:
                        fc_kwargs['kernel_initializer'] = \
                            tf.truncated_normal_initializer(stddev=0.1)
                    logits = FullyConnected(
                        'fc',
                        logits,
                        self.num_dis_hidden,
                        nl=tf.identity,
                        **fc_kwargs)

                    # Append the mini-batch diversity features before
                    # normalization, dropout and activation.
                    logits = tf.concat(
                        [logits, self.batch_diversity(logits)], axis=1)
                    logits = LayerNorm('ln', logits)
                    logits = Dropout(logits)
                    logits = tf.nn.leaky_relu(logits)

            return FullyConnected('dis_fc_top', logits, 1, nl=tf.identity)