Example #1
    def fairness(self, vecs):
        r"""Build fairness metrics component.
        """
        logits = tf.concat(vecs, axis=1)
        for i in range(self.num_dis_layers):
            with tf.variable_scope('fair_fc{}'.format(i)):
                if i == 0:
                    logits = FullyConnected(
                        'fc',
                        logits,
                        self.num_dis_hidden,
                        nl=tf.identity,
                        kernel_initializer=tf.truncated_normal_initializer(
                            stddev=0.1))
                else:
                    logits = FullyConnected('fc',
                                            logits,
                                            self.num_dis_hidden,
                                            nl=tf.identity)

                logits = tf.concat(
                    [logits, self.batch_diversity(logits)], axis=1)
                logits = BatchNorm('bn', logits, center=True, scale=False)
                logits = Dropout(logits)
                logits = tf.nn.leaky_relu(logits)

        return FullyConnected('fair_fc_top', logits, 1, nl=tf.identity)
Example #2
    def build_pre_res_block(x, name, chan, pad_type, norm_type, act_type, first=False):
        """The value of input should be after normalization but before non-linearity.
        So the non-linearity function is needed to be applied in the residual branch.

        This implementation assumes that the input and output dimensions are
        consistent. The upsampling and downsampling steps are implemented in other
        modules such as Conv2D_Transpose and Conv2d, or tile and avg_pool.
        """
        with tf.variable_scope(name):
            assert x.get_shape().as_list()[-1] == chan
            shortcut = x
            if norm_type == "instance":
                res_input = InstanceNorm("inorm", x)
            elif norm_type == "batch":
                res_input = BatchNorm("bnorm", x)
            else:
                res_input = tf.identity(x, name="nonorm")
            if act_type == "relu":
                res_input = tf.nn.relu(res_input, "act_input")
            else:
                res_input = tf.nn.leaky_relu(res_input, alpha=0.2, name="act_input")
            return (LinearWrap(res_input)
                .tf.pad([[0,0], [1,1], [1,1], [0,0]], mode=pad_type)
                .Conv2D("conv0", chan, 3, padding="VALID")
                .tf.pad([[0,0], [1,1], [1,1], [0,0]], mode=pad_type)
                .Conv2D("conv1", chan, 3, padding="VALID", activation=tf.identity)
            )() + shortcut
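A minimal usage sketch for the block above, assuming it is exposed as a static method of ProgressiveSynTex (as it is referenced in Example #12) and that tensorpack's layers are in scope; the input shape, channel count and "REFLECT" pad_type are illustrative assumptions only:

# Hypothetical usage sketch; shapes and argument values are assumptions.
x = tf.placeholder(tf.float32, [None, 64, 64, 128])  # NHWC feature map
y = ProgressiveSynTex.build_pre_res_block(
    x, "res0", chan=128, pad_type="REFLECT",
    norm_type="instance", act_type="relu", first=True)
# y keeps the shape of x; the channel count must equal `chan`,
# otherwise the assertion inside the block fails.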
Example #3
    def discriminator(self, vecs):
        r"""Build discriminator.

        We use an :math:`l`-layer fully connected neural network as the discriminator.
        We concatenate :math:`v_{1:n_c}`, :math:`u_{1:n_c}` and :math:`d_{1:n_d}` together as the
        input. We compute the internal layers as

        .. math::
            \begin{aligned}
            f^{(D)}_{1} &= \textrm{LeakyReLU}(\textrm{BN}(W^{(D)}_{1}(v_{1:n_c} \oplus u_{1:n_c}
                \oplus d_{1:n_d}))), \\
            f^{(D)}_{i} &= \textrm{LeakyReLU}(\textrm{BN}(W^{(D)}_{i}(f^{(D)}_{i-1} \oplus
                \textrm{diversity}(f^{(D)}_{i-1})))), \quad i = 2:l
            \end{aligned}

        where :math:`\oplus` is the concatenation operation. :math:`\textrm{diversity}(·)` is the
        mini-batch discrimination vector [42]. Each dimension of the diversity vector is the total
        distance between one sample and all other samples in the mini-batch using some learned
        distance metric. :math:`\textrm{BN}(·)` is batch normalization, and
        :math:`\textrm{LeakyReLU}(·)` is the leaky rectified linear activation function. We further
        compute the output of the discriminator as :math:`W^{(D)}(f^{(D)}_{l} \oplus \textrm{diversity}
        (f^{(D)}_{l}))`, which is a scalar.

        Args:
            vecs(list[tensorflow.Tensor]): List of tensors matching the spec of :meth:`inputs`

        Returns:
            tensorpack.FullyConnected: a (batch_size, 1) tensor of logits.

        """
        logits = tf.concat(vecs, axis=1)
        for i in range(self.num_dis_layers):
            with tf.variable_scope('dis_fc{}'.format(i)):
                if i == 0:
                    logits = FullyConnected(
                        'fc',
                        logits,
                        self.num_dis_hidden,
                        nl=tf.identity,
                        kernel_initializer=tf.truncated_normal_initializer(
                            stddev=0.1))

                else:
                    logits = FullyConnected('fc',
                                            logits,
                                            self.num_dis_hidden,
                                            nl=tf.identity)

                logits = tf.concat(
                    [logits, self.batch_diversity(logits)], axis=1)
                logits = BatchNorm('bn', logits, center=True, scale=False)
                logits = Dropout(logits)
                logits = tf.nn.leaky_relu(logits)

        return FullyConnected('dis_fc_top', logits, 1, nl=tf.identity)
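The batch_diversity helper used above is not shown in this listing. The sketch below is one way to realise the mini-batch discrimination described in the docstring (each output dimension aggregates the distance between one sample and every other sample in the mini-batch under a learned metric); the kernel sizes and the exp(-L1) similarity are assumptions, not necessarily the original implementation:

    @staticmethod
    def batch_diversity(l, n_kernel=10, kernel_dim=10):
        """Hypothetical sketch of mini-batch discrimination.

        Each sample is projected onto n_kernel learned kernels of size
        kernel_dim; the output measures its similarity to every other
        sample in the batch under an exp(-L1) kernel.
        """
        M = FullyConnected('fc_diversity', l, n_kernel * kernel_dim, nl=tf.identity)
        M = tf.reshape(M, [-1, n_kernel, kernel_dim])
        M1 = tf.reshape(M, [-1, 1, n_kernel, kernel_dim])        # (B, 1, n, d)
        M2 = tf.reshape(M, [1, -1, n_kernel, kernel_dim])        # (1, B, n, d)
        diff = tf.exp(-tf.reduce_sum(tf.abs(M1 - M2), axis=3))   # (B, B, n)
        return tf.reduce_sum(diff, axis=0)                       # (B, n)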
Example #4
 def build_preact_resblock(x,
                           chan,
                           kernel=3,
                           stride=1,
                           res_mult=1.,
                           first=False,
                           name="resblock"):
     """
     """
     chan_in = x.get_shape().as_list()[-1]
     with tf.variable_scope(name):
         bn_input = BatchNorm("bn_input",
                              x,
                              epsilon=EPSILON,
                              center=False,
                              scale=True)
         act_input = ActBias(bn_input, name="act_input")
         # shortcut
         if first:
             input_shortcut = act_input
         else:
             input_shortcut = x
         if chan_in == chan and stride == 1:
             shortcut = input_shortcut
         else:
             shortcut = Conv2D(input_shortcut,
                               chan,
                               stride,
                               stride,
                               name="conv_shortcut")
         # residual branch
         conv1 = Conv2D(act_input, chan, kernel, stride, name="conv1")
         bn1 = BatchNorm("bn1",
                         conv1,
                         epsilon=EPSILON,
                         center=False,
                         scale=True)
         act1 = ActBias(bn1, name="act1")
         conv2 = Conv2D(act1, chan, kernel, 1, name="conv2")
         # join two paths
         y = shortcut + conv2 * res_mult
     return y
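A minimal usage sketch of the pre-activation block above, assuming EPSILON and the ActBias helper are defined elsewhere in the module and the function is callable at module scope; shapes and channel counts are illustrative assumptions:

# Hypothetical usage sketch; shapes and channel counts are assumptions.
x = tf.placeholder(tf.float32, [None, 32, 32, 64])
y1 = build_preact_resblock(x, chan=64, name="block1")              # identity shortcut
y2 = build_preact_resblock(y1, chan=128, stride=2, name="block2")  # conv-projected shortcut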
Example #5
def deconv(name, l, k, net=None):
    with tf.variable_scope(name):
        #l = tf.layers.UpSampling2D(l,2)
        l = BatchNorm('ln', l)
        l = LeakyReLU('leak', l, 0.33)
        l = Deconv2D(name, l, k, 5, stride=2)

        if net is not None:
            net[name] = l

    return l
Example #6
def act(name, x, norm_type, alpha):
    with tf.variable_scope(name, default_name="act"):
        if norm_type == "instance":
            x = InstanceNorm("norm", x)
        elif norm_type == "batch":
            x = BatchNorm("norm", x)
        if alpha is None:
            x = tf.identity(x)
        elif alpha == 0:
            x = tf.nn.relu(x)
        else:
            x = tf.nn.leaky_relu(x, alpha=alpha)
    return x
Example #7
def ScaleNormConv2D(
        x,
        chan,
        kernel,
        stride,
        scale_mean=1.,
        scale_std=0.,  # "ones" initializer for a Scale layer; unused in this variant
        eps=EPSILON,
        name="norm_conv2d_scale"):
    with tf.variable_scope(name):
        y = Conv2D(x, chan, kernel, stride)
        y_normalised = BatchNorm("scale_bn",
                                 y,
                                 epsilon=eps,
                                 center=False,
                                 scale=True)
    return y_normalised
Example #8
def NormConv2DScale(
        x,
        chan,
        kernel,
        stride,
        scale_mean=1.,
        scale_std=0.,  # one initializer
        eps=EPSILON,
        name="norm_conv2d_scale"):
    with tf.variable_scope(name):
        x_scaled = Scale(x, scale_mean, scale_std)
        y = Conv2D(x_scaled, chan, kernel, stride)
        y_normalised = BatchNorm("bn_noaffine",
                                 y,
                                 epsilon=eps,
                                 center=False,
                                 scale=False)
    return y_normalised
Example #9
def conv(name, l, channel, k, stride=1, net=None, use_bn=True):
    with tf.variable_scope(name):
        if use_bn:
            l = BatchNorm('bn', l)
            l = LeakyReLU('leak', l, 0.33)

        if stride > 1:
            l = Conv2D('conv', l, channel, k, stride=stride)
        else:
            l = tf.layers.conv2d(l,
                                 channel,
                                 k,
                                 padding='SAME',
                                 dilation_rate=2)

        if net is not None:
            net[name] = l

    return l
Example #10
def BNLReLU(x, name=None):
    x = BatchNorm('bnorm', x)
    return tf.nn.leaky_relu(x, alpha=0.2, name=name)
Example #11
def BNReLU(x, name=None):
    x = BatchNorm('bnorm', x)
    return tf.nn.relu(x, name=name)
Example #12
 def build_stage(self, pre_image_input, gram_target, n_loss_layer, name="stage"):
     res_block = ProgressiveSynTex.build_pre_res_block if self._pre_act \
         else ProgressiveSynTex.build_res_block
     upsample = ProgressiveSynTex.build_upsampling_nnconv if self._nn_upsample \
         else ProgressiveSynTex.build_upsampling_deconv
     if self._norm_type == "instance":
         norm = lambda _x, _name: InstanceNorm(_name, _x)
         if self._act_type == "relu":
             norm_act = INReLU
         else:
             norm_act = INLReLU
     elif self._norm_type == "batch":
         norm = lambda _x, _name: BatchNorm(_name, _x)
         if self._act_type == "relu":
             norm_act = BNReLU
         else:
             norm_act = BNLReLU
     else:
         norm = tf.identity
         if self._act_type == "relu":
             norm_act = NONReLU
         else:
             norm_act = NONLReLU
     if self._act_type == "relu":
         act = NONReLU
     else:
         act = NONLReLU
     coefs = OrderedDict()
     for k in list(SynTexModelDesc.DEFAULT_COEFS.keys())[:n_loss_layer]:
         coefs[k] = SynTexModelDesc.DEFAULT_COEFS[k]
     with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
         # extract features and gradients
         image_input = self._act(pre_image_input, name="input_"+name)
         feat_input, loss_input, loss_per_layer, grad_per_layer = \
                 self.build_stage_preparation(image_input, gram_target, coefs)
         # For an adaptive texture synthesizer, we provide gradients explicitly to
         # the synthesizer.
         #            none +
         # grad[4] conv[4] -> res[4] -> up[4] +
         #                    grad[3] conv[3] -> res[3] -> up[3] +
         #                                       grad[2] conv[2] -> res[2] -> up[2] +
         #                                                               ... ...
         #                                                 up[1] +
         #                                       grad[0] conv[0] -> res[0] -> output
         with argscope([Conv2D, Conv2DTranspose], activation=norm_act, use_bias=False):
             first = True
             for layer in reversed(feat_input):
                 if layer in grad_per_layer:
                     grad = grad_per_layer[layer]
                     chan = grad.get_shape().as_list()[-1]
                     with tf.variable_scope(layer):
                         # compute pseudo grad of current layer
                         grad = PadConv2D(grad, chan, self._grad_ksize, self._pad_type, norm_act, False, "grad_conv1")
                         grad = PadConv2D(grad, chan, self._grad_ksize, self._pad_type, tf.identity, False, "grad_conv2")
                         # merge with grad from deeper layers
                         if first:
                             delta = tf.identity(grad, name="grad_merged")
                             first = False
                         else:
                             # upsample deeper grad
                             if self._pre_act:
                                 delta = norm_act(delta, "pre_inrelu")
                             else:
                                 delta = act(delta, "pre_relu")
                             delta = upsample(delta, "up", self._pad_type, chan=chan) # not normalized nor activated
                             # add two grads
                             delta = tf.add(grad, delta, name="grad_merged")
                         if not self._pre_act:
                             delta = norm(delta, "post_inorm")
                         # simulate the backpropagation procedure to next level
                         for k in range(self._n_block):
                             delta = res_block(delta, "res{}".format(k), chan,
                                     self._pad_type, self._norm_type,
                                     self._act_type, first=(k == 0))
             # output
             if self._pre_act:
                 delta = norm_act(delta, "actlast")
             else:
                 delta = act(delta, "actlast")
             delta_input = PadConv2D(delta, 3, 3, self._pad_type, tf.identity, True, "convlast")
         if self._stop_grad:
             pre_image_output = tf.add(tf.stop_gradient(pre_image_input), delta_input, name="pre_image_output")
         else:
             pre_image_output = tf.add(pre_image_input, delta_input, name="pre_image_output")
     return image_input, loss_input, loss_per_layer, pre_image_output
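PadConv2D is used throughout build_stage but is not shown in this listing. The sketch below guesses at such a helper, following the pad-then-VALID-convolution pattern of Example #2; the argument order (input, channels, kernel, pad_type, activation, use_bias, name) is inferred from the call sites above and is an assumption:

def PadConv2D(x, chan, kernel, pad_type, activation, use_bias, name):
    """Hypothetical helper: explicit padding followed by a VALID convolution.

    Reconstructed from the call sites in build_stage; not the original code.
    """
    p = (kernel - 1) // 2  # symmetric padding that preserves spatial size for odd kernels
    with tf.variable_scope(name):
        x = tf.pad(x, [[0, 0], [p, p], [p, p], [0, 0]], mode=pad_type)
        return Conv2D("conv", x, chan, kernel, padding="VALID",
                      activation=activation, use_bias=use_bias)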