Code Example #1
    def decoder(self, net, reuse=None, training=True):
        """Builds a decoder on top of net.

        Args:
            net: Input to the decoder (output of the encoder)
            reuse: Whether to reuse already defined variables
            training: Whether in train or eval mode.

        Returns:
            Decoded image with 3 channels.
        """
        f_dims = DEFAULT_FILTER_DIMS
        with tf.variable_scope('decoder', reuse=reuse):
            with slim.arg_scope(ae_argscope(activation=self.activation, padding='SAME', training=training)):
                for l in range(0, self.num_layers - 1):
                    net = up_conv2d(net, num_outputs=f_dims[self.num_layers - l - 2], scope='deconv_{}'.format(l))
                net = up_conv2d(net, num_outputs=32, scope='deconv_{}'.format(self.num_layers))

                net = slim.conv2d(net, num_outputs=3, scope='deconv_{}'.format(self.num_layers + 1), stride=1,
                                  activation_fn=tf.nn.tanh, normalizer_fn=None)
                return net
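The decoder mirrors a strided-convolution encoder: each up_conv2d (a project helper, presumably a learned upsampling) raises the spatial resolution, and the final stride-1 slim.conv2d maps 32 feature channels to a 3-channel image squashed into [-1, 1] by tf.nn.tanh. A minimal wiring sketch, assuming TF 1.x, a model instance `ae` that exposes this decoder alongside a matching encoder, and a 128x128 input; all of these names and sizes are assumptions, not taken from the snippet:

import tensorflow as tf

# Hypothetical wiring; `ae`, its `encoder` method and the 128x128 input size are assumptions.
images = tf.placeholder(tf.float32, [None, 128, 128, 3], name='images')
code = ae.encoder(images, training=True)                   # bottleneck features
recon = ae.decoder(code, training=True)                    # 3-channel output in [-1, 1]
recon_eval = ae.decoder(code, reuse=True, training=False)  # same variables, eval-mode graph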
Code Example #2
File: SDNet_avgDisc.py    Project: SimuJenni/SemDefNet
    def generator(self, net, drop_mask, reuse=None, training=True):
        """Builds a generator with the given inputs. Noise is induced in all convolutional layers.

        Args:
            net: Input to the generator (i.e. cartooned image and/or edge-map)
            reuse: Whether to reuse already defined variables
            training: Whether in train or eval mode.

        Returns:
            Encoding of the input.
        """
        f_dims = DEFAULT_FILTER_DIMS
        res_dim = DEFAULT_FILTER_DIMS[self.num_layers - 1]
        with tf.variable_scope('generator', reuse=reuse):
            with slim.arg_scope(toon_net_argscope(padding='SAME', training=training)):
                net_in = net
                for i in range(self.num_res_layers):
                    net = res_block_bottleneck(net, res_dim, res_dim // 4, noise_channels=32, scope='res_{}'.format(i))
                    net = net_in + (1.0 - drop_mask) * net
                for l in range(0, self.num_layers - 1):
                    net = up_conv2d(net, num_outputs=f_dims[self.num_layers - l - 2], scope='deconv_{}'.format(l))
                net = tf.image.resize_images(net, (self.im_shape[0], self.im_shape[1]),
                                             tf.image.ResizeMethod.NEAREST_NEIGHBOR)
                net = slim.conv2d(net, num_outputs=32, scope='deconv_{}'.format(self.num_layers), stride=1)
                net = slim.conv2d(net, num_outputs=3, scope='deconv_{}'.format(self.num_layers + 1), stride=1,
                                  activation_fn=tf.nn.tanh, normalizer_fn=None)
                return net
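The gating line net = net_in + (1.0 - drop_mask) * net means the residual branch only contributes where the mask is 0; where drop_mask is 1, the features entering the residual stack pass through unchanged. A tiny NumPy sketch of the same arithmetic (values and shapes are purely illustrative):

import numpy as np

net_in    = np.array([[1.0, 2.0], [3.0, 4.0]])   # features entering the residual stack
residual  = np.array([[0.5, 0.5], [0.5, 0.5]])   # output of a res block
drop_mask = np.array([[1.0, 0.0], [1.0, 0.0]])   # 1 = suppress residual, 0 = let it through

out = net_in + (1.0 - drop_mask) * residual
# out == [[1.0, 2.5], [3.0, 4.5]]: masked positions keep net_in,
# unmasked positions get the residual added on top.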
Code Example #3
    def decoder(self, net, reuse=None, training=True):
        """Builds a decoder on top of net.

        Args:
            net: Input to the decoder (output of encoder)
            reuse: Whether to reuse already defined variables
            training: Whether in train or eval mode.

        Returns:
            Decoded image with 3 channels.
        """
        f_dims = DEFAULT_FILTER_DIMS
        with tf.variable_scope('decoder', reuse=reuse):
            with slim.arg_scope(toon_net_argscope(padding='SAME', training=training)):
                for l in range(0, self.num_layers - 1):
                    net = up_conv2d(net, num_outputs=f_dims[self.num_layers - l - 2], scope='deconv_{}'.format(l))
                net = tf.image.resize_images(net, (self.im_shape[0], self.im_shape[1]),
                                             tf.image.ResizeMethod.NEAREST_NEIGHBOR)
                net = slim.conv2d(net, num_outputs=32, scope='deconv_{}'.format(self.num_layers), stride=1)
                net = slim.conv2d(net, num_outputs=3, scope='deconv_{}'.format(self.num_layers + 1), stride=1,
                                  activation_fn=tf.nn.tanh, normalizer_fn=None)
                return net
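In this variant the last resolution step is a plain nearest-neighbour resize to the target image size, followed by stride-1 convolutions, rather than another learned up_conv2d. A self-contained TF 1.x sketch of just that resize-then-convolve pattern (the 32x32 -> 64x64 shapes, kernel size and scope name are illustrative assumptions):

import tensorflow as tf
slim = tf.contrib.slim

feat = tf.placeholder(tf.float32, [None, 32, 32, 64])
up = tf.image.resize_images(feat, (64, 64), tf.image.ResizeMethod.NEAREST_NEIGHBOR)
rgb = slim.conv2d(up, num_outputs=3, kernel_size=[3, 3], stride=1,
                  activation_fn=tf.nn.tanh, normalizer_fn=None, scope='to_rgb')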
Code Example #4
    def generator(self, net, drop_mask, reuse=None, training=True):
        """Builds the generator by interleaving trainable repair layers with reused
        decoder layers of the auto-encoder (reuse=True, training=False).

        Args:
            net: Input to the generator
            drop_mask: Mask passed to the repair layers; it is upsampled alongside the feature maps
            reuse: Whether to reuse already defined variables
            training: Whether in train or eval mode.

        Returns:
            Generated image with 3 channels.
        """
        f_dims = DEFAULT_FILTER_DIMS
        num_layers = self.ae.num_layers
        with tf.variable_scope('generator', reuse=reuse):
            with slim.arg_scope(
                    sdnet_argscope(activation=self.activation,
                                   padding='SAME',
                                   training=training)):
                net = repair_res_layer(net,
                                       drop_mask,
                                       f_dims[num_layers - 1],
                                       0,
                                       activation_fn=self.activation,
                                       scope='repair_0')

        for l in range(0, num_layers - 1):
            with tf.variable_scope('decoder', reuse=True):
                with slim.arg_scope(
                        ae_argscope(activation=self.activation,
                                    padding='SAME',
                                    training=False)):
                    net = up_conv2d(net,
                                    num_outputs=f_dims[num_layers - l - 2],
                                    scope='deconv_{}'.format(l))
                drop_mask = upsample_mask(drop_mask)
            with tf.variable_scope('generator', reuse=reuse):
                with slim.arg_scope(
                        sdnet_argscope(activation=self.activation,
                                       padding='SAME',
                                       training=training)):
                    net = repair_res_layer(net,
                                           drop_mask,
                                           f_dims[num_layers - l - 2],
                                           0,
                                           activation_fn=self.activation,
                                           scope='repair_{}'.format(l + 1))

        with tf.variable_scope('decoder', reuse=True):
            with slim.arg_scope(
                    ae_argscope(activation=self.activation,
                                padding='SAME',
                                training=False)):
                net = up_conv2d(net,
                                num_outputs=32,
                                scope='deconv_{}'.format(num_layers))
            drop_mask = upsample_mask(drop_mask)

        with tf.variable_scope('generator', reuse=reuse):
            with slim.arg_scope(
                    sdnet_argscope(activation=self.activation,
                                   padding='SAME',
                                   training=training)):
                net = repair_res_layer(net,
                                       drop_mask,
                                       32,
                                       0,
                                       activation_fn=self.activation,
                                       scope='repair_{}'.format(num_layers))

        with tf.variable_scope('decoder', reuse=True):
            with slim.arg_scope(
                    ae_argscope(activation=self.activation,
                                padding='SAME',
                                training=False)):
                net = slim.conv2d(net,
                                  num_outputs=3,
                                  scope='deconv_{}'.format(num_layers + 1),
                                  stride=1,
                                  activation_fn=tf.nn.tanh,
                                  normalizer_fn=None)
        return net
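The generator above alternates between two variable scopes: the 'decoder' blocks are entered with reuse=True and training=False, so the auto-encoder's deconvolution variables are reused rather than redefined, while the repair_res_layer blocks live in the trainable 'generator' scope. After every decoder upsampling the drop mask is upsampled too, so it keeps matching the feature-map resolution. upsample_mask is a project helper whose implementation is not shown in these snippets; a plausible sketch under that assumption (the real helper may differ):

import tensorflow as tf

def upsample_mask(mask):
    # Assumed behaviour: double the spatial size of an [N, H, W, C] mask with
    # nearest-neighbour resizing so it tracks the feature maps produced by up_conv2d.
    # Sketch only; the project's actual upsample_mask may differ.
    h, w = mask.get_shape().as_list()[1:3]
    return tf.image.resize_images(mask, (2 * h, 2 * w),
                                  tf.image.ResizeMethod.NEAREST_NEIGHBOR)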