    def discriminator(self, image, is_training, reuse=False):
        with tf.variable_scope("discriminator"):
            if reuse:
                tf.get_variable_scope().reuse_variables()
            # [batch,256,256,1] -> [batch,128,128,64]
            h0 = lrelu(conv2d(image, self.discriminator_dim,
                              scope="d_h0_conv"))
            # [batch,128,128,64] -> [batch,64,64,64*2]
            h1 = lrelu(
                batch_norm(conv2d(h0,
                                  self.discriminator_dim * 2,
                                  scope="d_h1_conv"),
                           is_training,
                           scope="d_bn_1"))
            # [batch,64,64,64*2] -> [batch,32,32,64*4]
            h2 = lrelu(
                batch_norm(conv2d(h1,
                                  self.discriminator_dim * 4,
                                  scope="d_h2_conv"),
                           is_training,
                           scope="d_bn_2"))
            # [batch,32,32,64*4] -> [batch,32,32,64*8] (stride 1, SAME padding)
            h3 = lrelu(
                batch_norm(conv2d(h2,
                                  self.discriminator_dim * 8,
                                  sh=1,
                                  sw=1,
                                  scope="d_h3_conv"),
                           is_training,
                           scope="d_bn_3"))

            # single logit for the real/fake decision
            fc1 = fc(tf.reshape(h3, [self.batch_size, -1]), 1, scope="d_fc1")

            # return both the sigmoid probability and the raw logits
            return tf.sigmoid(fc1), fc1
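# ---------------------------------------------------------------------------
# Usage sketch (not part of the original source): a discriminator like the
# one above is typically built twice, sharing weights via reuse=True, and the
# raw logits (second return value) feed a sigmoid cross-entropy GAN loss.
# `model`, `real_images`, and `fake_images` are illustrative names.
real_prob, real_logits = model.discriminator(real_images, is_training=True)
fake_prob, fake_logits = model.discriminator(fake_images, is_training=True,
                                             reuse=True)
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=real_logits, labels=tf.ones_like(real_logits)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=fake_logits, labels=tf.zeros_like(fake_logits)))
d_loss = d_loss_real + d_loss_fake
# ---------------------------------------------------------------------------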
    def _make_descriminator(self, input, phase_train):
        conv1 = ops.batch_norm(ops.conv2d(input, self.df_dim,
                                          name='d_h0_conv'),
                               name='d_bn0',
                               phase_train=phase_train)
        h0 = ops.lrelu(conv1)
        h1 = ops.lrelu(
            ops.batch_norm(ops.conv2d(h0, self.df_dim * 2, name='d_h1_conv'),
                           name='d_bn1',
                           phase_train=phase_train))
        #h2 = ops.lrelu(ops.batch_norm(ops.conv2d(h1, self.df_dim*4, name='d_h2_conv'), name='d_bn2'))
        #h3 = ops.lrelu(ops.batch_norm(ops.conv2d(h2, self.df_dim*8, name='d_h3_conv'), name='d_bn3'))
        # flatten and project to a single logit; the original mistakenly
        # called ops.lrelu here with a width and scope, which matches the
        # signature of ops.linear
        h2 = ops.linear(tf.reshape(h1, [self.batch_size, -1]), 1, 'd_h1_lin')
        return h2
    def _make_generator(self, input, phase_train):
        s_h, s_w = self.img_size, self.img_size
        s_h2, s_w2 = (self._conv_out_size_same(s_h, 2),
                      self._conv_out_size_same(s_w, 2))
        s_h4, s_w4 = (self._conv_out_size_same(s_h2, 2),
                      self._conv_out_size_same(s_w2, 2))
        #s_h8, s_w8 = self._conv_out_size_same(s_h4, 2), self._conv_out_size_same(s_w4, 2)
        #s_h16, s_w16 = self._conv_out_size_same(s_h8, 2), self._conv_out_size_same(s_w8, 2)
        # project `z` and reshape
        self.z_, self.h0_w, self.h0_b = ops.linear(input,
                                                   self.gf_dim * 8 * s_h4 *
                                                   s_w4,
                                                   'g_h0_lin',
                                                   with_w=True)
        normalized_value = ops.batch_norm(self.z_,
                                          name='g_bn0',
                                          axes=[0],
                                          phase_train=phase_train)

        self.h0 = tf.reshape(normalized_value,
                             [-1, s_h4, s_w4, self.gf_dim * 8])

        h0 = ops.lrelu(self.h0)

        self.h1, self.h1_w, self.h1_b = ops.deconv2d(
            h0, [self.batch_size, s_h2, s_w2, self.gf_dim * 4],
            name='g_h1',
            with_w=True)
        h1 = ops.lrelu(
            ops.batch_norm(self.h1, name='g_bn1', phase_train=phase_train))

        # h2, self.h2_w, self.h2_b = ops.deconv2d(
        #     h1, [self.batch_size, s_h4, s_w4, self.gf_dim*2], name='g_h2', with_w=True)
        # h2 = tf.nn.relu(ops.batch_norm(h2, name='g_bn2'))
        #
        # h3, self.h3_w, self.h3_b = ops.deconv2d(
        #     h2, [self.batch_size, s_h2, s_w2, self.gf_dim*1], name='g_h3', with_w=True)
        # h3 = tf.nn.relu(ops.batch_norm(h3, name='g_bn3'))

        h2, self.h2_w, self.h2_b = ops.deconv2d(
            h1, [self.batch_size, s_h, s_w, self.c_dim],
            name='g_h4',
            with_w=True)
        # with leak=0 this lrelu reduces to a plain ReLU on the output layer
        h2_non_linear = ops.lrelu(h2, leak=0)
        return h2_non_linear
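# ---------------------------------------------------------------------------
# Wiring sketch (not part of the original source): how the two builders above
# are typically connected inside the model's graph-construction method.
# `self.z_dim` and `real_images` are assumed names.
z = tf.placeholder(tf.float32, [self.batch_size, self.z_dim], name='z')
phase_train = tf.placeholder(tf.bool, name='phase_train')
fake_images = self._make_generator(z, phase_train)
with tf.variable_scope('discriminator'):
    d_real_logits = self._make_descriminator(real_images, phase_train)
with tf.variable_scope('discriminator', reuse=True):
    d_fake_logits = self._make_descriminator(fake_images, phase_train)
# ---------------------------------------------------------------------------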
# encode_layer and decode_layer are helper closures defined inside a U-Net
# generator; `is_training`, `encode_layers`, and `self.batch_size` are
# captured from the enclosing scope.
def encode_layer(x, output_filters, layer):
    # downsample, normalize, and record the activation for skip connections
    act = lrelu(x)
    conv = conv2d(act,
                  output_filters=output_filters,
                  scope="g_e%d_conv" % layer)
    enc = batch_norm(conv, is_training, scope="g_e%d_bn" % layer)
    encode_layers["e%d" % layer] = enc
    return enc
def decode_layer(x,
                 output_width,
                 output_filters,
                 layer,
                 enc_layer,
                 dropout=False,
                 do_concat=True):
    dec = deconv2d(lrelu(x),
                   [self.batch_size, output_width, output_width,
                    output_filters],
                   scope="g_d%d_deconv" % layer)
    if layer != 8:
        # IMPORTANT: skip batch norm only on the final (8th) layer;
        # normalizing every earlier layer keeps the GAN stable
        dec = batch_norm(dec,
                         is_training,
                         scope="g_d%d_bn" % layer)
    if dropout:
        dec = tf.nn.dropout(dec, 0.5)
    if do_concat:
        # skip connection: concatenate the matching encoder activation
        dec = tf.concat([dec, enc_layer], 3)
    return dec
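# ---------------------------------------------------------------------------
# Assembly sketch (not part of the original source): these two closures
# implement the pix2pix-style U-Net. A hedged wiring for a 256x256 input;
# `images`, `self.generator_dim`, and `output_filters` are assumed names.
e1 = conv2d(images, self.generator_dim, scope="g_e1_conv")      # 256 -> 128
encode_layers["e1"] = e1
e2 = encode_layer(e1, self.generator_dim * 2, 2)                # 128 -> 64
e3 = encode_layer(e2, self.generator_dim * 4, 3)                # 64 -> 32
e4 = encode_layer(e3, self.generator_dim * 8, 4)                # 32 -> 16
e5 = encode_layer(e4, self.generator_dim * 8, 5)                # 16 -> 8
e6 = encode_layer(e5, self.generator_dim * 8, 6)                # 8 -> 4
e7 = encode_layer(e6, self.generator_dim * 8, 7)                # 4 -> 2
e8 = encode_layer(e7, self.generator_dim * 8, 8)                # 2 -> 1

d1 = decode_layer(e8, 2, self.generator_dim * 8, 1, e7, dropout=True)
d2 = decode_layer(d1, 4, self.generator_dim * 8, 2, e6, dropout=True)
d3 = decode_layer(d2, 8, self.generator_dim * 8, 3, e5, dropout=True)
d4 = decode_layer(d3, 16, self.generator_dim * 8, 4, e4)
d5 = decode_layer(d4, 32, self.generator_dim * 4, 5, e3)
d6 = decode_layer(d5, 64, self.generator_dim * 2, 6, e2)
d7 = decode_layer(d6, 128, self.generator_dim, 7, e1)
d8 = decode_layer(d7, 256, output_filters, 8, None, do_concat=False)
output = tf.nn.tanh(d8)     # final activation squashes to [-1, 1]
# ---------------------------------------------------------------------------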
Example #6
    def cnn(self, input_images, output_dim, phase=True, bn=False, reuse=None):
        h1 = tf.layers.conv2d(
            input_images,
            filters=16,
            kernel_size=[5, 5],
            strides=2,
            padding="same",
            activation=None if bn else ops.lrelu,
            name="rec_conv_h1",
            kernel_initializer=tf.contrib.layers.xavier_initializer(),
            reuse=reuse)  # 112x112x1 -> 56x56x16
        if bn:
            h1bn = tf.layers.batch_normalization(h1,
                                                 center=True,
                                                 scale=True,
                                                 training=phase,
                                                 name='rec_conv_h1_bn',
                                                 reuse=reuse)

            h1bnact = ops.lrelu(h1bn)
        else:
            h1bnact = h1

        h2 = tf.layers.conv2d(
            h1bnact,
            filters=32,
            kernel_size=[5, 5],
            strides=2,
            padding="same",
            activation=None if bn else ops.lrelu,
            name="rec_conv_h2",
            kernel_initializer=tf.contrib.layers.xavier_initializer(),
            reuse=reuse)  # 56x56x16 -> 28x28x32

        if bn:
            h2bn = tf.layers.batch_normalization(h2,
                                                 center=True,
                                                 scale=True,
                                                 training=phase,
                                                 name='rec_conv_h2_bn',
                                                 reuse=reuse)

            h2bnact = ops.lrelu(h2bn)
        else:
            h2bnact = h2

        h3 = tf.layers.conv2d(
            h2bnact,
            filters=64,
            kernel_size=[5, 5],
            strides=2,
            padding="same",
            activation=None if bn else ops.lrelu,
            name="rec_conv_h3",
            kernel_initializer=tf.contrib.layers.xavier_initializer(),
            reuse=reuse)  # 28x28x32 -> 14x14x64

        if bn:
            h3bn = tf.layers.batch_normalization(h3,
                                                 center=True,
                                                 scale=True,
                                                 training=phase,
                                                 name='rec_conv_h3_bn',
                                                 reuse=reuse)

            h3bnact = ops.lrelu(h3bn)
        else:
            h3bnact = h3

        h4 = tf.layers.conv2d(
            h3bnact,
            filters=128,
            kernel_size=[5, 5],
            strides=2,
            padding="same",
            activation=None if bn else ops.lrelu,
            name="rec_conv_h4",
            kernel_initializer=tf.contrib.layers.xavier_initializer(),
            reuse=reuse)  # 14x14x64 ->  7x7x128

        if bn:
            h4bn = tf.layers.batch_normalization(h4,
                                                 center=True,
                                                 scale=True,
                                                 training=phase,
                                                 name='rec_conv_h4_bn',
                                                 reuse=reuse)

            h4bnact = ops.lrelu(h4bn)
        else:
            h4bnact = h4

        # four stride-2 convs shrink each spatial dimension by a factor of 16
        h4_flat = tf.reshape(h4bnact, [
            -1,
            (self.args.img_size_w // 16) * (self.args.img_size_h // 16) * 128
        ])

        z = tf.layers.dense(h4_flat,
                            units=output_dim,
                            name="z_rec",
                            reuse=reuse)

        return z
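# ---------------------------------------------------------------------------
# Training caveat (not part of the original source): with bn=True,
# tf.layers.batch_normalization registers its moving-average updates in
# tf.GraphKeys.UPDATE_OPS, and they only run if the train op depends on
# them. A minimal sketch; `loss` is an assumed name.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)
# ---------------------------------------------------------------------------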
Example #7
    def forward_pass(self,
                     state_in,
                     reshape=True,
                     sigmoid_out=False,
                     reuse=None):
        self.state_in = state_in

        shape_in = self.state_in.get_shape().as_list()

        # Get number of input channels for weight/bias init
        channels_in = shape_in[-1]

        with tf.variable_scope(self.scope, reuse=reuse):

            if reshape:
                # Reshape [batch_size, traj_len, H, W, C] into [batch_size*traj_len, H, W, C]
                self.state_in = tf.reshape(
                    self.state_in, [-1, shape_in[2], shape_in[3], shape_in[4]])

            # Every layer uses a uniform init bounded by 1/sqrt(fan_in);
            # hoisting the repeated initializer expressions into one helper
            # preserves the original behavior
            def uniform_init(fan_in):
                bound = 1.0 / tf.sqrt(float(fan_in))
                return tf.random_uniform_initializer(-bound, bound)

            init = uniform_init(channels_in * self.kernels[0] * self.kernels[0])
            self.conv1 = conv2d(self.state_in,
                                self.num_filters,
                                self.kernels[0],
                                self.strides[0],
                                kernel_init=init,
                                bias_init=init,
                                scope='conv1')
            self.conv1 = lrelu(self.conv1, self.lrelu_alpha, scope='conv1')

            init = uniform_init(self.num_filters * self.kernels[1] * self.kernels[1])
            self.conv2 = conv2d(self.conv1,
                                self.num_filters,
                                self.kernels[1],
                                self.strides[1],
                                kernel_init=init,
                                bias_init=init,
                                scope='conv2')
            self.conv2 = lrelu(self.conv2, self.lrelu_alpha, scope='conv2')

            init = uniform_init(self.num_filters * self.kernels[2] * self.kernels[2])
            self.conv3 = conv2d(self.conv2,
                                self.num_filters,
                                self.kernels[2],
                                self.strides[2],
                                kernel_init=init,
                                bias_init=init,
                                scope='conv3')
            self.conv3 = lrelu(self.conv3, self.lrelu_alpha, scope='conv3')

            init = uniform_init(self.num_filters * self.kernels[3] * self.kernels[3])
            self.conv4 = conv2d(self.conv3,
                                self.num_filters,
                                self.kernels[3],
                                self.strides[3],
                                kernel_init=init,
                                bias_init=init,
                                scope='conv4')
            self.conv4 = lrelu(self.conv4, self.lrelu_alpha, scope='conv4')

            self.flatten = flatten(self.conv4)

            # note: the original uses num_filters (not the flattened size)
            # as the fan-in for this layer
            init = uniform_init(self.num_filters)
            self.dense = dense(self.flatten,
                               self.dense_size,
                               kernel_init=init,
                               bias_init=init)

            init = uniform_init(self.dense_size)
            self.output = dense(self.dense,
                                1,
                                kernel_init=init,
                                bias_init=init,
                                scope='output')

            if sigmoid_out:
                self.output = tf.nn.sigmoid(self.output)

            if reshape:
                # Reshape 1d reward output [batch_size*traj_len] into batches [batch_size, traj_len]
                self.output = tf.reshape(self.output, [-1, shape_in[1]])

            self.network_params = tf.trainable_variables(scope=self.scope)

        return self.output
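# ---------------------------------------------------------------------------
# Usage sketch (not part of the original source): a per-step reward network
# like this is commonly trained from trajectory preferences; rewards are
# summed per trajectory and a softmax cross-entropy picks the preferred one.
# `net`, `traj_i`, `traj_j` ([batch, traj_len, H, W, C] placeholders), and
# `prefs` (int label, 0 or 1 per pair) are assumed names.
rewards_i = net.forward_pass(traj_i)                # [batch, traj_len]
rewards_j = net.forward_pass(traj_j, reuse=True)    # shared weights
returns = tf.stack([tf.reduce_sum(rewards_i, axis=1),
                    tf.reduce_sum(rewards_j, axis=1)], axis=1)
pref_loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=returns,
                                                   labels=prefs))
# ---------------------------------------------------------------------------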
Example #8
def discriminator(params, x_init, reuse=False):
    """ Discriminator.

    Parameters
    ----------
    params: configuration object; `image_size` is read as an attribute.
    x_init: input tensor.
    reuse: bool, reuse the net if True.

    Returns
    -------
    x_gan: tensor, outputs for adversarial training.
    x_reg: tensor, outputs for gaze estimation.

    """

    layers = 5
    channel = 64
    image_size = params.image_size

    with tf.compat.v1.variable_scope('discriminator', reuse=reuse):

        # spatial size x channels: 64x64x3 -> 32x32x64 -> 16x16x128
        #                          -> 8x8x256 -> 4x4x512 -> 2x2x1024

        x = conv2d(x_init,
                   channel,
                   conv_filters_dim=4,
                   d_h=2,
                   d_w=2,
                   scope='conv_0',
                   pad=1,
                   use_bias=True)
        x = lrelu(x)

        for i in range(1, layers):
            x = conv2d(x,
                       channel * 2,
                       conv_filters_dim=4,
                       d_h=2,
                       d_w=2,
                       scope='conv_%d' % i,
                       pad=1,
                       use_bias=True)
            x = lrelu(x)
            channel = channel * 2

        filter_size = int(image_size / 2**layers)

        x_gan = conv2d(x,
                       1,
                       conv_filters_dim=filter_size,
                       d_h=1,
                       d_w=1,
                       pad=1,
                       scope='conv_logit_gan',
                       use_bias=False)

        x_reg = conv2d(x,
                       2,
                       conv_filters_dim=filter_size,
                       d_h=1,
                       d_w=1,
                       pad=0,
                       scope='conv_logit_reg',
                       use_bias=False)
        x_reg = tf.reshape(x_reg, [-1, 2])

        return x_gan, x_reg
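# ---------------------------------------------------------------------------
# Usage sketch (not part of the original source): one way to consume the two
# heads, with an LSGAN-style adversarial loss on x_gan and an L2 regression
# on x_reg. `real_images`, `fake_images`, `gaze` ([batch, 2] angle targets),
# and `lambda_reg` are assumed names.
real_gan, real_reg = discriminator(params, real_images)
fake_gan, _ = discriminator(params, fake_images, reuse=True)
d_adv = (tf.reduce_mean(tf.square(real_gan - 1.0)) +
         tf.reduce_mean(tf.square(fake_gan)))
d_reg = tf.reduce_mean(tf.square(real_reg - gaze))  # gaze regression head
d_loss = d_adv + lambda_reg * d_reg
# ---------------------------------------------------------------------------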