Example No. 1
    def generator(z, y, image_size, batch_size, y_dim, gfc_dim, gf_dim, c_dim):
        with tf.variable_scope("generator"):
            g_bn0 = BatchNorm(name='g_bn0')
            g_bn1 = BatchNorm(name='g_bn1')
            g_bn2 = BatchNorm(name='g_bn2')

            # taken from https://github.com/carpedm20/DCGAN-tensorflow/blob/master/model.py
            # target spatial size, plus the sizes after one and two stride-2 upsamplings
            s_h, s_w = image_size[1], image_size[0]
            s_h2, s_h4 = int(s_h / 2), int(s_h / 4)
            s_w2, s_w4 = int(s_w / 2), int(s_w / 4)

            # reshape the labels to a 1x1 map so they can be tiled onto feature maps
            yb = tf.reshape(y, [batch_size, 1, 1, y_dim])
            # condition the latent vector on the labels
            z = tf.concat([z, y], 1)

            # fully connected stem; the labels are re-concatenated after each layer
            h0 = tf.nn.relu(g_bn0(linear(z, gfc_dim, 'g_h0_lin')))
            h0 = tf.concat([h0, y], 1)

            h1 = tf.nn.relu(
                g_bn1(linear(h0, gf_dim * 2 * s_h4 * s_w4, 'g_h1_lin')))
            h1 = tf.reshape(h1, [batch_size, s_h4, s_w4, gf_dim * 2])

            h1 = conv_cond_concat(h1, yb)

            h2 = tf.nn.relu(
                g_bn2(
                    deconv2d(h1, [batch_size, s_h2, s_w2, gf_dim * 2],
                             name='g_h2')))
            h2 = conv_cond_concat(h2, yb)

            # final upsampling to full resolution; sigmoid maps pixels to (0, 1)
            h3 = tf.nn.sigmoid(
                deconv2d(h2, [batch_size, s_h, s_w, c_dim], name='g_h3'))

            return tf.identity(h3, 'generator')
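All four examples lean on small helpers from the DCGAN-tensorflow ops.py (BatchNorm, linear, conv2d, deconv2d, conv_cond_concat, lrelu) that are not shown on this page. The one doing the conditioning trick is conv_cond_concat; a minimal sketch of what it is assumed to do, tiling the 1x1 label map across the spatial dimensions and appending it as extra channels:

    import tensorflow as tf

    def conv_cond_concat(x, y):
        # x: [batch, h, w, c] feature map; y: [batch, 1, 1, y_dim] label map.
        # Multiplying y by a ones tensor broadcasts it to [batch, h, w, y_dim],
        # which is then concatenated on the channel axis.
        x_shapes = x.get_shape()
        y_shapes = y.get_shape()
        return tf.concat(
            [x, y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])],
            3)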
Example No. 2
    def discriminator(x,
                      y,
                      batch_size,
                      y_dim,
                      c_dim,
                      df_dim,
                      dfc_dim,
                      reuse=False):
        with tf.variable_scope("discriminator") as scope:
            if reuse:
                scope.reuse_variables()

            d_bn1 = BatchNorm(name='d_bn1')
            d_bn2 = BatchNorm(name='d_bn2')

            # tile the labels onto the input image as extra channels
            yb = tf.reshape(y, [batch_size, 1, 1, y_dim])
            x = conv_cond_concat(x, yb)

            h0 = lrelu(conv2d(x, c_dim + y_dim, name='d_h0_conv'))
            h0 = conv_cond_concat(h0, yb)

            h1 = lrelu(d_bn1(conv2d(h0, df_dim + y_dim, name='d_h1_conv')))
            h1 = tf.reshape(h1, [batch_size, -1])
            h1 = tf.concat([h1, y], 1)

            h2 = lrelu(d_bn2(linear(h1, dfc_dim, 'd_h2_lin')))
            h2 = tf.concat([h2, y], 1)

            # raw logit; return both the probability and the logit (for the loss)
            h3 = linear(h2, 1, 'd_h3_lin')

            return tf.nn.sigmoid(h3), h3
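Example No. 2 also assumes an lrelu helper; a minimal sketch, with the leak factor of 0.2 that DCGAN implementations commonly use (the value is an assumption, not stated in the snippet):

    def lrelu(x, leak=0.2, name='lrelu'):
        # leaky ReLU: positives pass through, negatives are scaled by `leak`
        return tf.maximum(x, leak * x)

With both networks defined, they wire together in the usual TF1 fashion. A hedged sketch, where every hyperparameter value below is an assumption chosen for illustration (MNIST-like 28x28 grayscale images):

    # hypothetical hyperparameters; none of these values come from the snippets
    batch_size, z_dim, y_dim = 64, 100, 10
    s_h, s_w, c_dim = 28, 28, 1
    gf_dim, df_dim, gfc_dim, dfc_dim = 64, 64, 1024, 1024

    z = tf.placeholder(tf.float32, [batch_size, z_dim], name='z')
    y = tf.placeholder(tf.float32, [batch_size, y_dim], name='y')
    images = tf.placeholder(tf.float32, [batch_size, s_h, s_w, c_dim], name='x')

    # image_size is (width, height), matching the indexing inside generator()
    G = generator(z, y, (s_w, s_h), batch_size, y_dim, gfc_dim, gf_dim, c_dim)
    D_real, D_real_logits = discriminator(images, y, batch_size, y_dim, c_dim,
                                          df_dim, dfc_dim)
    D_fake, D_fake_logits = discriminator(G, y, batch_size, y_dim, c_dim,
                                          df_dim, dfc_dim, reuse=True)

    # standard sigmoid cross-entropy GAN losses on the raw logits
    d_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=D_real_logits, labels=tf.ones_like(D_real_logits)))
    d_loss += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=D_fake_logits, labels=tf.zeros_like(D_fake_logits)))
    g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=D_fake_logits, labels=tf.ones_like(D_fake_logits)))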
Example No. 3
    def create_generator(self, z, y, scope_name, is_training=True, reuse=False):
        with tf.variable_scope(scope_name) as scope:
            if reuse:
                scope.reuse_variables()

            batch_norm_params = {
                # the layers.batch_norm defaults are decay=0.999, epsilon=0.001
                'decay': 0.9,  # `decay` is what other batch-norm APIs call `momentum`
                'updates_collections': None,
                'epsilon': 1e-5,
                'scale': True,
                'is_training': is_training,
                'scope': 'batch_norm',
            }

            # the first argument lists the layer types these defaults apply to
            with arg_scope([layers.conv2d, layers.conv2d_transpose, layers.fully_connected],
                           normalizer_fn=layers.batch_norm,
                           normalizer_params=batch_norm_params,
                           weights_initializer=layers.xavier_initializer(uniform=False),
                           biases_initializer=tf.constant_initializer(0.0)
                           ):
                # taken from https://github.com/carpedm20/DCGAN-tensorflow/blob/master/model.py
                s_h, s_w = self.image_size[1], self.image_size[0]
                s_h2, s_h4 = int(s_h / 2), int(s_h / 4)
                s_w2, s_w4 = int(s_w / 2), int(s_w / 4)

                # labels as a 1x1 map for tiling; condition z on the labels
                yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])
                z = tf.concat([z, y], 1)

                h0 = slim.fully_connected(z,
                                          num_outputs=self.gfc_dim,
                                          scope='g_h0_lin',
                                          activation_fn=slim.nn.relu,
                                          )

                h0 = tf.concat([h0, y], 1)

                h1 = slim.fully_connected(h0,
                                          num_outputs=self.gf_dim * 2 * s_h4 * s_w4,
                                          scope='g_h1_lin',
                                          activation_fn=slim.nn.relu,
                                          )

                h1 = tf.reshape(h1, [self.batch_size, s_h4, s_w4, self.gf_dim * 2])

                h1 = conv_cond_concat(h1, yb)

                h2 = slim.conv2d_transpose(h1,
                                           num_outputs=self.gf_dim * 2,
                                           scope='g_h2',
                                           kernel_size=[5, 5],
                                           stride=2,
                                           activation_fn=slim.nn.relu,
                                           )

                h2 = conv_cond_concat(h2, yb)

                # no batch norm on the output layer; sigmoid keeps pixels in (0, 1)
                h3 = slim.conv2d_transpose(h2,
                                           num_outputs=self.c_dim,
                                           scope='g_h3',
                                           kernel_size=[5, 5],
                                           stride=2,
                                           normalizer_fn=None,
                                           activation_fn=slim.nn.sigmoid
                                           )

                return h3
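A hypothetical call of this slim-based generator (the model instance and the z dimension of 100 are assumptions, not from the source):

    z = tf.placeholder(tf.float32, [model.batch_size, 100], name='z')
    y = tf.placeholder(tf.float32, [model.batch_size, model.y_dim], name='y')
    fake = model.create_generator(z, y, scope_name='generator')
    # fake has shape [batch_size, image_size[1], image_size[0], c_dim],
    # with pixel values in (0, 1) from the final sigmoid

Note that s_h2/s_w2 are computed but never referenced in this version: each stride-2 conv2d_transpose doubles the spatial size on its own (s_h4 -> s_h2 -> s_h), whereas the raw deconv2d in Example No. 1 needed every intermediate output shape spelled out.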
Example No. 4
    def create_discriminator(self, x, y, scope_name, is_training=True, reuse=False):
        with tf.variable_scope(scope_name) as scope:
            if reuse:
                scope.reuse_variables()

            batch_norm_params = {
                # the layers.batch_norm defaults are decay=0.999, epsilon=0.001
                'decay': 0.9,  # `decay` is what other batch-norm APIs call `momentum`
                'updates_collections': None,
                'epsilon': 1e-5,
                'scale': True,
                'is_training': is_training,
                'scope': 'batch_norm',
            }

            with arg_scope([layers.conv2d, layers.conv2d_transpose, layers.fully_connected],
                           normalizer_fn=layers.batch_norm,
                           normalizer_params=batch_norm_params,
                           weights_initializer=layers.xavier_initializer(uniform=False),
                           biases_initializer=tf.constant_initializer(0.0)
                           ):
                yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])
                x = conv_cond_concat(x, yb)

                # no batch norm on the first discriminator layer, per DCGAN practice
                h0 = slim.conv2d(x,
                                 num_outputs=self.c_dim + self.y_dim,
                                 scope='d_h0_conv',
                                 kernel_size=[5, 5],
                                 stride=[2, 2],
                                 normalizer_fn=None,
                                 activation_fn=lrelu,
                                 )

                h0 = conv_cond_concat(h0, yb)

                # no bias variables here: batch normalization already adds a learned offset (beta), which makes a conv bias redundant, see: https://stackoverflow.com/questions/46256747/can-not-use-both-bias-and-batch-normalization-in-convolution-layers
                h1 = slim.conv2d(h0,
                                 num_outputs=self.df_dim + self.y_dim,
                                 scope='d_h1_conv',
                                 kernel_size=[5, 5],
                                 stride=[2, 2],
                                 activation_fn=lrelu
                                 )

                h1 = tf.reshape(h1, [self.batch_size, -1])
                h1 = tf.concat([h1, y], 1)

                h2 = slim.fully_connected(h1,
                                          num_outputs=self.dfc_dim,
                                          scope='d_h2_lin',
                                          activation_fn=lrelu,
                                          )
                h2 = tf.concat([h2, y], 1)

                # final linear layer: no batch norm, no activation, raw logit out
                h3 = slim.fully_connected(h2,
                                          num_outputs=1,
                                          scope='d_h3_lin',
                                          normalizer_fn=None,
                                          activation_fn=None
                                          )

                return tf.nn.sigmoid(h3), h3
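To actually train either pair, the usual pattern is to split the trainable variables by scope and give each network its own optimizer, so discriminator updates never touch generator weights. A hedged sketch (the learning rate and beta1 are the common DCGAN values, assumed here; the scope prefixes must match the variable_scope names used above):

    t_vars = tf.trainable_variables()
    d_vars = [v for v in t_vars if v.name.startswith('discriminator')]
    g_vars = [v for v in t_vars if v.name.startswith('generator')]

    d_optim = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(d_loss, var_list=d_vars)
    g_optim = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(g_loss, var_list=g_vars)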