Example #1
    def generator(self, z):

        with tf.variable_scope("generator"):
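            # Work backwards from the target image size: each of the four
            # stride-2 deconv stages doubles the resolution, so generation
            # starts from a [s_h16, s_w16] feature map.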
            s_h, s_w = self.image_dims['height'], self.image_dims['width']
            s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
            s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
            s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
            s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)

            # Take the random vector z as input; a fully connected layer
            # produces an output of size [s_h16, s_w16, G_F_DIM * 8]
            g_h0_lin = ops.linear(z, G_F_DIM * 8 * s_h16 * s_w16, 'g_h0_lin')
            g_h0_re = tf.reshape(g_h0_lin, [-1, s_h16, s_w16, G_F_DIM * 8])
            g_bn0 = tf.layers.batch_normalization(g_h0_re, name='g_bn0', training=self.is_train)
            h0 = tf.nn.relu(g_bn0)

            g_h1 = ops.deconv2d(h0, [BATCH_SIZE, s_h8, s_w8, G_F_DIM * 4], name='g_h1')
            g_bn1 = tf.layers.batch_normalization(g_h1, name='g_bn1', training=self.is_train)
            h1 = tf.nn.relu(g_bn1)

            g_h2 = ops.deconv2d(h1, [BATCH_SIZE, s_h4, s_w4, G_F_DIM * 2], name='g_h2')
            g_bn2 = tf.layers.batch_normalization(g_h2, name='g_bn2', training=self.is_train)
            h2 = tf.nn.relu(g_bn2)

            g_h3 = ops.deconv2d(h2, [BATCH_SIZE, s_h2, s_w2, G_F_DIM * 1], name='g_h3')
            g_bn3 = tf.layers.batch_normalization(g_h3, name='g_bn3', training=self.is_train)
            h3 = tf.nn.relu(g_bn3)

            h4 = ops.deconv2d(h3, [BATCH_SIZE, s_h, s_w, self.image_dims['channel']], name='g_h4')

            # Use tanh as the output activation
            return tf.nn.tanh(h4)
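
The example assumes several helpers (conv_out_size_same and the ops module) that are not shown. The sketch below is a minimal assumption of what they look like, modeled on the ops module common in DCGAN-tensorflow-style code; only the call sites above constrain the signatures, so treat it as illustrative rather than the exact source.

import math

import tensorflow as tf


def conv_out_size_same(size, stride):
    # Spatial extent after a SAME-padded convolution with the given stride;
    # the generator walks this backwards to size its deconv stages.
    return int(math.ceil(float(size) / float(stride)))


def linear(input_, output_size, name='linear', stddev=0.02):
    # Fully connected layer over a [batch, in_dim] tensor.
    in_dim = input_.get_shape().as_list()[1]
    with tf.variable_scope(name):
        w = tf.get_variable('w', [in_dim, output_size],
                            initializer=tf.random_normal_initializer(stddev=stddev))
        b = tf.get_variable('b', [output_size],
                            initializer=tf.constant_initializer(0.0))
        return tf.matmul(input_, w) + b


def deconv2d(input_, output_shape, k_h=5, k_w=5, d_h=2, d_w=2,
             stddev=0.02, name='deconv2d'):
    # Transposed convolution with a 5x5 kernel and stride 2: each call
    # doubles the spatial resolution on the way up to the full image.
    with tf.variable_scope(name):
        w = tf.get_variable('w',
                            [k_h, k_w, output_shape[-1], int(input_.get_shape()[-1])],
                            initializer=tf.random_normal_initializer(stddev=stddev))
        deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape,
                                        strides=[1, d_h, d_w, 1])
        b = tf.get_variable('b', [output_shape[-1]],
                            initializer=tf.constant_initializer(0.0))
        return tf.nn.bias_add(deconv, b)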
Example #2
    def discriminator(self, image, reuse=False):
        with tf.variable_scope("discriminator") as scope:
            if reuse:
                scope.reuse_variables()

            # Convolve the input with a 5x5 filter and stride 2
            d_h0_conv = ops.conv2d(image, D_F_DIM, name='d_h0_conv')
            # The activation function is leaky ReLU
            h0 = ops.lrelu(d_h0_conv)

            d_h1_conv = ops.conv2d(h0, D_F_DIM * 2, name='d_h1_conv')
            d_bn1 = tf.layers.batch_normalization(d_h1_conv, name='d_bn1', training=self.is_train)
            h1 = ops.lrelu(d_bn1)

            d_h2_conv = ops.conv2d(h1, D_F_DIM * 4, name='d_h2_conv')
            d_bn2 = tf.layers.batch_normalization(d_h2_conv, name='d_bn2', training=self.is_train)
            h2 = ops.lrelu(d_bn2)

            d_h3_conv = ops.conv2d(h2, D_F_DIM * 8, name='d_h3_conv')
            d_bn3 = tf.layers.batch_normalization(d_h3_conv, name='d_bn3', training=self.is_train)
            h3 = ops.lrelu(d_bn3)

            # The last layer is fully connected
            h4 = ops.linear(tf.reshape(h3, [BATCH_SIZE, -1]), 1, 'd_h4_lin')

            return tf.nn.sigmoid(h4), h4
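
ops.conv2d and ops.lrelu mirror the helpers sketched after Example #1; again a plausible assumption constrained only by the call sites, not the exact source:

def conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2,
           stddev=0.02, name='conv2d'):
    # 5x5, stride-2 SAME convolution: halves the spatial resolution while
    # widening the channel dimension.
    with tf.variable_scope(name):
        w = tf.get_variable('w',
                            [k_h, k_w, int(input_.get_shape()[-1]), output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
        b = tf.get_variable('b', [output_dim],
                            initializer=tf.constant_initializer(0.0))
        return tf.nn.bias_add(conv, b)


def lrelu(x, leak=0.2):
    # Leaky ReLU with the usual DCGAN slope of 0.2.
    return tf.maximum(x, leak * x)

The discriminator returns both the sigmoid output and the raw logits because the numerically stable loss wants logits. Note also that tf.layers.batch_normalization(training=...) registers its moving-average updates in the UPDATE_OPS collection, and they must be attached to the train step explicitly or the inference-time statistics never update. A minimal wiring sketch, with hypothetical names (model, z, real_images) and the DCGAN-paper Adam defaults:

G = model.generator(z)
D_real, D_real_logits = model.discriminator(real_images)
D_fake, D_fake_logits = model.discriminator(G, reuse=True)

# Standard GAN losses computed on the logits.
d_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=D_real_logits, labels=tf.ones_like(D_real_logits)))
d_loss += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=D_fake_logits, labels=tf.zeros_like(D_fake_logits)))
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=D_fake_logits, labels=tf.ones_like(D_fake_logits)))

# Run the batch-norm updates together with the optimizer steps.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    t_vars = tf.trainable_variables()
    d_optim = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(
        d_loss, var_list=[v for v in t_vars if v.name.startswith('discriminator')])
    g_optim = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(
        g_loss, var_list=[v for v in t_vars if v.name.startswith('generator')])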