Example #1
    def inference(self, x, reuse=False):
        print(x.get_shape())
        # conv2d arguments = (scope_name, inputs, shape, bias_shape, stride, padding='VALID', wd=0.0, reuse=False, trainable=True, with_w=False)
        conv_h0, conv_h0_w, conv_h0_b = mp.conv2d(
            'd_conv_h0',
            x, [5, 5, x.get_shape()[-1], self.first_conv_dim],
            [self.first_conv_dim], [1, 2, 2, 1],
            padding='SAME',
            reuse=reuse,
            with_w=True)
        h0 = mp.lrelu(conv_h0)

        # conv2d arguments = (scope_name, inputs, shape, bias_shape, stride, padding='VALID', wd=0.0, reuse=False, trainable=True, with_w=False)
        conv_h1, conv_h1_w, conv_h1_b = mp.conv2d(
            'd_conv_h1',
            h0, [5, 5, h0.get_shape()[-1], self.first_conv_dim],
            [self.first_conv_dim], [1, 2, 2, 1],
            padding='SAME',
            reuse=reuse,
            with_w=True)
        h1 = mp.lrelu(conv_h1)

        # linear projection to a single logit
        # note: 7 * 7 * 64 assumes 28x28 inputs and first_conv_dim == 64
        # (two stride-2 'SAME' convs reduce 28 -> 14 -> 7)
        h2 = mp.linear_project('d_lin_project_h1',
                               tf.reshape(h1, [self.batch_size, 7 * 7 * 64]),
                               1,
                               reuse=reuse)
        return tf.nn.sigmoid(h2), h2
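
The two stride-2 'SAME' convolutions above halve each spatial dimension with ceiling division, which is how a 28x28 input reaches the hard-coded 7 * 7 * 64 flatten. A minimal, self-contained sketch of that arithmetic (the helper name is illustrative, not part of mp):

import math

def same_padding_output_size(n, stride):
    # TensorFlow 'SAME' padding: output size = ceil(input size / stride)
    return math.ceil(n / stride)

h = 28
for _ in ('d_conv_h0', 'd_conv_h1'):    # two stride-2 convolutions
    h = same_padding_output_size(h, 2)  # 28 -> 14 -> 7
print(h * h * 64)  # 3136, the flatten width when first_conv_dim == 64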
Example #2
    def inference(self, z, reuse=False, trainable=True):
        with tf.variable_scope("generator") as scope:
            if reuse:
                scope.reuse_variables()
            print("===G")
            # linear projection.
            z_, h0_w, self.h0_b = mp.linear_project('g_lin_project_h0',
                                                    z,
                                                    self.first_conv_dim * 8 * 4 * 4,
                                                    reuse=reuse,
                                                    with_w=True)
            # reshape for cnn inputs.
            h0 = tf.reshape(z_, [-1, 4, 4, self.first_conv_dim * 8])
            # batch norm
            h0 = tf.nn.relu(self.g_bn0(h0, trainable=trainable))
            # h0 = tf.nn.relu(mp.batch_norm(h0, scope_name='g_bn_h0', reuse=reuse, trainable=trainable))

            # deconv1 conv2d_transpose arguments = (scope_name, inputs, shape, output_shape, bias_shape, stride, padding='VALID', reuse=False, trainable=True, with_w=False)
            deconv_h1, deconv_h1_w, deconv_h1_b = mp.conv2d_transpose(
                'g_deconv_h1',
                h0, [5, 5, self.first_conv_dim * 8,
                     h0.get_shape()[-1]],
                [self.batch_size, 8, 8, self.first_conv_dim * 8],
                [self.first_conv_dim * 8], [1, 2, 2, 1],
                padding='SAME',
                reuse=reuse,
                with_w=True,
                trainable=trainable)
            h1 = tf.nn.relu(self.g_bn1(deconv_h1, trainable=trainable))
            # h1 = tf.nn.relu(mp.batch_norm(deconv_h1, scope_name='g_bn_h1', reuse=reuse, trainable=trainable))

            # deconv2 conv2d_transpose arguments = (scope_name, inputs, shape, output_shape, bias_shape, stride, padding='VALID', reuse=False, trainable=True, with_w=False)
            deconv_h2, deconv_h2_w, deconv_h2_b = mp.conv2d_transpose(
                'g_deconv_h2',
                h1, [5, 5, self.first_conv_dim * 4,
                     h1.get_shape()[-1]],
                [self.batch_size, 16, 16, self.first_conv_dim * 4],
                [self.first_conv_dim * 4], [1, 2, 2, 1],
                padding='SAME',
                reuse=reuse,
                with_w=True,
                trainable=trainable)
            print(deconv_h2.get_shape())
            h2 = tf.nn.relu(self.g_bn2(deconv_h2, trainable=trainable))
            # h2 = tf.nn.relu(mp.batch_norm(deconv_h2, scope_name='g_bn_h2', reuse=reuse, trainable=trainable))

            # 3rd deconvolution: project to the final 3-channel (RGB) output
            deconv_h3, deconv_h3_w, deconv_h3_b = mp.conv2d_transpose(
                'g_deconv_h3',
                h2, [5, 5, 3, h2.get_shape()[-1]],
                [self.batch_size, 32, 32, 3], [3], [1, 2, 2, 1],
                padding='SAME',
                reuse=reuse,
                with_w=True,
                trainable=trainable)
            print(deconv_h3.get_shape())
            return tf.nn.tanh(deconv_h3)
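
This generator doubles the spatial resolution at every step (4x4 -> 8x8 -> 16x16 -> 32x32) while shrinking the channel count, the standard DCGAN layout. A minimal sketch of one such upsampling step in plain TensorFlow 1.x, standing in for the mp.conv2d_transpose wrapper (variable names and initializer are illustrative assumptions):

import tensorflow as tf

batch_size, dim = 64, 64                    # illustrative sizes
h0 = tf.zeros([batch_size, 4, 4, dim * 8])  # reshaped projection, as above
# transpose-conv filters are [height, width, out_channels, in_channels],
# matching the shape argument passed to mp.conv2d_transpose
w = tf.get_variable('g_deconv_w', [5, 5, dim * 8, dim * 8],
                    initializer=tf.truncated_normal_initializer(stddev=0.02))
h1 = tf.nn.conv2d_transpose(h0, w,
                            output_shape=[batch_size, 8, 8, dim * 8],
                            strides=[1, 2, 2, 1],
                            padding='SAME')
print(h1.get_shape())  # (64, 8, 8, 512): stride 2 doubles height and width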
Example #3
    def inference(self, x, reuse=False, trainable=True):
        with tf.variable_scope("discriminator") as scope:
            if reuse:
                scope.reuse_variables()

            print("===D")
            print(x.get_shape())
            # conv2d arguments = (scope_name, inputs, shape, bias_shape, stride, padding='VALID', wd=0.0, reuse=False, trainable=True, with_w=False)
            conv_h0, conv_h0_w, conv_h0_b = mp.conv2d(
                'd_conv_h0',
                x, [5, 5, x.get_shape()[-1], self.first_conv_dim],
                [self.first_conv_dim], [1, 2, 2, 1],
                padding='SAME',
                reuse=reuse,
                with_w=True,
                trainable=trainable)
            h0 = mp.lrelu(conv_h0)
            print(h0.get_shape())
            # conv2d arguments = (scope_name, inputs, shape, bias_shape, stride, padding='VALID', wd=0.0, reuse=False, trainable=True, with_w=False)
            conv_h1, conv_h1_w, conv_h1_b = mp.conv2d(
                'd_conv_h1',
                h0, [5, 5, h0.get_shape()[-1], self.first_conv_dim * 2],
                [self.first_conv_dim * 2], [1, 2, 2, 1],
                padding='SAME',
                reuse=reuse,
                with_w=True,
                trainable=trainable)
            # h1 = mp.lrelu(conv_h1)
            h1 = mp.lrelu(self.d_bn1(conv_h1, trainable=trainable))
            # h1 = mp.lrelu(mp.batch_norm(conv_h1, scope_name='d_bn_h1', reuse=reuse, trainable=trainable))

            # 3rd convolution
            conv_h2, conv_h2_w, conv_h2_b = mp.conv2d(
                'd_conv_h2',
                h1, [5, 5, h1.get_shape()[-1], self.first_conv_dim * 4],
                [self.first_conv_dim * 4], [1, 2, 2, 1],
                padding='SAME',
                reuse=reuse,
                with_w=True,
                trainable=trainable)
            # h2 = mp.lrelu(conv_h2)
            h2 = mp.lrelu(self.d_bn2(conv_h2, trainable=trainable))
            # h2 = mp.lrelu(mp.batch_norm(conv_h2, scope_name='d_bn_h2', reuse=reuse, trainable=trainable))
            print(h2.get_shape())

            # linear projection to a single logit (the conv stack ends at h2)
            h3 = mp.linear_project('d_lin_project_h3',
                                   tf.reshape(h2, [self.batch_size, -1]),
                                   1,
                                   reuse=reuse)
            return tf.nn.sigmoid(h3), h3
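
Returning the raw logits h3 alongside tf.nn.sigmoid(h3) is deliberate: the training code can feed the logits to tf.nn.sigmoid_cross_entropy_with_logits, which is numerically stabler than taking the log of an already-squashed sigmoid. A hedged sketch of the usual DCGAN losses built from two inference() calls (one on real images, one on generator samples with reuse=True; the function name is illustrative):

import tensorflow as tf

def gan_losses(d_logits_real, d_logits_fake):
    # discriminator: real images labeled 1, generated images labeled 0
    d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_logits_real, labels=tf.ones_like(d_logits_real)))
    d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_logits_fake, labels=tf.zeros_like(d_logits_fake)))
    # generator: tries to make the discriminator label its samples 1
    g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_logits_fake, labels=tf.ones_like(d_logits_fake)))
    return d_loss_real + d_loss_fake, g_loss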
Example #4
    def inference(self, z, reuse=False):
        # linear projection.
        z_, h0_w, self.h0_b = mp.linear_project('g_lin_project_h0',
                                                z,
                                                self.first_conv_dim * 8 * 7 * 7,
                                                reuse=reuse,
                                                with_w=True)
        # reshape for cnn inputs.
        h0 = tf.reshape(z_, [-1, 7, 7, self.first_conv_dim * 8])
        # batch norm
        h0 = tf.nn.relu(mp.batch_norm(h0, scope_name='g_bn_h0', reuse=reuse))

        # deconv1 conv2d_transpose arguments = (scope_name, inputs, shape, output_shape, bias_shape, stride, padding='VALID', reuse=False, trainable=True, with_w=False)
        deconv_h1, deconv_h1_w, deconv_h1_b = mp.conv2d_transpose(
            'g_deconv_h1',
            h0, [5, 5, self.first_conv_dim * 4,
                 h0.get_shape()[-1]],
            [self.batch_size, 14, 14, self.first_conv_dim * 4],
            [self.first_conv_dim * 4], [1, 2, 2, 1],
            padding='SAME',
            reuse=reuse,
            with_w=True)
        h1 = tf.nn.relu(
            mp.batch_norm(deconv_h1, scope_name='g_bn_h1', reuse=reuse))

        # deconv2 conv2d_transpose arguments = (scope_name, inputs, shape, output_shape, bias_shape, stride, padding='VALID', reuse=False, trainable=True, with_w=False)
        deconv_h2, deconv_h2_w, deconv_h2_b = mp.conv2d_transpose(
            'g_deconv_h2',
            h1, [5, 5, 1, h1.get_shape()[-1]], [self.batch_size, 28, 28, 1],
            [1], [1, 2, 2, 1],
            padding='SAME',
            reuse=reuse,
            with_w=True)
        print(deconv_h2.get_shape())
        return tf.nn.tanh(deconv_h2)
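
Because this generator ends in tanh, its outputs lie in [-1, 1]; real MNIST images must be rescaled to the same range before the discriminator sees them, or the two input distributions are trivially separable. A one-function sketch of that preprocessing (assumes uint8 pixels in [0, 255]; the function name is illustrative):

import numpy as np

def rescale_to_tanh_range(images_uint8):
    # map [0, 255] uint8 pixels to float32 in [-1, 1]
    return images_uint8.astype(np.float32) / 127.5 - 1.0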