Example #1
0
def code_classifier_forward(config, incoming=None, image=None,
                            scope="code_classifier", name=None, reuse=False):
    """Score a latent code against a flattened 28x28 image.

    Both inputs are projected to 512-d features, compared via an outer
    product with softmax attention in both directions, and the fused
    features are passed through a dropout-regularized MLP to 10 logits.
    """
    with tf.variable_scope(scope, name, reuse=reuse):
        # 512-d projection of the incoming code.
        code_feat = leaky_relu(fully_connected(incoming, 512))

        # Flatten the image and project it to the same width.
        flat_image = tf.reshape(image, [config.batch_size, 28 * 28])
        image_feat = leaky_relu(fully_connected(flat_image, 512))

        # Outer product of the two feature vectors: (batch, 512, 512).
        affinity = tf.matmul(code_feat[:, :, None], image_feat[:, None, :])

        # Softmax attention over the affinity matrix, in both directions.
        attn_fwd = tf.nn.softmax(affinity)
        attn_bwd = tf.nn.softmax(tf.transpose(affinity, perm=[0, 2, 1]))

        attended_image = tf.matmul(attn_fwd, image_feat[:, :, None])[:, :, 0]
        attended_code = tf.matmul(attn_bwd, code_feat[:, :, None])[:, :, 0]
        fused = tf.concat([code_feat, attended_image, attended_code], axis=-1)

        # MLP head with dropout between the fully-connected layers.
        fused = dropout(relu(fully_connected(fused, 1024)), 0.6)
        fused = dropout(relu(fully_connected(fused, 512)), 0.6)
        fused = dropout(relu(fully_connected(fused, 256)), 0.8)
        logits = fully_connected(fused, 10)

    return logits
Example #2
0
def discriminator_forward(config,
                          incoming,
                          scope="discriminator",
                          name=None,
                          reuse=False):
    """DCGAN-style discriminator: four strided 5x5 convolutions with
    leaky-ReLU activations, then a single bias-free linear logit."""
    with tf.variable_scope(scope, name, reuse=reuse):
        net = incoming
        # Each stage doubles the channel multiplier; stride 2 downsamples.
        for stage, width_mult in enumerate((1, 2, 4, 8), start=1):
            net = conv_2d(net,
                          width_mult * config.gen_dim,
                          5,
                          strides=2,
                          name="conv%d" % stage)
            net = leaky_relu(net, 0.2)

        # Flatten the final 4x4 feature map and map it to a scalar score.
        net = tf.reshape(net, [-1, 4 * 4 * 8 * config.gen_dim])
        net = fully_connected(net, 1, bias=False)

    return net
Example #3
0
def discriminator_forward(config,
                          incoming,
                          labels,
                          scope="discriminator",
                          name=None,
                          reuse=False):
    """Discriminator with one shared third-stage convolution plus five
    class-specific third-stage convolutions, concatenated before the
    final bias-free logit.

    Note: `labels` is accepted for signature compatibility but is not
    used anywhere in this body.
    """
    with tf.variable_scope(scope, name, reuse=reuse):
        output = leaky_relu(
            batch_normalization(
                conv_2d(incoming, config.dim, 5, 2, name="conv1")), 0.2)

        output = leaky_relu(
            batch_normalization(
                conv_2d(output, 2 * config.dim, 5, 2, name="conv2")), 0.2)

        output_shared = conv_2d(output,
                                2 * config.dim,
                                5,
                                2,
                                name="conv3_shared")
        # NOTE(review): all five branches use the same layer name
        # "conv3_cs", which will collide in TF1 variable scoping unless
        # reuse is intended — confirm against the original project.
        # (xrange replaced with range for Python 3 compatibility.)
        output_cs = [
            conv_2d(output, 2 * config.dim, 5, 2, name="conv3_cs")
            for _ in range(5)
        ]

        # Bug fix: tf.concat requires an explicit axis, and the list of
        # branch tensors must be flattened into one list (the original
        # nested the list inside another list).
        output = tf.concat(output_cs + [output_shared], axis=-1)

        # Bug fix: after concatenating six maps of 2*config.dim channels,
        # the flat width is 4*4*12*config.dim; the original 4*config.dim
        # width silently mixed examples across the batch dimension.
        output = tf.reshape(output, [-1, 4 * 4 * 12 * config.dim])
        # Bug fixes: reshape target [-10] is invalid (must be [-1]), and
        # the original function never returned its result.
        output = tf.reshape(fully_connected(output, 1, bias=False), [-1])

    return output
Example #4
0
def discriminator_forward(config,
                          labels,
                          incoming,
                          scope="discriminator",
                          name=None,
                          reuse=False):
    """Label-conditional discriminator: three conv stages over the image,
    then the flattened features are fused with a label embedding through
    fully-connected layers down to one scalar logit per example."""
    with tf.variable_scope(scope, name, reuse=reuse):
        # Bug fix: in the original, the 0.2 leaky-relu slope was passed to
        # batch_normalization (landing on its second positional parameter)
        # instead of leaky_relu, in all three conv stages. Compare the
        # sibling discriminators, which use leaky_relu(bn(conv(...)), 0.2).
        output = leaky_relu(
            batch_normalization(
                conv_2d(incoming, config.dim, 5, 2, name="conv1")), 0.2)
        output = leaky_relu(
            batch_normalization(
                conv_2d(output, 2 * config.dim, 5, 2, name="conv2")), 0.2)
        # Bug fix: scope name normalized from "Conv3" to "conv3" to match
        # the conv1/conv2 naming convention.
        output = leaky_relu(
            batch_normalization(
                conv_2d(output, 4 * config.dim, 5, 2, name="conv3")), 0.2)
        output = tf.reshape(output, [-1, 4 * 4 * 4 * config.dim])

        # NOTE(review): 56 * config.dim is unusual next to the other
        # power-of-two layer widths — confirm it is intentional.
        output = fully_connected(output, 56 * config.dim, name="fc1_1")
        embed = fully_connected(labels, 8 * config.dim, name="fc1_2")

        # Fuse image features with the label embedding.
        output = leaky_relu(
            batch_normalization(tf.concat([output, embed], axis=-1)), 0.2)
        output = fully_connected(output, 8 * config.dim, name="fc2")
        output = batch_normalization(output)
        output = leaky_relu(output, 0.2)
        # Collapse the (batch, 1) logit to a flat (batch,) vector.
        output = tf.reshape(fully_connected(output, 1, bias=False, name="fc3"),
                            [-1])

    return output
Example #5
0
File: mnist.py  Project: sunnerzs/dpgan-1
def discriminator_forward(config,
                          incoming,
                          scope="discriminator",
                          name=None,
                          reuse=False):
    """MNIST discriminator: three strided 5x5 leaky-ReLU convolutions,
    then a bias-free linear logit flattened to shape (batch,)."""
    with tf.variable_scope(scope, name, reuse=reuse):
        net = incoming
        # Channel width grows 1x -> 2x -> 4x while stride 2 downsamples.
        for width in (config.dim, 2 * config.dim, 4 * config.dim):
            net = leaky_relu(conv_2d(net, width, 5, 2), 0.2)

        flat = tf.reshape(net, [-1, 4 * 4 * 4 * config.dim])
        score = tf.reshape(fully_connected(flat, 1, bias=False), [-1])

    return score
Example #6
0
def code_classifier_forward(config,
                            incoming=None,
                            image=None,
                            scope="code_classifier",
                            name=None,
                            reuse=False):
    """Score a latent code against convolutional image features.

    The code is projected to 512-d, the image goes through a small
    conv/residual trunk to a 1024-d feature, the two are compared via an
    outer product with softmax attention in both directions, and the
    fused features feed a dropout-regularized MLP producing 5 logits.
    """
    with tf.variable_scope(scope, name, reuse=reuse):
        # 512-d projection of the incoming code.
        code_feat = leaky_relu(fully_connected(incoming, 512))

        # Convolutional trunk: one conv layer, then two residual stages.
        img = conv_2d(image, 32, 5, 2, name="conv1")
        img = residual_block(img,
                             2,
                             32,
                             downsample=True,
                             batch_norm=True,
                             name="rb1")
        img = residual_block(img,
                             1,
                             64,
                             downsample=True,
                             batch_norm=True,
                             name="rb2")
        img_flat = tf.reshape(img, [config.batch_size, 4 * 4 * 64])
        image_feat = leaky_relu(fully_connected(img_flat, 1024))

        # Outer product affinity with softmax attention both ways.
        affinity = tf.matmul(code_feat[:, :, None], image_feat[:, None, :])
        attn_fwd = tf.nn.softmax(affinity)
        attn_bwd = tf.nn.softmax(tf.transpose(affinity, perm=[0, 2, 1]))

        attended_image = tf.matmul(attn_fwd, image_feat[:, :, None])[:, :, 0]
        attended_code = tf.matmul(attn_bwd, code_feat[:, :, None])[:, :, 0]
        fused = tf.concat([code_feat, attended_image, attended_code], axis=-1)

        # Dropout-regularized MLP head.
        fused = dropout(relu(fully_connected(fused, 1024)), 0.6)
        fused = dropout(relu(fully_connected(fused, 512)), 0.6)
        fused = dropout(relu(fully_connected(fused, 256)), 0.8)
        logits = fully_connected(fused, 5)

    return logits