import mxnet as mx  # required by the mx.symbol calls below


def get_cls_head(features,
                 label,
                 dropout_ratio=0.5,
                 bn_decay=0.9,
                 weights=None):
    """
    Get the symbol for ModelNet40 shape classification.

    Note: `fully_connected` below is a project-local wrapper (it takes
    `bn_decay`, `dropout_ratio`, and `scope`), not mx.symbol.FullyConnected.
    """
    net = fully_connected(features,
                          num_hidden=512,
                          flatten=True,
                          bn_decay=bn_decay,
                          dropout_ratio=dropout_ratio,
                          scope='fc1')
    net = fully_connected(net,
                          num_hidden=256,
                          bn_decay=bn_decay,
                          dropout_ratio=dropout_ratio,
                          scope='fc2')
    # final linear layer: one logit per ModelNet40 class (40 classes)
    net = mx.symbol.FullyConnected(net, num_hidden=40, name='fc3')
    if weights is not None:
        net = mx.symbol.Custom(data=net,
                               weight=weights,
                               input_dim=2,
                               name='fc3_weighted',
                               op_type='weighted_gradient')
    net = mx.symbol.SoftmaxOutput(data=net,
                                  label=label,
                                  normalization='batch',
                                  name='pred')
    return net
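A minimal usage sketch for the head above, assuming an mx.mod-based training flow; the variable names and Module wiring here are illustrative, not taken from the original file:

import mxnet as mx

features = mx.symbol.Variable('features')  # stand-in for a backbone output
label = mx.symbol.Variable('label')
sym = get_cls_head(features, label)

# bind the classification symbol into a trainable module
mod = mx.mod.Module(symbol=sym,
                    data_names=['features'],
                    label_names=['label'])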
Example #2
def classifier_forward(config,
                       incoming,
                       name=None,
                       reuse=False,
                       scope="classifier"):
    with tf.variable_scope(scope, name, reuse=reuse):
        network = incoming
        # two strided conv blocks; ReLU is applied once, after batch
        # normalization (conv_2d keeps its default linear activation)
        network = relu(
            batch_normalization(
                conv_2d(network, 32, 5, regularizer="L2", strides=2)))
        network = relu(
            batch_normalization(
                conv_2d(network, 64, 5, regularizer="L2", strides=2)))
        network = flatten(network)

        network = relu(batch_normalization(fully_connected(network, 1024)))
        network = dropout(network, 0.5)  # 0.5 is the keep probability

        network = fully_connected(network, 10)

    return network
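classifier_forward returns raw 10-way logits, so a TF1-style training loss would look roughly like the sketch below; `images` and `labels` are hypothetical placeholders and the input shape is illustrative:

images = tf.placeholder(tf.float32, [None, 28, 28, 1])  # illustrative shape
labels = tf.placeholder(tf.int64, [None])

logits = classifier_forward(config, images)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                   logits=logits))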
Example #3
def discriminator_forward(config,
                          incoming,
                          scope="discriminator",
                          name=None,
                          reuse=False):
    with tf.variable_scope(scope, name, reuse=reuse):
        output = conv_2d(incoming, config.gen_dim, 5, strides=2, name="conv1")
        output = leaky_relu(output, 0.2)
        output = conv_2d(output,
                         2 * config.gen_dim,
                         5,
                         strides=2,
                         name="conv2")

        output = leaky_relu(output, 0.2)
        output = conv_2d(output,
                         4 * config.gen_dim,
                         5,
                         strides=2,
                         name="conv3")

        output = leaky_relu(output, 0.2)
        output = conv_2d(output,
                         8 * config.gen_dim,
                         5,
                         strides=2,
                         name="conv4")

        output = leaky_relu(output, 0.2)
        # four stride-2 convs reduce a 64x64 input to 4x4, matching this shape
        output = tf.reshape(output, [-1, 4 * 4 * 8 * config.gen_dim])
        output = fully_connected(output, 1, bias=False)

    return output
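The final layer is linear, bias-free, and unbounded (no sigmoid), the usual setup for a WGAN critic; a hedged sketch of the matching losses, where real_data and fake_data are assumed tensors:

d_real = discriminator_forward(config, real_data)
d_fake = discriminator_forward(config, fake_data, reuse=True)

disc_loss = tf.reduce_mean(d_fake) - tf.reduce_mean(d_real)
gen_loss = -tf.reduce_mean(d_fake)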
Example #4
def generator_forward(config,
                      noise=None,
                      scope="generator",
                      name=None,
                      num_samples=-1,
                      reuse=False):
    with tf.variable_scope(scope, name, reuse=reuse):
        if noise is None:
            noise = tf.random_normal(
                [config.batch_size if num_samples == -1 else num_samples, 128],
                name="noise")

        output = fully_connected(noise, 4 * 4 * config.gen_dim, name="input")
        output = tf.reshape(output, [-1, 4, 4, config.gen_dim])

        # three upsampling residual blocks grow the 4x4 seed to 32x32
        # (assuming each residual_block_upsample doubles the resolution)
        output = residual_block_upsample(output, config.gen_dim, 3, name="rb1")
        output = residual_block_upsample(output, config.gen_dim, 3, name="rb2")
        output = residual_block_upsample(output, config.gen_dim, 3, name="rb3")

        output = batch_normalization(output)
        output = tf.nn.relu(output)
        output = conv_2d(output, 3, 3, name="output")
        output = tf.tanh(output)

    return output
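Assuming each residual_block_upsample doubles the spatial resolution, this generator emits 32x32 RGB images in [-1, 1]. A sampling sketch using the num_samples argument from the signature above:

samples = generator_forward(config, num_samples=16)
# expected shape: [16, 32, 32, 3], values in [-1, 1] from tf.tanh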
Example #5
def discriminator_forward(config,
                          incoming,
                          scope="discriminator",
                          name=None,
                          reuse=False):
    with tf.variable_scope(scope, name, reuse=reuse):
        output = conv_2d(incoming, config.disc_dim, 3, name="input")

        output = residual_block_downsample(output,
                                           2 * config.disc_dim,
                                           3,
                                           name="rb1")
        output = residual_block_downsample(output,
                                           4 * config.disc_dim,
                                           3,
                                           name="rb2")
        output = residual_block_downsample(output,
                                           8 * config.disc_dim,
                                           3,
                                           name="rb3")
        output = residual_block_downsample(output,
                                           8 * config.disc_dim,
                                           3,
                                           name="rb4")

        # the four downsampling blocks take a 64x64 input to 4x4 here
        # (assuming each residual_block_downsample halves the spatial size)
        output = tf.reshape(output, [-1, 4 * 4 * 8 * config.disc_dim])
        output = fully_connected(output, 1, name="output")

    return tf.reshape(output, [-1])
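If this critic is trained with a WGAN-GP objective (an assumption; the training loop is not shown here), the gradient penalty would be sketched along these lines, with real_data and fake_data as assumed tensors:

alpha = tf.random_uniform([config.batch_size, 1, 1, 1], 0., 1.)
interpolates = real_data + alpha * (fake_data - real_data)
grads = tf.gradients(
    discriminator_forward(config, interpolates, reuse=True),
    [interpolates])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2)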
Example #6
File: mnist.py (project: sunnerzs/dpgan-1)
def generator_forward(config,
                      noise=None,
                      scope="generator",
                      name=None,
                      reuse=False,
                      num_samples=-1):
    with tf.variable_scope(scope, name, reuse=reuse):
        if noise is None:
            noise = tf.random_normal(
                [config.batch_size if num_samples == -1 else num_samples, 128],
                name="noise")

        output = fully_connected(noise, 4 * 4 * 4 * config.dim)
        output = batch_normalization(output)
        output = tf.nn.relu(output)
        output = tf.reshape(output, [-1, 4, 4, 4 * config.dim])

        output = conv_2d_transpose(output,
                                   2 * config.dim,
                                   5, [8, 8],
                                   strides=2)
        # crop 8x8 -> 7x7 so the next two stride-2 deconvs land on
        # 14x14 and then 28x28 (the MNIST resolution)
        output = output[:, :7, :7, :]

        output = conv_2d_transpose(output, config.dim, 5, [14, 14], strides=2)
        output = tf.nn.relu(output)

        output = conv_2d_transpose(output, 1, 5, [28, 28], strides=2)

        output = tf.tanh(output)

    return output
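A quick shape sanity check for this MNIST generator (TF1 graph mode; assumes config.dim is set): the 4x4 seed is upsampled to 8x8, cropped to 7x7, then doubled twice to reach 28x28.

noise = tf.random_normal([16, 128])
imgs = generator_forward(config, noise=noise)
print(imgs.get_shape())  # expected: (16, 28, 28, 1)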
Example #7
def classifier_forward(config,
                       incoming,
                       name=None,
                       reuse=False,
                       scope="classifier"):
    with tf.variable_scope(scope, name, reuse=reuse):
        network = incoming
        network = relu(conv_2d(network, 32, 5, strides=2))
        network = relu(conv_2d(network, 64, 5, strides=2))
        network = flatten(network)

        network = relu(fully_connected(network, 1024))
        network = dropout(network, 0.7)  # 0.7 is the keep probability

        network = fully_connected(network, 10)

    return network
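A hedged evaluation sketch for this head; images and labels are illustrative names for tensors defined elsewhere:

logits = classifier_forward(config, images, reuse=True)
preds = tf.argmax(logits, axis=1)
accuracy = tf.reduce_mean(tf.cast(tf.equal(preds, labels), tf.float32))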
Example #8
def generator_forward(config,
                      noise=None,
                      scope="generator",
                      name=None,
                      reuse=False,
                      num_samples=-1):
    with tf.variable_scope(scope, name, reuse=reuse):
        if noise is None:
            noise = tf.random_normal(
                [config.batch_size if num_samples == -1 else num_samples, 128],
                name="noise")

        output = fully_connected(noise,
                                 4 * 4 * 8 * config.gen_dim,
                                 name="input")
        # reshape to a 4x4 seed; the four stride-2 deconvs below grow it
        # to 8x8, 16x16, 32x32, and finally 64x64
        output = tf.reshape(output, [-1, 4, 4, 8 * config.gen_dim])
        output = batch_normalization(output)
        output = relu(output)

        output = conv_2d_transpose(output,
                                   4 * config.gen_dim,
                                   5, [8, 8],
                                   name="conv1",
                                   strides=2)
        output = batch_normalization(output)
        output = relu(output)

        output = conv_2d_transpose(output,
                                   2 * config.gen_dim,
                                   5, [16, 16],
                                   name="conv2",
                                   strides=2)
        output = batch_normalization(output)
        output = relu(output)

        output = conv_2d_transpose(output,
                                   config.gen_dim,
                                   5, [32, 32],
                                   name="conv3",
                                   strides=2)
        output = batch_normalization(output)
        output = relu(output)

        output = conv_2d_transpose(output,
                                   3,
                                   5, [64, 64],
                                   name="conv4",
                                   strides=2)
        output = tf.tanh(output)

    return output
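Because the generator maps a 128-d Gaussian code to a 64x64 RGB image, a common inspection is latent interpolation; a sketch with illustrative names:

z0 = tf.random_normal([1, 128])
z1 = tf.random_normal([1, 128])
t = tf.reshape(tf.linspace(0., 1., 8), [8, 1])
z = z0 + t * (z1 - z0)  # 8 evenly spaced points between z0 and z1
frames = generator_forward(config, noise=z, reuse=True)  # [8, 64, 64, 3]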
Example #9
File: mnist.py (project: sunnerzs/dpgan-1)
def discriminator_forward(config,
                          incoming,
                          scope="discriminator",
                          name=None,
                          reuse=False):
    with tf.variable_scope(scope, name, reuse=reuse):
        # three stride-2 5x5 convs: 28x28 -> 14x14 -> 7x7 -> 4x4
        output = leaky_relu(conv_2d(incoming, config.dim, 5, 2), 0.2)
        output = leaky_relu(conv_2d(output, 2 * config.dim, 5, 2), 0.2)
        output = leaky_relu(conv_2d(output, 4 * config.dim, 5, 2), 0.2)

        output = tf.reshape(output, [-1, 4 * 4 * 4 * config.dim])
        output = tf.reshape(fully_connected(output, 1, bias=False), [-1])

    return output
Example #10
def discriminator_forward(config,
                          incoming,
                          scope="discriminator",
                          name=None,
                          reuse=False):
    with tf.variable_scope(scope, name, reuse=reuse):
        output = optimized_residual_block(incoming, config.disc_dim, 3)

        output = residual_block_downsample(output,
                                           config.disc_dim,
                                           3,
                                           name="rb1")
        output = residual_block(output, config.disc_dim, 3, name="rb2")
        output = residual_block(output, config.disc_dim, 3, name="rb3")

        output = tf.nn.relu(output)
        # global average pooling over the spatial axes
        output = tf.reduce_mean(output, axis=[1, 2])
        output = fully_connected(output, 1, name="output")

    return tf.reshape(output, [-1])
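Design note: unlike the flatten-based critics above, the global average pool removes the spatial dimensions, so this critic is not tied to a single input resolution; a sketch, with images_a and images_b as assumed tensors of possibly different sizes:

d_a = discriminator_forward(config, images_a)
d_b = discriminator_forward(config, images_b, reuse=True)  # weights shared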