Example #1
# Imports shared by all of the examples below.
import functools
import tensorflow as tf


def decoder_shared(z, is_training, **hparams):
    activation = functools.partial(tf.nn.leaky_relu, alpha=0.2)
    #norm = lambda x: x
    norm = get_normalization_fn("instancenorm",
                                is_training)  # TODO: Not mentioned in paper.

    net = z
    net = res_block(net, 512, 3, norm, activation)

    return net
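All six snippets lean on the same helpers: conv, dconv, res_block and get_normalization_fn. The names come from the repo, but their bodies are not shown, so here is a minimal sketch of plausible TF 1.x definitions; the "same" padding, the bias-free convs, the two-conv residual block, and the 1x1 shortcut projection are all assumptions (the TODOs in these snippets note the paper leaves exactly these choices open):

def conv(net, filters, kernel_size, strides):
    # Plain 2-D convolution with "same" padding; bias is dropped since a
    # normalization layer usually follows.
    return tf.layers.conv2d(net, filters, kernel_size, strides,
                            padding="same", use_bias=False)

def dconv(net, filters, kernel_size, strides):
    # Transposed convolution used for upsampling in the decoders.
    return tf.layers.conv2d_transpose(net, filters, kernel_size, strides,
                                      padding="same", use_bias=False)

def get_normalization_fn(name, is_training):
    # Maps an hparam string to a callable applied as norm(x).
    if name == "instancenorm":
        return tf.contrib.layers.instance_norm
    if name == "batchnorm":
        return lambda x: tf.layers.batch_normalization(x, training=is_training)
    if name == "none":
        return lambda x: x
    raise ValueError("Unknown normalization: %s" % name)

def res_block(net, filters, kernel_size, norm, activation):
    # Assumed: two convs per block, conv -> norm -> activation ordering;
    # both choices are flagged as open questions in the TODOs.
    shortcut = net
    if net.shape[-1] != filters:
        # 1x1 projection so the skip connection matches the new width
        # (needed for the 256 -> 512 jump in encoder_private below).
        shortcut = conv(net, filters, 1, 1)
    out = activation(norm(conv(net, filters, kernel_size, 1)))
    out = norm(conv(out, filters, kernel_size, 1))
    return shortcut + out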
Example #2
def encoder(x, is_training, **hparams):

    activation = tf.nn.relu
    norm = get_normalization_fn("instancenorm", is_training)

    # TODO: norm or activation first? Paper says relu then norm, code the opposite

    net = x
    net = norm(activation(conv(net, 64, 7, 1)))
    net = norm(activation(conv(net, 128, 4, 2)))
    net = norm(activation(conv(net, 256, 4, 2)))

    return net
Example #3
def decoder_private(h, is_training, **hparams):
    activation = functools.partial(tf.nn.leaky_relu, alpha=0.2)
    #norm = lambda x: x
    norm = get_normalization_fn("instancenorm",
                                is_training)  # TODO: Not mentioned in paper.

    net = h
    net = res_block(net, 512, 3, norm, activation)
    net = res_block(net, 512, 3, norm, activation)
    net = res_block(net, 512, 3, norm, activation)
    net = norm(activation(dconv(net, 256, 3, 2)))
    net = norm(activation(dconv(net, 128, 3, 2)))
    net = tf.nn.tanh(dconv(net, 3, 1, 1))

    return net
Example #4
def encoder_private(x, is_training, **hparams):
    activation = functools.partial(tf.nn.leaky_relu, alpha=0.2)
    #norm = lambda x: x  # TODO: No normalization mentioned in paper? what about in resblock?
    norm = get_normalization_fn("instancenorm",
                                is_training)  # TODO: Not mentioned in paper.

    net = x
    net = activation(norm(conv(net, 64, 7, 1)))
    net = activation(norm(conv(net, 128, 3, 2)))
    net = activation(norm(conv(net, 256, 3, 2)))
    net = res_block(net, 512, 3, norm, activation)
    net = res_block(net, 512, 3, norm, activation)
    net = res_block(net, 512, 3, norm, activation)

    return net
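Examples #1, #3 and #4 read like the private halves of a shared-latent (UNIT-style) translator. A hypothetical wiring of just the pieces shown here; the input resolution is an assumption, and whatever shared encoder the repo may define between encoder_private and decoder_shared is skipped:

x = tf.placeholder(tf.float32, [None, 256, 256, 3])  # assumed input size
h = encoder_private(x, is_training=True)  # 512-channel features at 1/4 resolution
z = decoder_shared(h, is_training=True)   # shared residual stage
y = decoder_private(z, is_training=True)  # back to a tanh-scaled RGB image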
Example #5
def decoder(z, x_orig, n_attributes, is_training, **hparams):

    activation = tf.nn.relu
    norm = get_normalization_fn("instancenorm", is_training)

    net = z
    net = norm(activation(dconv(net, 128, 4, 2)))

    def decoder_head():
        h = norm(activation(dconv(net, 64, 4, 2)))
        h = tf.concat((h, x_orig), axis=-1)  # axis was missing; join along channels
        h = tf.nn.tanh(conv(h, 3, 7, 1))
        return h

    return tuple(decoder_head() for _ in range(n_attributes))
Example #6
def bottleneck(z, is_training, **hparams):

    activation = tf.nn.relu
    norm = get_normalization_fn("instancenorm", is_training)

    # TODO: norm or activation first in resblock?
    # TODO: One or two convs in resblock?

    net = z
    net = res_block(net, 256, 3, norm, activation)
    net = res_block(net, 256, 3, norm, activation)
    net = res_block(net, 256, 3, norm, activation)
    net = res_block(net, 256, 3, norm, activation)
    net = res_block(net, 256, 3, norm, activation)
    net = res_block(net, 256, 3, norm, activation)

    return net
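Examples #2, #5 and #6 fit together as one attribute-conditional generator: encoder downsamples by 4x to 256 channels, bottleneck runs six residual blocks at that width, and decoder upsamples back with one head per attribute. A hypothetical end-to-end wiring; the input resolution and attribute count are assumptions:

x = tf.placeholder(tf.float32, [None, 128, 128, 3])     # assumed input size
z = encoder(x, is_training=True)                        # 256 channels, 1/4 resolution
z = bottleneck(z, is_training=True)                     # six residual blocks
imgs = decoder(z, x, n_attributes=5, is_training=True)  # tuple of 5 output images

Note that each decoder_head() call builds fresh variables, so the heads share the trunk up through the first dconv but not their final upsampling and output layers.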