Example #1
0
def model(his_style, fut_style, use_prior, params, is_training):
  """Latent head: predicts a diagonal Gaussian (mu, log sigma^2) from the
  concatenated history/future style vectors and draws a latent sample.

  Returns a dict with keys 'mu', 'logs2' and 'latent'.
  """
  batch = his_style.get_shape().as_list()[0]
  embed_dim = params.embed_dim
  noise_dim = params.noise_dim

  outputs = dict()
  init = tf.truncated_normal_initializer(stddev=0.002, seed=1)
  with slim.arg_scope([slim.fully_connected], weights_initializer=init):
    # Fuse the two style representations along the feature axis.
    net = tf.concat([his_style, fut_style], axis=1)
    for _ in xrange(params.latent_fc_layers):
      net = layer_norm(resnet_block(net, embed_dim, afn=tf.nn.relu, nfn=None))
    mu = slim.fully_connected(net, noise_dim, activation_fn=None)
    logs2 = slim.fully_connected(net, noise_dim, activation_fn=None)
  outputs['mu'] = mu
  outputs['logs2'] = logs2

  # A custom sampling temperature only applies at eval time, and only when
  # params carries both sample_pdf and sample_temp; otherwise temp is 1.
  use_custom_temp = (not is_training
                     and hasattr(params, 'sample_pdf')
                     and hasattr(params, 'sample_temp'))
  temp = params.sample_temp if use_custom_temp else 1.0

  noise_vec = tf.random_normal(shape=[batch, noise_dim], dtype=tf.float32)
  if use_prior:
    # Sample from the standard-normal prior, scaled by the temperature.
    outputs['latent'] = noise_vec * temp
  else:
    # Reparameterized sample from the predicted posterior.
    outputs['latent'] = mu + tf.multiply(noise_vec, tf.exp(0.5 * logs2)) * temp
  return outputs
Example #2
0
def add_fn(net, prev_input, params):
    """Concatenates `net` with `prev_input`, refines the result through
    `params.latent_fc_layers` layer-normalized resnet blocks, and projects
    back to the original feature width of `net`.
    """
    width = net.get_shape().as_list()[1]
    features = tf.concat([net, prev_input], axis=1)
    for _ in xrange(params.latent_fc_layers):
        features = layer_norm(
            resnet_block(features, width, afn=tf.nn.relu, nfn=None))
    # Final linear projection; no activation or normalizer.
    return slim.fully_connected(
        features, width, activation_fn=None, normalizer_fn=None)
Example #3
0
def model(latent_input, prev_content, prev_style, params, is_training):
    """Decoder head: maps a latent code plus previous content/style vectors
    to a new style vector and an initial decoder RNN state.

    Args:
      latent_input: latent code tensor (presumably [batch, noise_dim] —
        TODO confirm against caller).
      prev_content: previous content embedding.
      prev_style: previous style embedding.
      params: hyperparameters (embed_dim, content_dim, dec_rnn_size,
        latent_fc_layers, dec_fc_layers, optional T_layer_norm).
      is_training: unused here; kept for interface parity with sibling
        model functions.

    Returns:
      dict with 'new_style' and 'dec_embedding' (concatenated [c0, h0]
      decoder state of width 2 * dec_rnn_size).
    """
    embed_dim = params.embed_dim
    state_size = params.dec_rnn_size * 2
    style_dim = params.embed_dim - params.content_dim
    # Floor division keeps the layer width an int under Python 3; plain `/`
    # on ints would yield a float and break slim.fully_connected. On
    # Python 2 the value is identical.
    half_state = state_size // 2

    outputs = dict()
    with slim.arg_scope([slim.fully_connected],
                        weights_initializer=tf.truncated_normal_initializer(
                            stddev=0.002, seed=1)):
        net = latent_input
        inv_z = slim.fully_connected(net,
                                     style_dim,
                                     activation_fn=None,
                                     normalizer_fn=layer_norm)
        # Fuse the projected latent with the previous style and refine it.
        net = tf.concat([inv_z, prev_style], 1)
        for layer_i in xrange(params.latent_fc_layers):
            net = resnet_block(net, style_dim, afn=tf.nn.relu, nfn=None)
            net = layer_norm(net)
        new_style = slim.fully_connected(net,
                                         style_dim,
                                         activation_fn=None,
                                         normalizer_fn=None)
        if hasattr(params, 'T_layer_norm') and params.T_layer_norm > 0:
            new_style = layer_norm(new_style)
        outputs['new_style'] = tf.identity(new_style)

        # Combine content with the new style to seed the decoder state.
        net = tf.concat([prev_content, new_style], axis=1)
        for layer_i in xrange(params.dec_fc_layers):
            net = resnet_block(net, embed_dim, afn=tf.nn.relu, nfn=None)
            net = layer_norm(net)

        h0 = slim.fully_connected(net, half_state, activation_fn=tf.nn.tanh)
        c0 = slim.fully_connected(net, half_state, activation_fn=None)
    # LSTM-style packing: cell state first, then hidden state.
    outputs['dec_embedding'] = tf.concat([c0, h0], 1)
    return outputs
Example #4
0
def model(latent_input, prev_content, prev_style, params, is_training):
    """Decoder head (interaction variant): applies a learned interaction
    between the projected latent and the previous style, then builds a new
    style vector and an initial decoder RNN state.

    Args:
      latent_input: latent code tensor.
      prev_content: previous content embedding.
      prev_style: previous style embedding.
      params: hyperparameters (embed_dim, content_dim, dec_rnn_size,
        dec_fc_layers, dec_interaction, use_latent, optional T_layer_norm).
      is_training: unused here; kept for interface parity with sibling
        model functions.

    Returns:
      dict with 'new_style' and 'dec_embedding' (concatenated [c0, h0]
      decoder state of width 2 * dec_rnn_size).
    """
    embed_dim = params.embed_dim
    state_size = params.dec_rnn_size * 2
    style_dim = params.embed_dim - params.content_dim
    # Floor division keeps the layer width an int under Python 3; plain `/`
    # on ints would yield a float and break slim.fully_connected. On
    # Python 2 the value is identical.
    half_state = state_size // 2

    outputs = dict()
    with slim.arg_scope([slim.fully_connected],
                        weights_initializer=tf.truncated_normal_initializer(
                            stddev=0.002, seed=1)):
        net = latent_input
        inv_z = slim.fully_connected(net,
                                     style_dim,
                                     activation_fn=None,
                                     normalizer_fn=layer_norm)
        interaction_fn = get_interaction_fn(params.dec_interaction)
        slim.summaries.add_histogram_summary(inv_z,
                                             name='delta_z_activation',
                                             prefix='summaries')
        # use_latent gates the latent contribution (0 disables it).
        T_new = interaction_fn(inv_z * params.use_latent, prev_style, params)
        # NOTE(review): when T_layer_norm > 0, layer_norm is applied twice —
        # to the delta T_new and again to the residual sum. Looks deliberate
        # but worth confirming against the training config.
        if hasattr(params, 'T_layer_norm') and params.T_layer_norm > 0:
            T_new = layer_norm(T_new)
        new_style = T_new + prev_style
        if hasattr(params, 'T_layer_norm') and params.T_layer_norm > 0:
            new_style = layer_norm(new_style)
        outputs['new_style'] = tf.identity(new_style)

        # Combine content with the new style to seed the decoder state.
        net = tf.concat([prev_content, new_style], axis=1)
        for layer_i in xrange(params.dec_fc_layers):
            net = resnet_block(net, embed_dim, afn=tf.nn.relu, nfn=None)
            net = layer_norm(net)

        h0 = slim.fully_connected(net, half_state, activation_fn=tf.nn.tanh)
        c0 = slim.fully_connected(net, half_state, activation_fn=None)
    # LSTM-style packing: cell state first, then hidden state.
    outputs['dec_embedding'] = tf.concat([c0, h0], 1)
    return outputs