def decoder(z, inputs=None, reuse=False, sequence_length=input_dim):
    """Two-layer LSTM decoder that expands latent code z into a sequence.

    Args:
        z: latent code tensor -- assumed (batch, z_dim); confirm with caller.
        inputs: optional teacher-forcing inputs, forwarded to seq2seq.
        reuse: if True, reuse variables of the current variable scope.
        sequence_length: number of decoding steps. NOTE: defaults to the
            module-level ``input_dim``, captured once at definition time.

    Returns:
        (out, logits_reshaped): sigmoid activations and raw logits, both
        flattened to shape (batch, input_dim * sequence_length).
    """
    if reuse:
        tf.get_variable_scope().reuse_variables()
    with tf.variable_scope('Decoder'):
        lstm_cell_1 = tf.nn.rnn_cell.LSTMCell(hidden_layer)
        lstm_cell_2 = tf.nn.rnn_cell.LSTMCell(input_dim)
        # Project z to each cell's state size; the same projection seeds
        # both the c and h components of that cell's initial state.
        expanded_z_1 = tf.nn.relu(linear(z, hidden_layer, 'linear_expand_z_1'))
        expanded_z_2 = tf.nn.relu(linear(z, input_dim, 'linear_expand_z_2'))
        initial_state_1 = (expanded_z_1, expanded_z_1)
        initial_state_2 = (expanded_z_2, expanded_z_2)
        # All-ones "go" frame to kick off decoding (was zeros_like + 1.).
        first_input = tf.ones_like(expanded_z_1[:, :input_dim])

        # Use custom seq2seq module
        outputs, states = seq2seq([lstm_cell_1, lstm_cell_2],
                                  [initial_state_1, initial_state_2],
                                  first_input,
                                  sequence_length,
                                  inputs=inputs)

        # Stack per-step outputs into (batch, time, input_dim), then flatten.
        logits = tf.stack(outputs, axis=1, name='logits')
        logits_reshaped = tf.reshape(logits, [-1, input_dim * sequence_length])
        out = tf.nn.sigmoid(logits_reshaped)
        return out, logits_reshaped
def discriminator(x_hat, reuse=False):
    """Three-hidden-layer MLP discriminator over samples x_hat.

    Args:
        x_hat: input tensor -- assumed (batch, feature_dim); confirm.
        reuse: if True, reuse variables of the current variable scope.
            Previously accepted but silently ignored; now honored, matching
            the other reuse-aware builders in this file.

    Returns:
        prob: sigmoid probability that x_hat is real, shape (batch, 1).
    """
    if reuse:
        tf.get_variable_scope().reuse_variables()
    dis_linear_1 = tf.nn.relu(linear(x_hat, discriminator_hidden_layer, 'dis_linear_1'))
    dis_linear_2 = tf.nn.relu(linear(dis_linear_1, discriminator_hidden_layer, 'dis_linear_2'))
    dis_linear_3 = tf.nn.relu(linear(dis_linear_2, discriminator_hidden_layer, 'dis_linear_3'))
    logits = linear(dis_linear_3, 1, 'dis_logits')
    prob = tf.nn.sigmoid(logits)
    return prob
def discriminator(z, reuse=False):
    """Two-hidden-layer MLP discriminator over latent codes z.

    Args:
        z: latent code tensor -- assumed (batch, z_dim); confirm.
        reuse: if True, reuse variables of the current variable scope.
            Previously accepted but silently ignored; now honored, matching
            the other reuse-aware builders in this file.

    Returns:
        prob: sigmoid probability that z is real, shape (batch, 1).
    """
    if reuse:
        tf.get_variable_scope().reuse_variables()
    dis_linear_1 = tf.nn.relu(
        linear(z, hidden_layer_discriminator, 'dis_linear_1'))
    dis_linear_2 = tf.nn.relu(
        linear(dis_linear_1, hidden_layer_discriminator, 'dis_linear_2'))
    logits = linear(dis_linear_2, 1, 'dis_logits')
    prob = tf.nn.sigmoid(logits)
    return prob
def encoder(x, c):
    """Deterministic conditional encoder: maps (x, c) to a latent code."""
    with tf.name_scope('Condition_input'):
        conditioned = tf.concat(axis=1, values=[x, c])

    hidden = tf.nn.relu(linear(conditioned, hidden_layer1, 'e_linear_1'))
    hidden = tf.nn.relu(linear(hidden, hidden_layer2, 'e_linear_2'))
    return linear(hidden, options.z_dim, 'e_latent_variable')
def discriminator_z(z):
    """Two-hidden-layer MLP scoring latent samples; returns a sigmoid prob."""
    hidden_a = tf.nn.relu(
        linear(z, discriminator_z_hidden_layer, 'dis_z_linear_1'))
    hidden_b = tf.nn.relu(
        linear(hidden_a, discriminator_z_hidden_layer, 'dis_z_linear_2'))
    return tf.nn.sigmoid(linear(hidden_b, 1, 'dis_z_logits'))
def decoder(z, c):
    """Conditional MLP decoder: reconstructs the input from (z, c).

    Returns (prob, logits): sigmoid activations and raw logits.
    """
    with tf.name_scope('Condition_latent_variable'):
        conditioned = tf.concat(axis=1, values=[z, c])

    hidden = tf.nn.relu(linear(conditioned, hidden_layer2, 'd_linear_1'))
    hidden = tf.nn.relu(linear(hidden, hidden_layer1, 'd_linear_2'))
    logits = linear(hidden, input_dim, 'd_logits')
    return tf.nn.sigmoid(logits), logits
# Exemplo n.º 7
# 0
def encoder(x, c):
    """Conditional variational encoder: maps (x, c) to (z_mu, z_logvar)."""
    with tf.name_scope('Condition_input'):
        conditioned = tf.concat(axis=1, values=[x, c])

    hidden = tf.nn.relu(linear(conditioned, hidden_layer1, 'e_linear_1'))
    hidden = tf.nn.relu(linear(hidden, hidden_layer2, 'e_linear_2'))
    mu = linear(hidden, options.z_dim, 'z_mu')
    logvar = linear(hidden, options.z_dim, 'z_logvar')
    return mu, logvar
def encoder(x, reuse=False):
    """LSTM variational encoder: runs x through an LSTM and projects the
    final state onto Gaussian parameters (z_mu, z_logvar)."""
    if reuse:
        tf.get_variable_scope().reuse_variables()
    with tf.variable_scope('Encoder'):
        cell = tf.nn.rnn_cell.LSTMCell(hidden_layer)
        _, final_state = tf.contrib.rnn.static_rnn(cell, x, dtype=tf.float32)
        # Summarize the sequence by concatenating the final cell and
        # hidden components of the LSTM state.
        c_state, h_state = final_state
        summary = tf.concat([c_state, h_state], 1)
        mu = linear(summary, options.z_dim, 'z_mu')
        logvar = linear(summary, options.z_dim, 'z_logvar')
        return mu, logvar
def encoder(X, c, reuse=False):
    """Conditional variational encoder: maps (X, c) to (z_mu, z_logvar).

    When ``reuse`` is True, variables of the current scope are reused.
    """
    if reuse:
        tf.get_variable_scope().reuse_variables()

    with tf.name_scope('Condition_input'):
        conditioned = tf.concat(axis=1, values=[X, c])

    hidden = tf.nn.relu(linear(conditioned, hidden_layer1, 'e_linear_1'))
    hidden = tf.nn.relu(linear(hidden, hidden_layer2, 'e_linear_2'))
    mu = linear(hidden, options.z_dim, 'z_mu')
    logvar = linear(hidden, options.z_dim, 'z_logvar')
    return mu, logvar
# Exemplo n.º 10
# 0
def discriminator(x, c):
    """Conditional three-hidden-layer MLP discriminator over (x, c)."""
    with tf.name_scope('Condition_input_variable'):
        conditioned = tf.concat(axis=1, values=[x, c])

    # Three ReLU hidden layers of identical width.
    hidden = tf.nn.relu(
        linear(conditioned, discriminator_hidden_layer, 'dis_linear_1'))
    hidden = tf.nn.relu(
        linear(hidden, discriminator_hidden_layer, 'dis_linear_2'))
    hidden = tf.nn.relu(
        linear(hidden, discriminator_hidden_layer, 'dis_linear_3'))
    return tf.nn.sigmoid(linear(hidden, 1, 'dis_logits'))
def encoder(x, reuse=False):
    """Layer-norm LSTM encoder: projects the last step's output onto a
    deterministic latent code."""
    if reuse:
        tf.get_variable_scope().reuse_variables()
    with tf.variable_scope('Encoder'):
        base_cell = tf.contrib.rnn.LayerNormBasicLSTMCell(hidden_layer)
        stacked = tf.contrib.rnn.MultiRNNCell([base_cell])
        step_outputs, _ = tf.contrib.rnn.static_rnn(stacked, x,
                                                    dtype=tf.float32)
        # Only the final time step feeds the latent projection.
        return linear(step_outputs[-1], options.z_dim, 'e_latent_variable')
def decoder(z, inputs=None, reuse=False):
    """Single-layer LSTM decoder unrolled for num_frames steps.

    Args:
        z: latent code -- assumed (batch, z_dim); confirm with caller.
        inputs: optional per-timestep teacher-forcing tensors, indexable
            for i in [0, num_frames-2]. When None, the decoder feeds its
            own previous output back in (generation mode).
        reuse: if True, reuse variables of the current variable scope.

    Returns:
        (out, logits_reshaped): tanh activations and raw logits, both of
        shape (batch, num_frames * input_dim).
    """
    if reuse:
        tf.get_variable_scope().reuse_variables()
    with tf.variable_scope('Decoder'):
        # state_is_tuple=False so the expanded z (c and h concatenated into
        # one tensor of width input_dim*2) can seed the state directly.
        lstm_cell = tf.nn.rnn_cell.LSTMCell(input_dim, state_is_tuple=False)
        expanded_z = tf.nn.relu(linear(z, input_dim*2, 'linear_expand_z'))
        # All-ones "go" frame starts the unroll.
        first_input = tf.add(tf.zeros_like(expanded_z[:, :input_dim]), 1.)
        output, state = lstm_cell(first_input, expanded_z)
        outputs = [output]
        for i in range(num_frames-1):
            # Bug fix: `if inputs:` depended on container truthiness and
            # raises on tensor-valued inputs; test identity against None.
            if inputs is not None:
                # Training: teacher forcing with the ground-truth frame.
                output, state = lstm_cell(inputs[i], state)
            else:
                # Generation: feed the previous output back in.
                output, state = lstm_cell(output, state)
            outputs.append(output)

        logits = tf.stack(outputs, axis=1, name='logits')
        logits_reshaped = tf.reshape(logits, [-1, num_frames * input_dim])
        out = tf.nn.tanh(logits_reshaped)
        return out, logits_reshaped
# Exemplo n.º 13
# 0
def decoder(z):
    """MLP decoder: maps latent z back to input space.

    Returns (prob, logits): sigmoid activations and raw logits.
    """
    hidden = tf.nn.relu(linear(z, hidden_layer2, 'd_linear_1'))
    hidden = tf.nn.relu(linear(hidden, hidden_layer1, 'd_linear_2'))
    logits = linear(hidden, input_dim, 'd_logits')
    return tf.nn.sigmoid(logits), logits
# Exemplo n.º 14
# 0
def encoder(x):
    """Deterministic MLP encoder: maps x to a latent code."""
    hidden = tf.nn.relu(linear(x, hidden_layer1, 'e_linear_1'))
    hidden = tf.nn.relu(linear(hidden, hidden_layer2, 'e_linear_2'))
    return linear(hidden, options.z_dim, 'e_latent_variable')
# Exemplo n.º 15
# 0
def encoder(x):
    """Variational MLP encoder: maps x to Gaussian params (z_mu, z_logvar)."""
    hidden = tf.nn.relu(linear(x, hidden_layer1, 'e_linear_1'))
    hidden = tf.nn.relu(linear(hidden, hidden_layer2, 'e_linear_2'))
    mu = linear(hidden, options.z_dim, 'z_mu')
    logvar = linear(hidden, options.z_dim, 'z_logvar')
    return mu, logvar