import numpy as np
import tensorflow as tf  # TensorFlow 1.x API (variable_scope, placeholders)


def pt_given_z(z,
               hidden_dim,
               batch_size,
               is_training,
               batch_norm,
               keep_prob,
               scope='generate_t_given_z',
               reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        # First hidden layer: project z into the last decoder hidden layer.
        input_shape = z.get_shape().as_list()[1]

        w_hi, b_hi = create_nn_weights('h_z_3', 'decoder',
                                       [input_shape, hidden_dim[-1]])
        hidden_z = dropout_normalised_mlp(layer_input=z,
                                          weights=w_hi,
                                          biases=b_hi,
                                          is_training=is_training,
                                          batch_norm=batch_norm,
                                          keep_prob=keep_prob,
                                          layer='h_z_decoder')

        # Concatenate fresh uniform noise so that t is stochastic given z.
        noise = uniform(dim=hidden_dim[-1], batch_size=batch_size)
        hidden_z_plus_noise = tf.concat([hidden_z, noise], axis=1)
        input_shape = hidden_z_plus_noise.get_shape().as_list()[1]

        # Linear output neuron for log t; exponentiating keeps t positive.
        w_t, b_t = create_nn_weights('t', 'encoder', [input_shape, 1])
        t_mu = mlp_neuron(hidden_z_plus_noise, w_t, b_t, activation=False)
        return tf.exp(t_mu), z
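
# Hedged usage sketch (not from the original source): names and shapes below
# are illustrative, assuming a latent z of width 50 and two hidden layers.
def _example_pt_given_z():
    z = tf.placeholder(tf.float32, shape=[64, 50])
    is_training = tf.placeholder(tf.bool)
    t, z_out = pt_given_z(z,
                          hidden_dim=[256, 128],
                          batch_size=64,
                          is_training=is_training,
                          batch_norm=True,
                          keep_prob=0.9)
    return t, z_out
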
# Example 2

def generate_x_given_z(z,
                       hidden_dim,
                       latent_dim,
                       is_training,
                       batch_norm,
                       batch_size,
                       input_dim,
                       keep_prob=1,
                       reuse=False):
    # Generative p(x|z)
    size = len(hidden_dim)
    with tf.variable_scope("decoder", reuse=reuse):
        noise = uniform(dim=latent_dim, batch_size=batch_size)
        z_plus_noise = tf.concat([z, noise], axis=1)
        layer_input = hidden_mlp_layers(batch_norm=batch_norm,
                                        hidden_dim=hidden_dim,
                                        is_training=is_training,
                                        keep_prob=keep_prob,
                                        layer_input=z_plus_noise,
                                        size=size)

        w_x, b_x = create_nn_weights('x', 'decoder',
                                     [hidden_dim[size - 1], input_dim])
        # Linear reconstruction layer producing x (no output activation).
        x = mlp_neuron(layer_input, w_x, b_x, activation=False)
        return x
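
# Hedged usage sketch (illustrative shapes, not from the original source):
# decode a batch of latent codes back to input space.
def _example_generate_x_given_z():
    z = tf.placeholder(tf.float32, shape=[64, 50])
    is_training = tf.placeholder(tf.bool)
    x_recon = generate_x_given_z(z,
                                 hidden_dim=[128, 256],
                                 latent_dim=50,
                                 is_training=is_training,
                                 batch_norm=True,
                                 batch_size=64,
                                 input_dim=784,
                                 keep_prob=0.9)
    return x_recon
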
# Example 3

def sample_z(batch_norm, batch_size, hidden_dim, input_dim, is_training,
             keep_prob, latent_dim, size, x):
    # Encoder mapping x -> z. Note: no variable_scope is opened here, so the
    # weights are created in the caller's current scope.
    noise = uniform(dim=input_dim, batch_size=batch_size)
    x_plus_noise = tf.concat([x, noise], axis=1)
    layer_input = hidden_mlp_layers(batch_norm=batch_norm,
                                    hidden_dim=hidden_dim,
                                    is_training=is_training,
                                    keep_prob=keep_prob,
                                    layer_input=x_plus_noise,
                                    size=size)
    w_z, b_z = create_nn_weights('z', 'encoder',
                                 [hidden_dim[size - 1], latent_dim])
    z = mlp_neuron(layer_input, w_z, b_z, activation=False)
    return z
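
# Hedged usage sketch (illustrative): since sample_z creates its weights in
# the caller's scope, it is wrapped in an explicit variable_scope here.
def _example_sample_z():
    x = tf.placeholder(tf.float32, shape=[64, 784])
    is_training = tf.placeholder(tf.bool)
    with tf.variable_scope('encoder'):
        z = sample_z(batch_norm=True, batch_size=64, hidden_dim=[256, 128],
                     input_dim=784, is_training=is_training, keep_prob=0.9,
                     latent_dim=50, size=2, x=x)
    return z
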
# Example 4

def pt_given_x(x,
               hidden_dim,
               is_training,
               batch_norm,
               batch_size,
               input_dim,
               noise_alpha,
               keep_prob=0.9,
               reuse=False):
    size = len(hidden_dim)
    with tf.variable_scope('generate_t_given_x', reuse=reuse):
        # Input-level noise, scaled by the first entry of noise_alpha.
        noise = uniform(dim=input_dim, batch_size=batch_size) * tf.gather(noise_alpha, 0)
        x_plus_noise = tf.concat([x, noise], axis=1)
        hidden_x = hidden_mlp_layers_noise(batch_norm=batch_norm, hidden_dim=hidden_dim,
                                           is_training=is_training, keep_prob=keep_prob,
                                           layer_input=x_plus_noise, size=size, batch_size=batch_size,
                                           noise_alpha=noise_alpha)

        w_t, b_t = create_nn_weights('t', 'encoder', [hidden_x.get_shape().as_list()[1], 1])
        t_mu = mlp_neuron(hidden_x, w_t, b_t, activation=False)
        return tf.exp(t_mu)
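
# Hedged usage sketch (illustrative): noise_alpha holds one scale per noise
# injection -- index 0 for the input noise, indices 1..len(hidden_dim) for
# the per-layer noise inside hidden_mlp_layers_noise.
def _example_pt_given_x():
    x = tf.placeholder(tf.float32, shape=[64, 784])
    is_training = tf.placeholder(tf.bool)
    noise_alpha = tf.constant([1.0, 0.5, 0.25])  # input + two hidden layers
    t = pt_given_x(x, hidden_dim=[256, 128], is_training=is_training,
                   batch_norm=True, batch_size=64, input_dim=784,
                   noise_alpha=noise_alpha)
    return t
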

def hidden_mlp_layers_noise(batch_norm, hidden_dim, is_training, keep_prob,
                            layer_input, noise_alpha, size, batch_size):
    # Stack of hidden layers; after each layer, fresh noise scaled by
    # noise_alpha[i + 1] is concatenated onto the activations.
    tmp = layer_input
    for i in np.arange(size):
        input_shape = tmp.get_shape().as_list()[1]
        print("layer input shape: {}".format(input_shape))  # debug trace
        w_hi, b_hi = create_nn_weights('h{}_z'.format(i), 'decoder', [input_shape, hidden_dim[i]])
        h_i = dropout_normalised_mlp(layer_input=tmp, weights=w_hi, biases=b_hi,
                                     is_training=is_training,
                                     batch_norm=batch_norm, keep_prob=keep_prob,
                                     layer='h{}_z_decoder'.format(i))

        # noise = standard_gaussian(dim=hidden_dim[i], batch_size=batch_size) * tf.gather(noise_alpha, i + 1)
        noise = uniform(dim=hidden_dim[i], batch_size=batch_size) * tf.gather(noise_alpha, i + 1)
        tmp = tf.concat([h_i, noise], axis=1)
    return tmp
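
# The helpers used throughout this listing (`uniform`, `create_nn_weights`,
# `mlp_neuron`, `dropout_normalised_mlp`, `hidden_mlp_layers`) are defined
# elsewhere in the project. The minimal sketches below are assumptions about
# their behaviour, inferred from the call sites, so the listing can be read
# end to end; they are not the original implementations.
def uniform(dim, batch_size):
    # Uniform noise in [-1, 1), one row per example.
    return tf.random_uniform([batch_size, dim], minval=-1.0, maxval=1.0)


def create_nn_weights(layer, network, shape):
    # One weight matrix and bias vector per layer, named for variable reuse.
    w = tf.get_variable('W_{}_{}'.format(network, layer), shape=shape,
                        initializer=tf.random_normal_initializer(stddev=0.05))
    b = tf.get_variable('b_{}_{}'.format(network, layer), shape=[shape[1]],
                        initializer=tf.zeros_initializer())
    return w, b


def mlp_neuron(layer_input, weights, biases, activation=True):
    # Affine transform with an optional ReLU non-linearity.
    logits = tf.matmul(layer_input, weights) + biases
    return tf.nn.relu(logits) if activation else logits


def dropout_normalised_mlp(layer_input, weights, biases, is_training,
                           batch_norm, keep_prob, layer):
    # Affine -> (optional) batch norm -> ReLU -> dropout.
    h = tf.matmul(layer_input, weights) + biases
    if batch_norm:
        h = tf.layers.batch_normalization(h, training=is_training,
                                          name='bn_{}'.format(layer))
    h = tf.nn.relu(h)
    return tf.nn.dropout(h, keep_prob=keep_prob)


def hidden_mlp_layers(batch_norm, hidden_dim, is_training, keep_prob,
                      layer_input, size):
    # Noise-free counterpart of hidden_mlp_layers_noise above.
    tmp = layer_input
    for i in np.arange(size):
        input_shape = tmp.get_shape().as_list()[1]
        w_hi, b_hi = create_nn_weights('h{}'.format(i), 'mlp',
                                       [input_shape, hidden_dim[i]])
        tmp = dropout_normalised_mlp(layer_input=tmp, weights=w_hi,
                                     biases=b_hi, is_training=is_training,
                                     batch_norm=batch_norm,
                                     keep_prob=keep_prob,
                                     layer='h{}_mlp'.format(i))
    return tmp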