import tensorflow as tf

# NOTE: create_nn_weights, dropout_normalised_mlp, mlp_neuron, uniform,
# hidden_mlp_layers and hidden_mlp_layers_noise are helper functions defined
# elsewhere in this project.


def pt_given_z(z,
               hidden_dim,
               batch_size,
               is_training,
               batch_norm,
               keep_prob,
               scope='generate_t_given_z',
               reuse=False):
    """Generative p(t|z): one hidden layer on z, uniform noise appended, then an
    exponentiated linear read-out; returns (t, z) with t strictly positive."""
    with tf.variable_scope(scope, reuse=reuse):
        # Variables
        input_shape = z.get_shape().as_list()[1]

        w_hi, b_hi = create_nn_weights('h_z_3', 'decoder',
                                       [input_shape, hidden_dim[-1]])
        hidden_z = dropout_normalised_mlp(layer_input=z,
                                          weights=w_hi,
                                          biases=b_hi,
                                          is_training=is_training,
                                          batch_norm=batch_norm,
                                          keep_prob=keep_prob,
                                          layer='h_z_decoder')

        noise = uniform(dim=hidden_dim[-1], batch_size=batch_size)
        hidden_z_plus_noise = tf.concat([hidden_z, noise], axis=1)
        input_shape = hidden_z_plus_noise.get_shape().as_list()[1]

        w_t, b_t = create_nn_weights('t', 'encoder', [input_shape, 1])
        t_mu = mlp_neuron(hidden_z_plus_noise, w_t, b_t, activation=False)
        return tf.exp(t_mu), z
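

# --- Usage sketch (added, not from the original source): assumes TF 1.x and
# that the helpers above are importable from this project. latent_dim=10,
# hidden_dim=[200, 100] and batch_size=64 are illustrative values only.
def _example_pt_given_z_usage():
    latent_dim, batch_size = 10, 64
    z_ph = tf.placeholder(tf.float32, shape=[batch_size, latent_dim], name='z')
    is_training_ph = tf.placeholder(tf.bool, name='is_training')
    # t is strictly positive because the final read-out is exponentiated.
    t, z_out = pt_given_z(z_ph,
                          hidden_dim=[200, 100],
                          batch_size=batch_size,
                          is_training=is_training_ph,
                          batch_norm=True,
                          keep_prob=0.9)
    return t, z_out
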
def pt_log_normal_given_x(x,
                          hidden_dim,
                          is_training,
                          batch_norm,
                          keep_prob=1,
                          reuse=False,
                          scope='generate_t'):
    """Parameterise a log-normal p(t|x): returns the mean and log-variance of
    log t, each squeezed to shape [batch]."""
    size = len(hidden_dim)
    with tf.variable_scope(scope, reuse=reuse):
        # Variables
        layer_input = hidden_mlp_layers(layer_input=x,
                                        is_training=is_training,
                                        batch_norm=batch_norm,
                                        keep_prob=keep_prob,
                                        size=size,
                                        hidden_dim=hidden_dim)

        w_mu, b_mu = create_nn_weights('mu_t', 'decoder',
                                       [hidden_dim[size - 1], 1])
        w_logvar, b_logvar = create_nn_weights('var_t', 'decoder',
                                               [hidden_dim[size - 1], 1])
        # Model
        # Reconstruction layer
        t_mu = mlp_neuron(layer_input, w_mu, b_mu, activation=False)
        t_logvar = mlp_neuron(layer_input,
                              w_logvar,
                              b_logvar,
                              activation=False)
        squeezed_t_mu = tf.squeeze(t_mu)
        squeezed_t_logvar = tf.squeeze(t_logvar)
        return squeezed_t_mu, squeezed_t_logvar
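

# --- Usage sketch (added, not from the original source): one standard way to
# draw t from the (mu, logvar) pair returned above, via the log-normal
# reparameterisation t = exp(mu + sigma * eps). TF 1.x API assumed.
def _example_sample_t_log_normal(x_ph, hidden_dim, is_training):
    t_mu, t_logvar = pt_log_normal_given_x(x_ph,
                                           hidden_dim=hidden_dim,
                                           is_training=is_training,
                                           batch_norm=True)
    eps = tf.random_normal(tf.shape(t_mu))
    t_sample = tf.exp(t_mu + tf.exp(0.5 * t_logvar) * eps)
    return t_sample
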
def pz_given_x(x,
               hidden_dim,
               is_training,
               batch_norm,
               keep_prob=0.9,
               scope='generate_z_given_x',
               reuse=False):
    """Map x through two hidden layers to a latent representation z."""
    with tf.variable_scope(scope, reuse=reuse):
        input_shape = x.get_shape().as_list()[1]

        w_hi, b_hi = create_nn_weights('h_z_1', 'decoder',
                                       [input_shape, hidden_dim[0]])
        hidden_x = dropout_normalised_mlp(layer_input=x,
                                          weights=w_hi,
                                          biases=b_hi,
                                          is_training=is_training,
                                          batch_norm=batch_norm,
                                          keep_prob=keep_prob,
                                          layer='h_x_decoder')

        input_shape = hidden_x.get_shape().as_list()[1]

        w_hi, b_hi = create_nn_weights('h_z_2', 'decoder',
                                       [input_shape, hidden_dim[1]])

        hidden_z = dropout_normalised_mlp(layer_input=hidden_x,
                                          weights=w_hi,
                                          biases=b_hi,
                                          is_training=is_training,
                                          batch_norm=batch_norm,
                                          keep_prob=keep_prob,
                                          layer='h_z_decoder')

        return hidden_z
def generate_x_given_z(z,
                       hidden_dim,
                       latent_dim,
                       is_training,
                       batch_norm,
                       batch_size,
                       input_dim,
                       keep_prob=1,
                       reuse=False):
    """Generative p(x|z): append uniform noise of size latent_dim to z, run the
    hidden MLP stack, and return a linear (unactivated) reconstruction of x."""
    size = len(hidden_dim)
    with tf.variable_scope("decoder", reuse=reuse):
        noise = uniform(dim=latent_dim, batch_size=batch_size)
        z_plus_noise = tf.concat([z, noise], axis=1)
        layer_input = hidden_mlp_layers(batch_norm=batch_norm,
                                        hidden_dim=hidden_dim,
                                        is_training=is_training,
                                        keep_prob=keep_prob,
                                        layer_input=z_plus_noise,
                                        size=size)

        w_x, b_x = create_nn_weights('x', 'decoder',
                                     [hidden_dim[size - 1], input_dim])
        # Model
        # Reconstruction layer
        x = mlp_neuron(layer_input, w_x, b_x, activation=False)
        return x
def sample_z(batch_norm, batch_size, hidden_dim, input_dim, is_training,
             keep_prob, latent_dim, size, x):
    """Append uniform noise to x, run the hidden MLP stack, and return a linear
    read-out of size latent_dim as z."""
    noise = uniform(dim=input_dim, batch_size=batch_size)
    x_plus_noise = tf.concat([x, noise], axis=1)
    layer_input = hidden_mlp_layers(batch_norm=batch_norm,
                                    hidden_dim=hidden_dim,
                                    is_training=is_training,
                                    keep_prob=keep_prob,
                                    layer_input=x_plus_noise,
                                    size=size)
    w_z, b_z = create_nn_weights('z', 'encoder',
                                 [hidden_dim[size - 1], latent_dim])
    z = mlp_neuron(layer_input, w_z, b_z, activation=False)
    return z
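

# --- Usage sketch (added, not from the original source): wiring sample_z into
# generate_x_given_z for an encode/decode pass. The 'encoder' scope name and
# the hyperparameters below are illustrative assumptions, not from the project.
def _example_encode_decode(x_ph, is_training, batch_size, input_dim,
                           hidden_dim=(256, 128), latent_dim=10):
    hidden_dim = list(hidden_dim)
    with tf.variable_scope('encoder'):
        z = sample_z(batch_norm=True, batch_size=batch_size,
                     hidden_dim=hidden_dim, input_dim=input_dim,
                     is_training=is_training, keep_prob=0.9,
                     latent_dim=latent_dim, size=len(hidden_dim), x=x_ph)
    x_recon = generate_x_given_z(z,
                                 hidden_dim=hidden_dim,
                                 latent_dim=latent_dim,
                                 is_training=is_training,
                                 batch_norm=True,
                                 batch_size=batch_size,
                                 input_dim=input_dim)
    return z, x_recon
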
def pt_given_x(x,
               hidden_dim,
               is_training,
               batch_norm,
               batch_size,
               input_dim,
               noise_alpha,
               keep_prob=0.9,
               reuse=False):
    """Generative p(t|x): append uniform noise scaled by noise_alpha[0] to x,
    run the noisy hidden stack, and exponentiate a linear read-out so t > 0."""
    size = len(hidden_dim)
    with tf.variable_scope('generate_t_given_x', reuse=reuse):
        # Variables
        noise = uniform(dim=input_dim, batch_size=batch_size) * tf.gather(noise_alpha, 0)
        x_plus_noise = tf.concat([x, noise], axis=1)
        hidden_x = hidden_mlp_layers_noise(batch_norm=batch_norm, hidden_dim=hidden_dim,
                                           is_training=is_training, keep_prob=keep_prob,
                                           layer_input=x_plus_noise, size=size, batch_size=batch_size,
                                           noise_alpha=noise_alpha)

        w_t, b_t = create_nn_weights('t', 'encoder', [hidden_x.get_shape().as_list()[1], 1])
        t_mu = mlp_neuron(hidden_x, w_t, b_t, activation=False)
        return tf.exp(t_mu)
def discriminator(pair_one,
                  pair_two,
                  hidden_dim,
                  is_training,
                  batch_norm,
                  scope,
                  keep_prob=1,
                  reuse=False):
    """Embed each member of a pair with an MLP, concatenate the two embeddings,
    and return (sigmoid probability, raw logit) for the pair."""
    size = len(hidden_dim)
    with tf.variable_scope(scope, reuse=reuse):
        # Variables
        print("scope:{}, pair_one:{}, pair_two:{}".format(scope, pair_one.shape, pair_two.shape))
        hidden_pair_one = hidden_mlp_layers(batch_norm=batch_norm, hidden_dim=hidden_dim,
                                            is_training=is_training, keep_prob=keep_prob,
                                            layer_input=pair_one, size=size)

        hidden_pair_two = hidden_mlp_layers(batch_norm=batch_norm, hidden_dim=hidden_dim,
                                            is_training=is_training, keep_prob=keep_prob,
                                            layer_input=pair_two, size=size)
        hidden_pairs = tf.concat([hidden_pair_one, hidden_pair_two], axis=1)
        print("hidden_pairs:{}".format(hidden_pairs.get_shape()))
        w_logit, b_logit = create_nn_weights('logits', 'discriminator', [hidden_dim[size - 1] * 2, 1])
        f = mlp_neuron(layer_input=hidden_pairs, weights=w_logit, biases=b_logit, activation=False)
        # f is the raw logit; sigmoid(f) gives the probability for the pair.
        prob = tf.nn.sigmoid(f)

        return tf.squeeze(prob), tf.squeeze(f)
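

# --- Usage sketch (added, not from the original source): one conventional way
# to turn the raw logits returned above into an adversarial loss with the
# standard sigmoid cross-entropy. Which tensors form the "real" and "fake"
# pairs is model-specific; the argument names below are hypothetical.
def _example_discriminator_loss(real_pair_one, real_pair_two,
                                fake_pair_one, fake_pair_two,
                                hidden_dim, is_training):
    _, real_logits = discriminator(real_pair_one, real_pair_two, hidden_dim,
                                   is_training, batch_norm=True,
                                   scope='discriminator')
    # Reuse the same discriminator variables for the fake pairs.
    _, fake_logits = discriminator(fake_pair_one, fake_pair_two, hidden_dim,
                                   is_training, batch_norm=True,
                                   scope='discriminator', reuse=True)
    d_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(real_logits),
                                                logits=real_logits) +
        tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(fake_logits),
                                                logits=fake_logits))
    return d_loss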