Example #1
def qy_given_ax(a,
                x,
                input_dim,
                hidden_dim,
                latent_dim,
                num_classes,
                batch_norm,
                is_training,
                reuse=False):
    # Classifier q(y|a,x)
    with tf.variable_scope("y_classifier", reuse=reuse):
        w_h1_a, b_h1_a = create_nn_weights('y_h1_a', 'infer',
                                           [latent_dim, hidden_dim])
        w_h1_x, b_h1_x = create_nn_weights('y_h1_x', 'infer',
                                           [input_dim, hidden_dim])

        w_h1, b_h1 = create_nn_weights('y_h1', 'infer',
                                       [hidden_dim, hidden_dim])
        # w_h1, b_h1 = create_nn_weights('y_h1', 'infer', [latent_dim + input_dim, hidden_dim])
        w_h2, b_h2 = create_nn_weights('y_h2', 'infer',
                                       [hidden_dim, hidden_dim])
        w_y, b_y = create_nn_weights('y_fully_connected', 'infer',
                                     [hidden_dim, num_classes])

        l_qa_to_qy = mlp_neuron(a, w_h1_a, b_h1_a, activation=False)
        l_x_to_qy = mlp_neuron(x, w_h1_x, b_h1_x, activation=False)

        h1 = normalized_mlp(tf.add(l_x_to_qy, l_qa_to_qy),
                            w_h1,
                            b_h1,
                            is_training,
                            batch_norm=batch_norm)
        h2 = normalized_mlp(h1, w_h2, b_h2, is_training, batch_norm=batch_norm)
        logits = mlp_neuron(h2, w_y, b_y, activation=False)
    return logits
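
Every example on this page calls create_nn_weights, mlp_neuron and normalized_mlp without defining them. Below is a minimal sketch, assuming only the signatures implied by the call sites; the actual helpers in the source repository may differ.

import tensorflow as tf  # TF 1.x

def create_nn_weights(layer, net, shape):
    # Assumed reconstruction: one weight matrix and bias per layer, named so
    # that variable sharing works under tf.variable_scope(..., reuse=True).
    w = tf.get_variable('W_{}_{}'.format(layer, net), shape,
                        initializer=tf.glorot_uniform_initializer())
    b = tf.get_variable('b_{}_{}'.format(layer, net), [shape[1]],
                        initializer=tf.zeros_initializer())
    return w, b

def mlp_neuron(inputs, w, b, activation=True):
    # Affine layer; ReLU unless activation=False (used for logits, mu, logvar).
    z = tf.matmul(inputs, w) + b
    return tf.nn.relu(z) if activation else z

def normalized_mlp(inputs, w, b, is_training, batch_norm=False):
    # Affine layer with optional batch normalization before the nonlinearity.
    # (Real training code would also run the tf.GraphKeys.UPDATE_OPS ops.)
    z = tf.matmul(inputs, w) + b
    if batch_norm:
        z = tf.layers.batch_normalization(z, training=is_training)
    return tf.nn.relu(z)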
Example #2
def qy_given_z1(z1, input_dim, hidden_dim, num_classes, reuse=False):
    # Classifier q(y|z1)
    with tf.variable_scope("y_classifier", reuse=reuse):
        w_h1, b_h1 = create_nn_weights('y_h1', 'infer', [input_dim, hidden_dim])
        w_h2, b_h2 = create_nn_weights('y_h2', 'infer', [hidden_dim, hidden_dim])
        w_y, b_y = create_nn_weights('y_fully_connected', 'infer', [hidden_dim, num_classes])

        h1 = mlp_neuron(z1, w_h1, b_h1)
        h2 = mlp_neuron(h1, w_h2, b_h2)
        logits = mlp_neuron(h2, w_y, b_y, activation=False)
    return logits
Example #3
    def build_model(self):
        with tf.variable_scope("y_classifier"):
            w_h1, b_h1 = create_nn_weights('y_h1', 'infer', [self.input_dim, self.hidden_dim])
            w_h2, b_h2 = create_nn_weights('y_h2', 'infer', [self.hidden_dim, self.hidden_dim])
            w_y, b_y = create_nn_weights('y_fully_connected', 'infer', [self.hidden_dim, self.num_classes])

            h1 = mlp_neuron(self.x, w_h1, b_h1)
            h2 = mlp_neuron(h1, w_h2, b_h2)
            logits = mlp_neuron(h2, w_y, b_y, activation=False)
            y_pred = tf.nn.softmax(logits)
            y_pred_cls = tf.argmax(y_pred, axis=1)
            cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=self.y)
            cost = tf.reduce_mean(cross_entropy)
        return logits, y_pred_cls, cost
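
build_model returns a scalar cost, so training reduces to a standard TF1 minimize loop. A minimal sketch; the optimizer choice and the model, num_steps, batch_x and batch_y names are assumptions, not part of the example.

logits, y_pred_cls, cost = model.build_model()
train_op = tf.train.AdamOptimizer(1e-3).minimize(cost)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(num_steps):
        _, loss = sess.run([train_op, cost],
                           feed_dict={model.x: batch_x, model.y: batch_y})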
Example #4
def px_given_zya(z,
                 y,
                 qa,
                 hidden_dim,
                 input_dim,
                 latent_dim,
                 num_classes,
                 is_training,
                 batch_norm,
                 reuse=False):
    # Generative p(x|z,y,a)
    with tf.variable_scope("decoder", reuse=reuse):
        # Variables
        w_h1_z, b_h1_z = create_nn_weights('h1_x_z', 'decoder',
                                           [latent_dim, hidden_dim])
        w_h1_y, b_h1_y = create_nn_weights('h1_x_y', 'decoder',
                                           [num_classes, hidden_dim])
        w_h1_a, b_h1_a = create_nn_weights('h1_x_a', 'decoder',
                                           [latent_dim, hidden_dim])

        w_h1, b_h1 = create_nn_weights('h1_x', 'decoder',
                                       [hidden_dim, hidden_dim])
        # w_h1, b_h1 = create_nn_weights('h1_x', 'decoder', [latent_dim + num_classes, hidden_dim])
        w_h2, b_h2 = create_nn_weights('h2_x', 'decoder',
                                       [hidden_dim, hidden_dim])

        w_mu, b_mu = create_nn_weights('mu_x', 'decoder',
                                       [hidden_dim, input_dim])
        # Model
        # Decoder hidden layer
        l_y_to_px = mlp_neuron(y, w_h1_y, b_h1_y, activation=False)
        l_qz_to_px = mlp_neuron(z, w_h1_z, b_h1_z, activation=False)
        l_qa_to_px = mlp_neuron(qa, w_h1_a, b_h1_a, activation=False)

        h1 = normalized_mlp(l_y_to_px + l_qz_to_px + l_qa_to_px,
                            w_h1,
                            b_h1,
                            is_training,
                            batch_norm=batch_norm)
        h2 = normalized_mlp(h1, w_h2, b_h2, is_training, batch_norm=batch_norm)

        # Reconstruction layer
        fully_connected = mlp_neuron(h2, w_mu, b_mu, activation=False)
        # Sigmoid keeps x_mu in (0, 1), i.e. a Bernoulli mean for binarized inputs
        x_mu = tf.nn.sigmoid(fully_connected)
        return x_mu
Example #5
def q_z2_given_z1y(z1, y, latent_dim, num_classes, hidden_dim, input_dim, reuse=False):
    # Recognition q(z2|z1,y)
    with tf.variable_scope("encoder_M2", reuse=reuse):
        # Variables
        w_h1, b_h1 = create_nn_weights('h1_z2', 'encoder',
                                       [input_dim + num_classes, hidden_dim])

        w_mu_z2, b_mu_z2 = create_nn_weights('mu_z2', 'encoder', [hidden_dim, latent_dim])
        w_var_z2, b_var_z2 = create_nn_weights('var_z2', 'encoder', [hidden_dim, latent_dim])

        # Hidden layers
        h1 = mlp_neuron(tf.concat([z1, y], axis=1), w_h1, b_h1)
        # Z2 latent layer mu and var
        logvar_z2 = mlp_neuron(h1, w_var_z2, b_var_z2, activation=False)
        mu_z2 = mlp_neuron(h1, w_mu_z2, b_mu_z2, activation=False)
        z2 = draw_norm(latent_dim, mu_z2, logvar_z2)
        return z2, mu_z2, logvar_z2
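
draw_norm is not shown on this page. Judging from its call sites (latent_dim, mu, logvar), it presumably samples with the reparameterization trick; a plausible sketch, not necessarily the repository's actual helper:

def draw_norm(dim, mu, logvar):
    # z = mu + sigma * eps with eps ~ N(0, I); keeps the sample
    # differentiable with respect to mu and logvar.
    eps = tf.random_normal([tf.shape(mu)[0], dim])
    return mu + tf.exp(0.5 * logvar) * eps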
Example #6
def qz_given_ayx(a,
                 y,
                 x,
                 latent_dim,
                 num_classes,
                 hidden_dim,
                 input_dim,
                 is_training,
                 batch_norm,
                 reuse=False):
    # Recognition q(z|x,a,y)
    with tf.variable_scope("encoder", reuse=reuse):
        # Variables
        w_h1_a, b_h1_a = create_nn_weights('h1_z_a', 'encoder',
                                           [latent_dim, hidden_dim])
        w_h1_x, b_h1_x = create_nn_weights('h1_z_x', 'encoder',
                                           [input_dim, hidden_dim])
        w_h1_y, b_h1_y = create_nn_weights('h1_z_y', 'encoder',
                                           [num_classes, hidden_dim])

        w_h1, b_h1 = create_nn_weights('h1_z', 'encoder',
                                       [hidden_dim, hidden_dim])
        # w_h1, b_h1 = create_nn_weights('h1_z', 'encoder', [latent_dim + num_classes + input_dim, hidden_dim])
        w_h2, b_h2 = create_nn_weights('h2_z', 'encoder',
                                       [hidden_dim, hidden_dim])

        w_mu_z, b_mu_z = create_nn_weights('mu_z', 'encoder',
                                           [hidden_dim, latent_dim])
        w_var_z, b_var_z = create_nn_weights('var_z', 'encoder',
                                             [hidden_dim, latent_dim])

        # Hidden layers
        l_qa_to_qz = mlp_neuron(a, w_h1_a, b_h1_a, activation=False)
        l_x_to_qz = mlp_neuron(x, w_h1_x, b_h1_x, activation=False)
        l_y_to_qz = mlp_neuron(y, w_h1_y, b_h1_y, activation=False)

        h1 = normalized_mlp(l_qa_to_qz + l_x_to_qz + l_y_to_qz,
                            w_h1,
                            b_h1,
                            is_training,
                            batch_norm=batch_norm)
        h2 = normalized_mlp(h1, w_h2, b_h2, is_training, batch_norm=batch_norm)
        # z latent layer mu and var
        logvar_z = mlp_neuron(h2, w_var_z, b_var_z, activation=False)
        mu_z = mlp_neuron(h2, w_mu_z, b_mu_z, activation=False)
        z = draw_norm(latent_dim, mu_z, logvar_z)
        return z, mu_z, logvar_z
Example #7
def q_z1_given_x(x, hidden_dim, input_dim, latent_dim, reuse=False):
    # Recognition q(z1|x)
    with tf.variable_scope("encoder_M1", reuse=reuse):
        # Variables
        w_h1, b_h1 = create_nn_weights('h1_z1', 'encoder', [input_dim, hidden_dim])
        w_h2, b_h2 = create_nn_weights('h2_z1', 'encoder', [hidden_dim, hidden_dim])

        w_mu_z1, b_mu_z1 = create_nn_weights('mu_z1', 'encoder', [hidden_dim, latent_dim])
        w_var_z1, b_var_z1 = create_nn_weights('var_z1', 'encoder', [hidden_dim, latent_dim])

        # Hidden layers
        h1 = mlp_neuron(x, w_h1, b_h1)
        h2 = mlp_neuron(h1, w_h2, b_h2)

        # Z1 latent layer mu and var
        logvar_z1 = mlp_neuron(h2, w_var_z1, b_var_z1, activation=False)
        mu_z1 = mlp_neuron(h2, w_mu_z1, b_mu_z1, activation=False)
        # Sample z1 via the reparameterization helper
        z1 = draw_norm(latent_dim, mu_z1, logvar_z1)
        return z1, mu_z1, logvar_z1
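
Examples 5 and 7 compose into the stacked M1+M2 inference chain q(z1|x) -> q(z2|z1,y); note that q_z2_given_z1y's input_dim must equal M1's latent_dim, since h1 consumes tf.concat([z1, y]). A wiring sketch with assumed placeholder shapes:

x = tf.placeholder(tf.float32, [None, input_dim])
y = tf.placeholder(tf.float32, [None, num_classes])

z1, mu_z1, logvar_z1 = q_z1_given_x(x, hidden_dim, input_dim, latent_dim)
z2, mu_z2, logvar_z2 = q_z2_given_z1y(z1, y, latent_dim, num_classes,
                                      hidden_dim, input_dim=latent_dim)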
Example #8
def pa_given_zy(z,
                y,
                hidden_dim,
                latent_dim,
                num_classes,
                is_training,
                batch_norm,
                reuse=False):
    # Generative p(a|z,y)
    with tf.variable_scope("decoder", reuse=reuse):
        # Variables
        w_h1_z, b_h1_z = create_nn_weights('h1_a_z', 'decoder',
                                           [latent_dim, hidden_dim])
        w_h1_y, b_h1_y = create_nn_weights('h1_a_y', 'decoder',
                                           [num_classes, hidden_dim])

        w_h1, b_h1 = create_nn_weights('h1_a', 'decoder',
                                       [hidden_dim, hidden_dim])
        # w_h1, b_h1 = create_nn_weights('h1_a', 'decoder', [latent_dim + num_classes, hidden_dim])
        w_h2, b_h2 = create_nn_weights('h2_a', 'decoder',
                                       [hidden_dim, hidden_dim])

        w_mu_a, b_mu_a = create_nn_weights('mu_a', 'decoder',
                                           [hidden_dim, latent_dim])
        w_var_a, b_var_a = create_nn_weights('var_a', 'decoder',
                                             [hidden_dim, latent_dim])
        # Model
        # Decoder hidden layer
        l_y_to_pa = mlp_neuron(y, w_h1_y, b_h1_y, activation=False)
        l_qz_to_pa = mlp_neuron(z, w_h1_z, b_h1_z, activation=False)
        h1 = normalized_mlp(tf.add(l_y_to_pa, l_qz_to_pa),
                            w_h1,
                            b_h1,
                            is_training,
                            batch_norm=batch_norm)
        h2 = normalized_mlp(h1, w_h2, b_h2, is_training, batch_norm=batch_norm)

        # a latent layer mu and var
        logvar_a = mlp_neuron(h2, w_var_a, b_var_a, activation=False)
        mu_a = mlp_neuron(h2, w_mu_a, b_mu_a, activation=False)
        # Sample a via the reparameterization helper
        a = draw_norm(latent_dim, mu_a, logvar_a)
        return a, mu_a, logvar_a
Example #9
def px_given_z1(z1, hidden_dim, input_dim, latent_dim, reuse=False):
    # Generative p(x|z1)
    with tf.variable_scope("decoder_M1", reuse=reuse):
        # Variables
        w_h1, b_h1 = create_nn_weights('h1_x', 'decoder',
                                       [latent_dim, hidden_dim])
        w_h2, b_h2 = create_nn_weights('h2_x', 'decoder',
                                       [hidden_dim, hidden_dim])

        w_mu, b_mu = create_nn_weights('mu_x', 'decoder',
                                       [hidden_dim, input_dim])
        # Model
        # Decoder hidden layer
        h1 = mlp_neuron(z1, w_h1, b_h1)
        h2 = mlp_neuron(h1, w_h2, b_h2)
        # Reconstruction layer: sigmoid keeps x_mu in (0, 1)
        x_mu = tf.nn.sigmoid(mlp_neuron(h2, w_mu, b_mu, activation=False))
        tf.summary.image('x_mu', tf.reshape(x_mu[0], [1, 28, 28, 1]))
        return x_mu
Example #10
def qa_given_x(x, hidden_dim, input_dim, latent_dim, is_training, reuse=False):
    # Auxiliary q(a|x)
    with tf.variable_scope("encoder", reuse=reuse):
        # Variables
        w_h1, b_h1 = create_nn_weights('h1_a', 'encoder',
                                       [input_dim, hidden_dim])
        w_h2, b_h2 = create_nn_weights('h2_a', 'encoder',
                                       [hidden_dim, hidden_dim])

        w_mu_a, b_mu_a = create_nn_weights('mu_a', 'encoder',
                                           [hidden_dim, latent_dim])
        w_var_a, b_var_a = create_nn_weights('var_a', 'encoder',
                                             [hidden_dim, latent_dim])

        # Hidden layers
        h1 = mlp_neuron(x, w_h1, b_h1)
        h2 = mlp_neuron(h1, w_h2, b_h2)

        # a latent layer mu and var
        logvar_a = mlp_neuron(h2, w_var_a, b_var_a, activation=False)
        mu_a = mlp_neuron(h2, w_mu_a, b_mu_a, activation=False)
        # Sample a via the reparameterization helper
        a = draw_norm(latent_dim, mu_a, logvar_a)
        return a, mu_a, logvar_a
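
Examples 10, 1, and 6 together form the inference path of an auxiliary deep generative model: a ~ q(a|x), y from q(y|a,x), then z ~ q(z|a,y,x). A wiring sketch; the placeholders and batch_norm=True are assumptions:

x = tf.placeholder(tf.float32, [None, input_dim])
y = tf.placeholder(tf.float32, [None, num_classes])
is_training = tf.placeholder(tf.bool)

a, mu_a, logvar_a = qa_given_x(x, hidden_dim, input_dim, latent_dim, is_training)
y_logits = qy_given_ax(a, x, input_dim, hidden_dim, latent_dim,
                       num_classes, batch_norm=True, is_training=is_training)
z, mu_z, logvar_z = qz_given_ayx(a, y, x, latent_dim, num_classes,
                                 hidden_dim, input_dim, is_training,
                                 batch_norm=True)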