Example #1
def pz1_given_z2y(y, z2, num_channels, filter_sizes, num_filters, input_dim, fc_size, reuse=False):
    with tf.variable_scope("decoder_M2", reuse=reuse):
        # Concatenate the class label y and latent code z2 as the decoder input
        z2y = tf.concat([y, z2], axis=1)
        expanded_z2y = tf.expand_dims(tf.expand_dims(z2y, 1), 1)
        print("filter_sizes:{}".format(filter_sizes))
        layer_conv1, weights_conv1 = conv_layer(input=expanded_z2y, num_input_channels=num_channels,
                                                filter_size=filter_sizes[0],
                                                num_filters=num_filters[0], use_pooling=True, layer_name='layer1')
        print("layer conv1: {}".format(layer_conv1))
        # ### Convolutional Layer 2
        layer_conv2, weights_conv2 = conv_layer(input=layer_conv1, num_input_channels=num_filters[0],
                                                filter_size=filter_sizes[1], num_filters=num_filters[1],
                                                use_pooling=True, layer_name='layer2')
        print("layer conv2: {}".format(layer_conv2))

        # ### Flatten Layer
        layer_flat, num_features = flatten_layer(layer_conv2)
        print("layer flat: {}".format(layer_flat))
        print("num_features: {}".format(num_features))

        # ### Fully-Connected Layer 1
        layer_fc1 = fc_layer(input=layer_flat, num_inputs=num_features, num_outputs=fc_size, use_relu=True)
        print("layer fc1: {}".format(layer_fc1))

        # z1 reconstruction parameters: mu and log-variance
        z1_logvar = fc_layer(input=layer_fc1, num_inputs=fc_size, num_outputs=input_dim, use_relu=False)
        z1_mu = fc_layer(input=layer_fc1, num_inputs=fc_size, num_outputs=input_dim, use_relu=False)
        z1 = draw_norm(input_dim, z1_mu, z1_logvar)

        return z1, z1_mu, z1_logvar
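Every snippet on this page samples from a diagonal Gaussian through draw_norm(dim, mu, logvar), whose definition is not shown. A minimal sketch of such a helper, assuming it implements the usual reparameterization trick in TensorFlow 1.x (the name and signature come from the calls above; the body is an assumption, not the original implementation):

import tensorflow as tf

def draw_norm(dim, mu, logvar):
    # Reparameterization trick: z = mu + sigma * eps, with eps ~ N(0, I).
    # mu and logvar are [batch, dim] tensors produced by the mu/log-variance heads.
    eps = tf.random_normal(tf.stack([tf.shape(mu)[0], dim]), mean=0.0, stddev=1.0)
    return mu + tf.exp(0.5 * logvar) * eps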
Example #2
    def labeled_model(self):
        x_lab = draw_norm(dim=self.latent_dim,
                          mu=self.x_lab_mu,
                          logvar=self.x_lab_logvar)
        z, z_mu, z_logvar = q_z2_given_z1y(z1=x_lab,
                                           y=self.y_lab,
                                           latent_dim=self.latent_dim,
                                           input_dim=self.input_dim,
                                           filter_sizes=self.filter_sizes,
                                           fc_size=self.fc_size,
                                           num_channels=1,
                                           num_filters=self.num_filters)
        logits = qy_given_z1(z1=x_lab,
                             num_classes=self.num_classes,
                             filter_sizes=self.filter_sizes,
                             fc_size=self.fc_size,
                             num_channels=1,
                             num_filters=self.num_filters)
        x, x_mu, x_logvar = pz1_given_z2y(
            y=self.y_lab,
            z2=z,
            input_dim=self.input_dim,
            filter_sizes=[self.filter_sizes[1], self.filter_sizes[0]],
            fc_size=self.fc_size,
            num_channels=1,
            num_filters=[self.num_filters[1], self.num_filters[0]])
        elbo = elbo_M2(z1_recon=[x_mu, x_logvar],
                       z1=x_lab,
                       y=self.y_lab,
                       z2=[z, z_mu, z_logvar])
        classifier_loss, y_pred_cls = softmax_classifier(logits=logits,
                                                         y_true=self.y_lab)
        return elbo, logits, x_mu, classifier_loss, y_pred_cls
Example #3
def q_z2_given_z1y(z1,
                   y,
                   latent_dim,
                   input_dim,
                   fc_size,
                   filter_sizes,
                   num_channels,
                   num_filters,
                   reuse=False):
    with tf.variable_scope("encoder_M2", reuse=reuse):
        # Concatenate the class label y and latent code z1 as the encoder input
        z1y = tf.concat([y, z1], axis=1)
        expanded_z1y = tf.expand_dims(tf.expand_dims(z1y, 1), 1)
        print("filter_sizes:{}".format(filter_sizes))
        layer_conv1, weights_conv1 = conv_layer(
            input=expanded_z1y,
            num_input_channels=num_channels,
            filter_size=filter_sizes[0],
            num_filters=num_filters[0],
            use_pooling=True,
            layer_name='layer1')
        print("layer conv1: {}".format(layer_conv1))
        # ### Convolutional Layer 2
        layer_conv2, weights_conv2 = conv_layer(
            input=layer_conv1,
            num_input_channels=num_filters[0],
            filter_size=filter_sizes[1],
            num_filters=num_filters[1],
            use_pooling=True,
            layer_name='layer2')
        print("layer conv2: {}".format(layer_conv2))

        # ### Flatten Layer
        layer_flat, num_features = flatten_layer(layer_conv2)
        print("layer flat: {}".format(layer_flat))
        print("num_features: {}".format(num_features))

        # ### Fully-Connected Layer 1
        layer_fc1 = fc_layer(input=layer_flat,
                             num_inputs=num_features,
                             num_outputs=fc_size,
                             use_relu=True)
        print("layer fc1: {}".format(layer_fc1))

        # z2 latent parameters: mu and log-variance, both of width latent_dim
        logvar_z2 = fc_layer(input=layer_fc1,
                             num_inputs=fc_size,
                             num_outputs=latent_dim,
                             use_relu=False)
        mu_z2 = fc_layer(input=layer_fc1,
                         num_inputs=fc_size,
                         num_outputs=latent_dim,
                         use_relu=False)
        z2 = draw_norm(latent_dim, mu_z2, logvar_z2)

        return z2, mu_z2, logvar_z2
Example #4
def q_z1_given_x(x,
                 num_channels,
                 filter_sizes,
                 num_filters,
                 latent_dim,
                 fc_size,
                 reuse=False):
    with tf.variable_scope("encoder_M1", reuse=reuse):
        layer_conv1, weights_conv1 = conv_layer(
            input=x,
            num_input_channels=num_channels,
            filter_size=filter_sizes[0],
            num_filters=num_filters[0],
            use_pooling=True,
            layer_name='layer1')
        print("layer conv1: {}".format(layer_conv1))

        # ### Convolutional Layer 2
        layer_conv2, weights_conv2 = conv_layer(
            input=layer_conv1,
            num_input_channels=num_filters[0],
            filter_size=filter_sizes[1],
            num_filters=num_filters[1],
            use_pooling=True,
            layer_name='layer2')
        print("layer conv2: {}".format(layer_conv2))

        # ### Flatten Layer
        layer_flat, num_features = flatten_layer(layer_conv2)
        print("layer flat: {}".format(layer_flat))
        print("num_features: {}".format(num_features))

        # ### Fully-Connected Layer 1
        layer_fc1 = fc_layer(input=layer_flat,
                             num_inputs=num_features,
                             num_outputs=fc_size,
                             use_relu=True)
        print("layer fc1: {}".format(layer_fc1))

        # ### Fully-Connected Layer 2
        logvar_z1 = fc_layer(input=layer_fc1,
                             num_inputs=fc_size,
                             num_outputs=latent_dim,
                             use_relu=False)
        mu_z1 = fc_layer(input=layer_fc1,
                         num_inputs=fc_size,
                         num_outputs=latent_dim,
                         use_relu=False)

        # Model
        z1 = draw_norm(latent_dim, mu_z1, logvar_z1)
        return z1, mu_z1, logvar_z1
Example #5
    def labeled_model(self):
        x_lab = draw_norm(dim=self.latent_dim, mu=self.x_lab_mu, logvar=self.x_lab_logvar)
        z, z_mu, z_logvar = q_z2_given_z1y(z1=x_lab, y=self.y_lab, latent_dim=self.latent_dim,
                                           num_classes=self.num_classes, hidden_dim=self.hidden_dim,
                                           input_dim=self.input_dim)
        logits = qy_given_z1(z1=x_lab, input_dim=self.input_dim,
                             num_classes=self.num_classes, hidden_dim=self.hidden_dim)
        x, x_mu, x_logvar = pz1_given_z2y(y=self.y_lab, z2=z, latent_dim=self.latent_dim,
                                          num_classes=self.num_classes, hidden_dim=self.hidden_dim,
                                          input_dim=self.input_dim)
        elbo = elbo_M2(z1_recon=[x_mu, x_logvar], z1=x_lab, y=self.y_lab, z2=[z, z_mu, z_logvar])
        classifier_loss, y_pred_cls = softmax_classifier(logits=logits, y_true=self.y_lab)
        return elbo, logits, x_mu, classifier_loss, y_pred_cls
Example #6
def q_z2_given_z1y(z1, y, latent_dim, num_classes, hidden_dim, input_dim, reuse=False):
    with tf.variable_scope("encoder_M2", reuse=reuse):
        # Variables
        w_h1, b_h1 = create_nn_weights('h1_z2', 'encoder',
                                       [input_dim + num_classes, hidden_dim])

        w_mu_z2, b_mu_z2 = create_nn_weights('mu_z2', 'encoder', [hidden_dim, latent_dim])
        w_var_z2, b_var_z2 = create_nn_weights('var_z2', 'encoder', [hidden_dim, latent_dim])

        # Hidden layers
        h1 = mlp_neuron(tf.concat([z1, y], axis=1), w_h1, b_h1)
        # Z2 latent layer mu and var
        logvar_z2 = mlp_neuron(h1, w_var_z2, b_var_z2, activation=False)
        mu_z2 = mlp_neuron(h1, w_mu_z2, b_mu_z2, activation=False)
        z2 = draw_norm(latent_dim, mu_z2, logvar_z2)
        return z2, mu_z2, logvar_z2
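The MLP-based snippets rely on two helpers that are not reproduced here, create_nn_weights(name, net, shape) and mlp_neuron(input, weights, biases, activation=True). A plausible sketch inferred from how they are called above, assuming Glorot-initialized weights and an affine layer with an optional ReLU; the bodies are assumptions, not the original code:

import tensorflow as tf

def create_nn_weights(name, net, shape):
    # Weight matrix of shape [n_in, n_out] plus a matching bias vector.
    w = tf.get_variable("W_{}_{}".format(net, name), shape=shape,
                        initializer=tf.glorot_uniform_initializer())
    b = tf.get_variable("b_{}_{}".format(net, name), shape=[shape[1]],
                        initializer=tf.zeros_initializer())
    return w, b

def mlp_neuron(layer_input, weights, biases, activation=True):
    # Affine transform, optionally followed by a ReLU nonlinearity.
    out = tf.matmul(layer_input, weights) + biases
    return tf.nn.relu(out) if activation else out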
Example #7
def qz_given_ayx(a,
                 y,
                 x,
                 latent_dim,
                 num_classes,
                 hidden_dim,
                 input_dim,
                 is_training,
                 batch_norm,
                 reuse=False):
    # Recognition q(z|x,a,y)
    with tf.variable_scope("encoder", reuse=reuse):
        # Variables
        w_h1_a, b_h1_a = create_nn_weights('h1_z_a', 'encoder',
                                           [latent_dim, hidden_dim])
        w_h1_x, b_h1_x = create_nn_weights('h1_z_x', 'encoder',
                                           [input_dim, hidden_dim])
        w_h1_y, b_h1_y = create_nn_weights('h1_z_y', 'encoder',
                                           [num_classes, hidden_dim])

        w_h1, b_h1 = create_nn_weights('h1_z', 'encoder',
                                       [hidden_dim, hidden_dim])
        # w_h1, b_h1 = create_nn_weights('h1_z', 'encoder', [latent_dim + num_classes + input_dim, hidden_dim])
        w_h2, b_h2 = create_nn_weights('h2_z', 'encoder',
                                       [hidden_dim, hidden_dim])

        w_mu_z, b_mu_z = create_nn_weights('mu_z', 'encoder',
                                           [hidden_dim, latent_dim])
        w_var_z, b_var_z = create_nn_weights('var_z', 'encoder',
                                             [hidden_dim, latent_dim])

        # Hidden layers
        l_qa_to_qz = mlp_neuron(a, w_h1_a, b_h1_a, activation=False)
        l_x_to_qz = mlp_neuron(x, w_h1_x, b_h1_x, activation=False)
        l_y_to_qz = mlp_neuron(y, w_h1_y, b_h1_y, activation=False)

        h1 = normalized_mlp(l_qa_to_qz + l_x_to_qz + l_y_to_qz,
                            w_h1,
                            b_h1,
                            is_training,
                            batch_norm=batch_norm)
        h2 = normalized_mlp(h1, w_h2, b_h2, is_training, batch_norm=batch_norm)
        # z latent layer mu and var
        logvar_z = mlp_neuron(h2, w_var_z, b_var_z, activation=False)
        mu_z = mlp_neuron(h2, w_mu_z, b_mu_z, activation=False)
        z = draw_norm(latent_dim, mu_z, logvar_z)
        return z, mu_z, logvar_z
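normalized_mlp(x, w, b, is_training, batch_norm=...) is another external helper; from its use above it appears to be an affine layer with optional batch normalization before the nonlinearity. A hedged sketch under that assumption, not the original implementation:

import tensorflow as tf

def normalized_mlp(layer_input, weights, biases, is_training, batch_norm=True):
    # Affine transform -> optional batch norm -> ReLU. `is_training` switches
    # batch norm between batch statistics and the moving averages.
    out = tf.matmul(layer_input, weights) + biases
    if batch_norm:
        out = tf.layers.batch_normalization(out, training=is_training)
    return tf.nn.relu(out)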
Example #8
    def unlabeled_model(self):
        # Unlabeled
        x_unlab = draw_norm(dim=self.latent_dim,
                            mu=self.x_unlab_mu,
                            logvar=self.x_unlab_logvar)
        logits = qy_given_z1(x_unlab,
                             num_classes=self.num_classes,
                             reuse=True,
                             filter_sizes=self.filter_sizes,
                             fc_size=self.fc_size,
                             num_channels=1,
                             num_filters=self.num_filters)
        elbo = []
        for label in range(self.num_classes):
            y_ulab = one_label_tensor(label, self.num_ulab_batch,
                                      self.num_classes)
            z, z_mu, z_logvar = q_z2_given_z1y(z1=x_unlab,
                                               y=y_ulab,
                                               latent_dim=self.latent_dim,
                                               input_dim=self.input_dim,
                                               reuse=True,
                                               filter_sizes=self.filter_sizes,
                                               fc_size=self.fc_size,
                                               num_channels=1,
                                               num_filters=self.num_filters)
            x, x_mu, x_logvar = pz1_given_z2y(
                y=y_ulab,
                z2=z,
                input_dim=self.input_dim,
                reuse=True,
                filter_sizes=[self.filter_sizes[1], self.filter_sizes[0]],
                fc_size=self.fc_size,
                num_channels=1,
                num_filters=[self.num_filters[1], self.num_filters[0]])

            class_elbo = elbo_M2(z1_recon=[x_mu, x_logvar],
                                 z1=x_unlab,
                                 y=y_ulab,
                                 z2=[z, z_mu, z_logvar])
            elbo.append(class_elbo)
        elbo = tf.convert_to_tensor(elbo)
        print("unlabeled class_elbo:{}".format(elbo))
        return tf.transpose(elbo), logits
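one_label_tensor(label, num_ulab_batch, num_classes), used above to marginalize the unlabeled ELBO over every class, presumably builds a batch of identical one-hot rows for the given label. A minimal sketch under that assumption:

import tensorflow as tf

def one_label_tensor(label, batch_size, num_classes):
    # [batch_size, num_classes] tensor whose rows are all the one-hot encoding
    # of `label`, so q(z2|z1, y) and p(z1|z2, y) can be evaluated per class.
    one_hot = tf.one_hot(label, depth=num_classes)
    return tf.tile(tf.expand_dims(one_hot, 0), [batch_size, 1])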
Example #9
def q_z1_given_x(x, hidden_dim, input_dim, latent_dim, reuse=False):
    with tf.variable_scope("encoder_M1", reuse=reuse):
        # Variables
        w_h1, b_h1 = create_nn_weights('h1_z1', 'encoder', [input_dim, hidden_dim])
        w_h2, b_h2 = create_nn_weights('h2_z1', 'encoder', [hidden_dim, hidden_dim])

        w_mu_z1, b_mu_z1 = create_nn_weights('mu_z1', 'encoder', [hidden_dim, latent_dim])
        w_var_z1, b_var_z1 = create_nn_weights('var_z1', 'encoder', [hidden_dim, latent_dim])

        # Hidden layers
        h1 = mlp_neuron(x, w_h1, b_h1)
        h2 = mlp_neuron(h1, w_h2, b_h2)

        # Z1 latent layer mu and var
        logvar_z1 = mlp_neuron(h2, w_var_z1, b_var_z1, activation=False)
        mu_z1 = mlp_neuron(h2, w_mu_z1, b_mu_z1, activation=False)
        # Model
        z1 = draw_norm(latent_dim, mu_z1, logvar_z1)
        return z1, mu_z1, logvar_z1
Example #10
def pa_given_zy(z,
                y,
                hidden_dim,
                latent_dim,
                num_classes,
                is_training,
                batch_norm,
                reuse=False):
    # Generative p(a|z,y)
    with tf.variable_scope("decoder", reuse=reuse):
        # Variables
        w_h1_z, b_h1_z = create_nn_weights('h1_a_z', 'decoder',
                                           [latent_dim, hidden_dim])
        w_h1_y, b_h1_y = create_nn_weights('h1_a_y', 'decoder',
                                           [num_classes, hidden_dim])

        w_h1, b_h1 = create_nn_weights('h1_a', 'decoder',
                                       [hidden_dim, hidden_dim])
        # w_h1, b_h1 = create_nn_weights('h1_a', 'decoder', [latent_dim + num_classes, hidden_dim])
        w_h2, b_h2 = create_nn_weights('h2_a', 'decoder',
                                       [hidden_dim, hidden_dim])

        w_mu_a, b_mu_a = create_nn_weights('mu_a', 'decoder',
                                           [hidden_dim, latent_dim])
        w_var_a, b_var_a = create_nn_weights('var_a', 'decoder',
                                             [hidden_dim, latent_dim])
        # Model
        # Decoder hidden layer
        l_y_to_pa = mlp_neuron(y, w_h1_y, b_h1_y, activation=False)
        l_qz_to_pa = mlp_neuron(z, w_h1_z, b_h1_z, activation=False)
        h1 = normalized_mlp(tf.add(l_y_to_pa, l_qz_to_pa),
                            w_h1,
                            b_h1,
                            is_training,
                            batch_norm=batch_norm)
        h2 = normalized_mlp(h1, w_h2, b_h2, is_training, batch_norm=batch_norm)

        # a latent layer mu and var
        logvar_a = mlp_neuron(h2, w_var_a, b_var_a, activation=False)
        mu_a = mlp_neuron(h2, w_mu_a, b_mu_a, activation=False)
        # Model
        a = draw_norm(latent_dim, mu_a, logvar_a)
        return a, mu_a, logvar_a
Example #11
def qa_given_x(x, hidden_dim, input_dim, latent_dim, is_training, reuse=False):
    # Auxiliary q(a|x)
    with tf.variable_scope("encoder", reuse=reuse):
        # Variables
        w_h1, b_h1 = create_nn_weights('h1_a', 'encoder',
                                       [input_dim, hidden_dim])
        w_h2, b_h2 = create_nn_weights('h2_a', 'encoder',
                                       [hidden_dim, hidden_dim])

        w_mu_a, b_mu_a = create_nn_weights('mu_a', 'encoder',
                                           [hidden_dim, latent_dim])
        w_var_a, b_var_a = create_nn_weights('var_a', 'encoder',
                                             [hidden_dim, latent_dim])

        # Hidden layers
        h1 = mlp_neuron(x, w_h1, b_h1)
        h2 = mlp_neuron(h1, w_h2, b_h2)

        # a latent layer mu and var
        logvar_a = mlp_neuron(h2, w_var_a, b_var_a, activation=False)
        mu_a = mlp_neuron(h2, w_mu_a, b_mu_a, activation=False)
        # Model
        a = draw_norm(latent_dim, mu_a, logvar_a)
        return a, mu_a, logvar_a