Example #1
    def _objective(self):

        # Labeled
        self.num_lab_batch, self.num_ulab_batch, self.batch_size = get_batch_size(
            num_examples=self.num_examples,
            num_batches=self.num_batches,
            num_lab=self.n_labeled)
        logging.debug(
            "num batches:{}, batch_size:{}, num_lab_batch:{}, num_ulab_batch:{}, epochs:{}"
            .format(self.num_batches, self.batch_size, self.num_lab_batch,
                    self.num_ulab_batch,
                    int(self.num_iterations / self.num_batches)))
        (self.labeled_ELBO, self.y_lab_logits, self.x_recon_lab_mu,
         self.classifier_loss, self.y_pred_cls) = self.labeled_model()
        if self.n_labeled == self.num_examples:
            # Fully supervised: fold the unlabeled split back into the
            # labeled arrays and train on the labeled loss alone.
            self.train_x_l_mu = np.concatenate(
                (self.train_x_l_mu, self.train_x_u_mu), axis=0)
            self.train_x_l_logvar = np.concatenate(
                (self.train_x_l_logvar, self.train_x_u_logvar), axis=0)
            self.train_l_y = np.concatenate((self.train_l_y, self.train_u_y),
                                            axis=0)
            # Adam minimises, so the bound to be maximised is negated.
            self.cost = (
                (self.total_lab_loss() * self.num_examples) +
                prior_weights()) / (-self.batch_size * self.num_examples)
        else:
            # Semi-supervised: include the unlabeled bound as well.
            self.unlabeled_ELBO, self.y_ulab_logits = self.unlabeled_model()
            self.cost = ((self.total_lab_loss() + self.total_unlab_loss()) *
                         self.num_examples + prior_weights()) / (
                             -self.batch_size * self.num_examples)
        tf.summary.scalar('cost', self.cost)
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate,
            beta1=self.beta1,
            beta2=self.beta2).minimize(self.cost)
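
Example #1 leans on two helpers that are not shown: get_batch_size and prior_weights. The sketch below is a guess at their behaviour, inferred only from the call sites, assuming get_batch_size splits each mini-batch between labeled and unlabeled examples in proportion to the labeled fraction and prior_weights returns a Gaussian log-prior summed over the trainable weights.

import tensorflow as tf

def get_batch_size(num_examples, num_batches, num_lab):
    # Assumed behaviour: derive the overall batch size, then split it
    # between labeled and unlabeled examples by the labeled fraction.
    batch_size = num_examples // num_batches
    num_lab_batch = max(1, (num_lab * batch_size) // num_examples)
    num_ulab_batch = batch_size - num_lab_batch
    return num_lab_batch, num_ulab_batch, batch_size

def prior_weights():
    # Assumed behaviour: a Gaussian log-prior summed over every trainable
    # variable in the current graph (the normalisation constant is dropped).
    return tf.add_n([tf.reduce_sum(-0.5 * tf.square(w))
                     for w in tf.trainable_variables()])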
Example #2
    def _objective(self):
        n_train_examples = 50000
        (train_x_l, train_l_y, train_u_x, train_u_y, self.valid_x,
         self.valid_y, self.test_x,
         self.test_y) = extract_data(n_train_examples)
        num_batches = int(n_train_examples / self.batch_size)
        logging.debug("num batches:{}, batch_size:{}, epochs:{}".format(
            num_batches, self.batch_size,
            int(self.num_iterations / num_batches)))
        # Labeled and unlabeled inputs are pooled: this objective is
        # purely unsupervised, so the label split is irrelevant here.
        self.train_x = np.concatenate((train_x_l, train_u_x), axis=0)
        self.train_y = np.concatenate((train_l_y, train_u_y), axis=0)
        (elbo, self.x_recon_mu, self.z_sample, self.z_mu, self.z_logvar,
         self.loglik) = self.build_model()
        # Adam minimises, so the bound to be maximised is negated.
        self.cost = (elbo * num_batches +
                     prior_weights()) / (-self.batch_size * num_batches)
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate,
            beta1=self.beta1,
            beta2=self.beta2).minimize(self.cost)
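
extract_data is likewise external to this example; the call site only tells us that it takes the number of training examples and returns eight arrays in the order labeled x/y, unlabeled x/y, validation x/y, test x/y. Below is a synthetic stand-in that honours that contract (the labeled split, dimensions, and set sizes are illustrative assumptions; the real function presumably loads a dataset such as MNIST):

import numpy as np

def extract_data(n_train_examples, n_labeled=3000, input_dim=784, n_classes=10):
    # Stand-in only: random arrays that match the expected shapes and
    # return order of the real loader.
    def make_split(n):
        x = np.random.rand(n, input_dim).astype(np.float32)
        y = np.eye(n_classes, dtype=np.float32)[
            np.random.randint(n_classes, size=n)]
        return x, y
    train_x_l, train_l_y = make_split(n_labeled)
    train_u_x, train_u_y = make_split(n_train_examples - n_labeled)
    valid_x, valid_y = make_split(10000)
    test_x, test_y = make_split(10000)
    return (train_x_l, train_l_y, train_u_x, train_u_y,
            valid_x, valid_y, test_x, test_y)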
Example #3
    def _objective(self):
        self.split_batch_size = int(self.batch_size / 2)
        # TODO clean up batch assignment
        self.num_lab_batch, self.num_ulab_batch = self.split_batch_size, self.split_batch_size
        # int() keeps num_batches integral (Python 3 "/" returns a float).
        self.num_batches = int(self.num_examples / self.batch_size)
        logging.debug(
            "num batches:{}, batch_size:{}, num_lab_batch:{}, num_ulab_batch:{}, epochs:{}"
            .format(self.num_batches, self.batch_size, self.num_lab_batch,
                    self.num_ulab_batch,
                    int(self.num_iterations / self.num_batches)))
        (self.labeled_ELBO, self.y_lab_logits, self.x_recon_lab_mu,
         self.classifier_loss, self.y_pred_cls) = self.labeled_model()
        self.marginal_lik_lab = tf.reduce_mean(self.total_lab_loss())
        if self.n_labeled == self.num_examples:
            # Fully supervised: fold the unlabeled and validation splits
            # back into the labeled training arrays.
            self.train_x_l = np.concatenate(
                (self.train_x_l, self.train_u_x, self.valid_x), axis=0)
            self.train_l_y = np.concatenate(
                (self.train_l_y, self.train_u_y, self.valid_y), axis=0)
            # TODO check calculations
            self.total_marg_lik = self.marginal_lik_lab
            loss = "labeled loss"
            print(loss)
            logging.debug(loss)
        else:
            # Semi-supervised: include the unlabeled bound as well.
            self.unlabeled_ELBO, self.y_ulab_logits = self.unlabeled_model()
            self.total_marg_lik = tf.reduce_mean(self.total_lab_loss() +
                                                 self.total_unlab_loss())
            loss = "labeled + unlabeled loss"
            print(loss)
            logging.debug(loss)
        # Adam minimises, so the bound to be maximised is negated.
        self.cost = (self.total_marg_lik * self.num_examples +
                     prior_weights()) / (-self.num_examples)
        tf.summary.scalar('cost', self.cost)
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate,
            beta1=self.beta1,
            beta2=self.beta2).minimize(self.cost)
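
Examples #1 and #3 both combine total_lab_loss and total_unlab_loss, which live elsewhere in the repository. One plausible shape for them, following the semi-supervised VAE objective of Kingma et al. (2014) that this code appears to implement (the alpha weighting, the reductions, and the [batch, n_classes] layout of the unlabeled ELBO are all assumptions, not taken from the source):

import tensorflow as tf

def total_lab_loss(labeled_ELBO, classifier_loss, num_examples, n_labeled):
    # Negative labeled ELBO plus a weighted classifier term;
    # Kingma et al. suggest alpha = 0.1 * N / N_labeled.
    alpha = 0.1 * num_examples / n_labeled
    return tf.reduce_sum(-labeled_ELBO) + alpha * classifier_loss

def total_unlab_loss(unlabeled_ELBO, y_ulab_logits):
    # unlabeled_ELBO is assumed to hold the ELBO evaluated under each
    # candidate label, shape [batch, n_classes]; the bound marginalises
    # over q(y|x) and adds its entropy.
    q_y = tf.nn.softmax(y_ulab_logits)
    entropy = -tf.reduce_sum(q_y * tf.log(q_y + 1e-8), axis=-1)
    return tf.reduce_sum(
        tf.reduce_sum(q_y * -unlabeled_ELBO, axis=-1) - entropy)

With losses of this shape, the cost expressions above are just the negated, rescaled bounds, which is why each example divides by a negative factor before handing the result to AdamOptimizer.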