Example #1
    def train_test(self):
        epochs, best_epoch = self.train_neural_network()
        self.saver.restore(sess=self.session, save_path=self.save_path)
        test_correct, test_cls_pred, test_cost = self.predict_cls(
            mu=self.test_x_mu,
            logvar=self.test_x_logvar,
            labels=self.test_y,
            cls_true=convert_labels_to_cls(self.test_y))
        feed_dict = {
            self.x_lab_mu: self.test_x_mu,
            self.x_lab_logvar: self.test_x_logvar,
            self.y_lab: self.test_y
        }
        logits = self.session.run(self.y_lab_logits, feed_dict=feed_dict)
        plot_roc(logits, self.test_y, self.num_classes, name='Conv VAE Class')
        print_test_accuracy(test_correct, test_cls_pred, self.test_y, logging)
        plot_cost(training=self.train_cost,
                  validation=self.validation_cost,
                  name="Cost",
                  epochs=epochs,
                  best_epoch=best_epoch)
        plot_line(self.validation_accuracy,
                  name='Validation Accuracy',
                  epochs=epochs,
                  best_epoch=best_epoch)
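Every snippet on this page passes cls_true=convert_labels_to_cls(...), but the helper itself is never shown. A minimal sketch, assuming the labels are one-hot NumPy arrays (inferred from the call sites, not confirmed by the source):

import numpy as np

def convert_labels_to_cls(labels):
    # Assumed behavior: collapse one-hot labels of shape
    # [num_samples, num_classes] into a vector of integer class indices.
    return np.argmax(labels, axis=1)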
Example #2
    def train_neural_network(self):
        print_training = "Training PCA MLP:"
        print(print_training)
        logging.debug(print_training)
        self.session.run(tf.global_variables_initializer())
        best_validation_accuracy = 0
        last_improvement = 0

        start_time = time.time()
        idx = 0

        for i in range(self.num_iterations):
            # Batch Training
            x_batch, y_batch, idx = get_next_batch(self.train_x, self.train_y,
                                                   idx, self.batch_size)
            summary, batch_loss, _ = self.session.run(
                [self.merged, self.cost, self.optimizer],
                feed_dict={
                    self.x: x_batch,
                    self.y: y_batch
                })
            self.train_writer.add_summary(summary, i)

            if (i % 100 == 0) or (i == (self.num_iterations - 1)):
                # Calculate the accuracy
                correct, _ = self.predict_cls(images=self.valid_x,
                                              labels=self.valid_y,
                                              cls_true=convert_labels_to_cls(
                                                  self.valid_y))
                acc_validation, _ = cls_accuracy(correct)
                if acc_validation > best_validation_accuracy:
                    # Save the best-performing variables of the TensorFlow graph to file.
                    self.saver.save(sess=self.session,
                                    save_path=self.save_path)
                    # update best validation accuracy
                    best_validation_accuracy = acc_validation
                    last_improvement = i
                    improved_str = '*'
                else:
                    improved_str = ''

                print_opt = "Iteration: {}, Training Loss: {}, " \
                            " Validation Acc:{}, {}".format(i + 1, batch_loss, acc_validation, improved_str)
                print(print_opt)
                logging.debug(print_opt)
            if i - last_improvement > self.require_improvement:
                print_imp = "No improvement found in a while, stopping optimization."
                print(print_imp)
                logging.debug(print_imp)
                # Break out from the for-loop.
                break
        # Ending time.
        end_time = time.time()
        time_dif = end_time - start_time
        print_time = "Time usage: " + str(
            timedelta(seconds=int(round(time_dif))))
        print(print_time)
        logging.debug(print_time)
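get_next_batch is called as x_batch, y_batch, idx = get_next_batch(x, y, idx, batch_size), i.e. it returns the batch plus an updated cursor. A plausible sketch under that assumption; the real helper may also shuffle between epochs:

import numpy as np

def get_next_batch(x, y, idx, batch_size):
    # Slice the next mini-batch from (x, y); wrap the cursor back to the
    # start of the arrays once they are exhausted.
    if idx + batch_size > x.shape[0]:
        idx = 0  # wrap around; the real helper might reshuffle here
    end = idx + batch_size
    return x[idx:end], y[idx:end], end

# Example: cycling through 10 samples in batches of 4
x, y, idx = np.arange(10).reshape(10, 1), np.eye(10), 0
for _ in range(3):
    x_batch, y_batch, idx = get_next_batch(x, y, idx, batch_size=4)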
Example #3
    def train_test(self):
        self.train_neural_network()
        self.saver.restore(sess=self.session, save_path=self.save_path)
        correct, cls_pred, _ = self.predict_cls(
            images=self.test_x,
            labels=self.test_y,
            cls_true=convert_labels_to_cls(self.test_y))
        print_test_accuracy(correct, cls_pred, self.test_y, logging)
        self.test_reconstruction()
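cls_accuracy is unpacked as acc, _ = cls_accuracy(correct) throughout. A minimal sketch, assuming correct is the boolean array produced by predict_cls and the second return value is the raw count:

import numpy as np

def cls_accuracy(correct):
    # Assumed: `correct` is a boolean array marking predictions that
    # matched cls_true; return mean accuracy and the raw correct count.
    correct_sum = int(np.sum(correct))
    return float(correct_sum) / len(correct), correct_sum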
Example #4
    def train_test(self):
        self.train_neural_network()
        self.saver.restore(sess=self.session, save_path=self.save_path)
        correct, cls_pred = self.predict_cls(
            images=self.test_x,
            labels=self.test_y,
            cls_true=convert_labels_to_cls(self.test_y))

        feed_dict = {self.x: self.test_x, self.y: self.test_y}
        logits = self.session.run(self.y_logits, feed_dict=feed_dict)
        plot_roc(logits, self.test_y, self.num_classes, name='MLP')
        print_test_accuracy(correct, cls_pred, self.test_y, logging)
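print_test_accuracy(correct, cls_pred, test_y, logging) appears in every train_test above. A hedged sketch of what it likely does; cls_pred and the labels are accepted because the real helper may also build a confusion matrix from them:

import numpy as np

def print_test_accuracy(correct, cls_pred, labels, logger):
    # Report overall accuracy on the test set, both to stdout and the log.
    num_correct = int(np.sum(correct))
    acc = float(num_correct) / len(correct)
    msg = "Accuracy on test set: {:.1%} ({} / {})".format(
        acc, num_correct, len(correct))
    print(msg)
    logger.debug(msg)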
Example #5
    def train_test(self):
        self.train_neural_network()
        self.saver.restore(sess=self.session, save_path=self.save_path)
        correct, cls_pred, test_marg_lik = self.predict_cls(
            images=self.test_x,
            labels=self.test_y,
            cls_true=convert_labels_to_cls(self.test_y))
        feed_dict = {
            self.x_lab: self.test_x,
            self.y_lab: self.test_y,
            self.is_training: False
        }
        marg_print = "test marginal_likelihood: {}".format(test_marg_lik)
        print(marg_print)
        logging.debug(marg_print)
        print_test_accuracy(correct, cls_pred, self.test_y, logging)
        logits = self.session.run(self.y_lab_logits, feed_dict=feed_dict)
        plot_roc(logits, self.test_y, self.num_classes, name='auxiliary')
        self.test_reconstruction()
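plot_roc(logits, labels, num_classes, name) is shared by Examples 1, 4, and 5. One-vs-rest per-class ROC is the obvious reading; a sketch under that assumption, using scikit-learn (the original may differ):

import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc

def plot_roc(logits, labels, num_classes, name):
    # One-vs-rest ROC curve per class from raw logits and one-hot labels.
    plt.figure()
    for c in range(num_classes):
        fpr, tpr, _ = roc_curve(labels[:, c], logits[:, c])
        plt.plot(fpr, tpr,
                 label="class {} (AUC={:.2f})".format(c, auc(fpr, tpr)))
    plt.plot([0, 1], [0, 1], linestyle="--")  # chance line
    plt.xlabel("False positive rate")
    plt.ylabel("True positive rate")
    plt.title(name)
    plt.legend(loc="lower right")
    plt.savefig(name + "_roc.png")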
Example #6
    def train_neural_network(self):
        if self.restore_vae:
            self.saver.restore(sess=self.session, save_path=self.save_path)
        else:
            self.train_vae()
        print("Training Pre_trained Semi_Supervised VAE:")
        logging.debug("Training Pre_trained Semi_Supervised VAE:")

        best_validation_accuracy = 0
        last_improvement = 0

        start_time = time.time()
        idx_labeled = 0
        idx_unlabeled = 0
        idx = 0

        for i in range(self.num_iterations):

            # Batch Training
            x_batch, _, idx = get_next_batch(self.train_x, self.train_y, idx,
                                             self.batch_size)
            x_l_batch, y_l_batch, idx_labeled = get_next_batch(
                self.train_x_l, self.train_l_y, idx_labeled,
                self.num_lab_batch)
            x_u_batch, _, idx_unlabeled = get_next_batch(
                self.train_u_x, self.train_u_y, idx_unlabeled,
                self.num_ulab_batch)
            feed_dict_train = {
                self.x: x_batch,
                self.x_lab: x_l_batch,
                self.y_lab: y_l_batch,
                self.x_unlab: x_u_batch
            }

            summary, batch_loss, _ = self.session.run(
                [self.merged, self.cost, self.optimizer],
                feed_dict=feed_dict_train)
            # print("Optimization Iteration: {}, Training Loss: {}".format(i, batch_loss))
            self.train_writer.add_summary(summary, i)

            if (i % 100 == 0) or (i == (self.num_iterations - 1)):
                # Calculate the accuracy
                correct, _, log_lik = self.predict_cls(
                    images=self.valid_x,
                    labels=self.valid_y,
                    cls_true=convert_labels_to_cls(self.valid_y))
                acc_validation, _ = cls_accuracy(correct)
                if acc_validation > best_validation_accuracy:
                    # Save the best-performing variables of the TensorFlow graph to file.
                    self.saver.save(sess=self.session,
                                    save_path=self.save_path)
                    # update best validation accuracy
                    best_validation_accuracy = acc_validation
                    last_improvement = i
                    improved_str = '*'
                else:
                    improved_str = ''

                print("Iteration: {}, Training Loss: {}, "
                      " Validation:  log_lik {},  Acc {}, {}".format(
                          i + 1, int(batch_loss), int(log_lik), acc_validation,
                          improved_str))
                logging.debug("Iteration: {}, Training Loss: {}, "
                              " Validation:  log_lik {},  Acc {}, {}".format(
                                  i + 1, int(batch_loss), int(log_lik),
                                  acc_validation, improved_str))
            if i - last_improvement > self.require_improvement:
                print(
                    "No improvement found in a while, stopping optimization.")
                # Break out from the for-loop.
                break
        # Ending time.
        end_time = time.time()
        time_dif = end_time - start_time
        print("Time usage: " + str(timedelta(seconds=int(round(time_dif)))))
        logging.debug("Time usage: " +
                      str(timedelta(seconds=int(round(time_dif)))))
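Examples 2, 6, and 7 all implement the same early-stopping pattern: checkpoint whenever validation accuracy hits a new best, and abort once require_improvement iterations pass without one. The pattern distilled into a standalone sketch, with all three callables as stand-in assumptions:

def run_early_stopping(train_step, evaluate, num_iterations,
                       require_improvement, save_best):
    # `train_step(i)` performs one optimizer update, `evaluate()` returns
    # validation accuracy, and `save_best()` checkpoints the model.
    best_acc, last_improvement = 0.0, 0
    for i in range(num_iterations):
        train_step(i)
        if i % 100 == 0 or i == num_iterations - 1:
            acc = evaluate()
            if acc > best_acc:
                save_best()
                best_acc, last_improvement = acc, i
        if i - last_improvement > require_improvement:
            break  # progress has stalled for too long
    return best_acc, last_improvement

# Toy usage with stubbed steps: accuracy plateaus, so the loop stops early.
accs = iter([0.6, 0.8, 0.8, 0.8, 0.8, 0.8])
best, last = run_early_stopping(train_step=lambda i: None,
                                evaluate=lambda: next(accs, 0.8),
                                num_iterations=5000,
                                require_improvement=300,
                                save_best=lambda: None)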
Example #7
    def train_neural_network(self):
        train_print = "Training Conv VAE Model:"
        params_print = "Parameters: filter_sizes:{}, num_filters:{}, learning_rate:{}," \
                       " momentum: beta1={} beta2={}, batch_size:{}, batch_norm:{}," \
                       " latent_dim:{} num_of_batches:{}, keep_prob:{}, fc_size:{}, require_improvement:{}" \
            .format(self.filter_sizes, self.num_filters, self.learning_rate, self.beta1, self.beta2,
                    self.batch_size, self.batch_norm, self.latent_dim, self.num_batches, self.keep_prob,
                    self.fc_size, self.require_improvement)
        print(train_print)
        print(params_print)
        logging.debug(train_print)
        logging.debug(params_print)
        self.session.run(tf.global_variables_initializer())
        best_validation_accuracy = 0
        last_improvement = 0

        start_time = time.time()
        idx_labeled = 0
        idx_unlabeled = 0
        epochs = 0
        for i in range(self.num_iterations):

            # Batch Training
            x_l_mu, x_l_logvar, y_l_batch, idx_labeled = get_encoded_next_batch(
                self.train_x_l_mu, self.train_x_l_logvar, self.train_l_y,
                idx_labeled, self.num_lab_batch)
            x_u_mu, x_u_logvar, _, idx_unlabeled = get_encoded_next_batch(
                self.train_x_u_mu, self.train_x_u_logvar, self.train_u_y,
                idx_unlabeled, self.num_ulab_batch)
            feed_dict_train = {
                self.x_lab_mu: x_l_mu,
                self.y_lab: y_l_batch,
                self.x_unlab_mu: x_u_mu,
                self.x_lab_logvar: x_l_logvar,
                self.x_unlab_logvar: x_u_logvar
            }
            summary, batch_loss, _ = self.session.run(
                [self.merged, self.cost, self.optimizer],
                feed_dict=feed_dict_train)
            # print("Optimization Iteration: {}, Training Loss: {}".format(i, batch_loss))
            self.train_writer.add_summary(summary, i)
            # Track completed epochs
            if idx_labeled + idx_unlabeled == self.num_examples:
                epochs += 1
                is_epoch = True
            else:
                is_epoch = False

            if is_epoch or (i == (self.num_iterations - 1)):
                # Calculate the accuracy
                valid_correct, _, valid_cost = self.predict_cls(
                    mu=self.valid_x_mu,
                    logvar=self.valid_x_logvar,
                    labels=self.valid_y,
                    cls_true=convert_labels_to_cls(self.valid_y))
                acc_validation, _ = cls_accuracy(valid_correct)
                self.validation_accuracy.append(acc_validation)
                self.validation_cost.append(valid_cost)
                self.train_cost.append(batch_loss)
                if acc_validation > best_validation_accuracy:
                    # Save the best-performing variables of the TensorFlow graph to file.
                    self.saver.save(sess=self.session,
                                    save_path=self.save_path)
                    # update best validation accuracy
                    best_validation_accuracy = acc_validation
                    last_improvement = i
                    improved_str = '*'
                else:
                    improved_str = ''

                print("Iteration: {}, Training Loss: {}, "
                      " Validation Acc:{}, {}".format(i + 1, batch_loss,
                                                      acc_validation,
                                                      improved_str))
                logging.debug("Iteration: {}, Training Loss: {}, "
                              " Validation Acc:{}, {}".format(
                                  i + 1, batch_loss, acc_validation,
                                  improved_str))
            if i - last_improvement > self.require_improvement:
                stop_print = "No improvement found in a while, stopping optimization."
                print(stop_print)
                logging.debug(stop_print)
                # Break out from the for-loop.
                break
        # Ending time.
        end_time = time.time()
        time_dif = end_time - start_time
        print("Time usage: " + str(timedelta(seconds=int(round(time_dif)))))
        logging.debug("Time usage: " +
                      str(timedelta(seconds=int(round(time_dif)))))
        return epochs, last_improvement
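Example 7 batches pre-encoded data through get_encoded_next_batch, which by its call signature returns aligned slices of the VAE posterior parameters plus labels and the updated cursor. A sketch mirroring the get_next_batch sketch above (assumed, not confirmed by the source):

def get_encoded_next_batch(mu, logvar, y, idx, batch_size):
    # Slice aligned mini-batches of posterior means, log-variances and
    # labels, wrapping the cursor once the arrays are exhausted.
    if idx + batch_size > mu.shape[0]:
        idx = 0
    end = idx + batch_size
    return mu[idx:end], logvar[idx:end], y[idx:end], end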
Example #8
    def train_neural_network(self):
        train_print = "Training Auxiliary VAE:"
        print(train_print)
        logging.debug(train_print)
        self.session.run(tf.global_variables_initializer())

        best_validation_accuracy = 0
        last_improvement = 0

        start_time = time.time()
        idx_labeled = 0
        idx_unlabeled = 0

        for i in range(self.num_iterations):

            # Batch Training
            x_l_batch, y_l_batch, idx_labeled = get_next_batch(
                self.train_x_l, self.train_l_y, idx_labeled,
                self.num_lab_batch)
            x_u_batch, _, idx_unlabeled = get_next_batch(
                self.train_u_x, self.train_u_y, idx_unlabeled,
                self.num_ulab_batch)
            feed_dict_train = {
                self.x_lab: x_l_batch,
                self.y_lab: y_l_batch,
                self.x_unlab: x_u_batch,
                self.is_training: True
            }

            summary, batch_loss, _ = self.session.run(
                [self.merged, self.cost, self.optimizer],
                feed_dict=feed_dict_train)
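            # Training-batch accuracy, evaluated every iteration but only reported every 100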
            train_correct, _, batch_marg_lik_lab = self.predict_cls(
                images=x_l_batch,
                labels=y_l_batch,
                cls_true=convert_labels_to_cls(y_l_batch))
            acc_train, _ = cls_accuracy(train_correct)

            # print("Optimization Iteration: {}, Training Loss: {}".format(i, batch_loss))
            self.train_writer.add_summary(summary, i)

            if (i % 100 == 0) or (i == (self.num_iterations - 1)):
                # Calculate the accuracy (note: this snippet validates on the test set)
                correct, _, val_marg_lik = self.predict_cls(
                    images=self.test_x,
                    labels=self.test_y,
                    cls_true=convert_labels_to_cls(self.test_y))
                acc_validation, _ = cls_accuracy(correct)
                if acc_validation > best_validation_accuracy:
                    # Save the best-performing variables of the TensorFlow graph to file.
                    self.saver.save(sess=self.session,
                                    save_path=self.save_path)
                    # update best validation accuracy
                    best_validation_accuracy = acc_validation
                    last_improvement = i
                    improved_str = '*'
                else:
                    improved_str = ''

                optimization_print = "Iteration: {}, Training Loss: {}, Acc: {}, marg_lik: {}, " \
                                     "Validation Acc: {}, marg_lik: {}, {}".format(
                                         i + 1, int(batch_loss), acc_train, batch_marg_lik_lab,
                                         acc_validation, val_marg_lik, improved_str)
                print(optimization_print)
                logging.debug(optimization_print)
                # if i - last_improvement > self.require_improvement:
                #     print("No improvement found in a while, stopping optimization.")
                #     # Break out from the for-loop.
                #     break
        # Ending time.
        end_time = time.time()
        time_dif = end_time - start_time
        time_dif_print = "Time usage: " + str(
            timedelta(seconds=int(round(time_dif))))
        print(time_dif_print)
        logging.debug(time_dif_print)
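Finally, predict_cls itself is never shown, and its signature varies by model (two return values for the plain MLP in Example 4, three when a cost or marginal likelihood is also evaluated). A hypothetical sketch of the two-value MLP variant, assuming a self.y_pred_cls argmax node exists in the graph:

    def predict_cls(self, images, labels, cls_true):
        # Run the assumed argmax-of-logits node over the whole set in one
        # pass and compare element-wise against the true classes. A real
        # implementation might chunk large sets into batches.
        feed_dict = {self.x: images, self.y: labels}
        cls_pred = self.session.run(self.y_pred_cls, feed_dict=feed_dict)
        correct = (cls_true == cls_pred)
        return correct, cls_pred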