Example #1
0
    def train(self):
        """Run the Vanilla VAE optimization loop.

        Every 100 iterations (and on the last one) the validation loss is
        computed; the graph variables are checkpointed whenever it improves,
        and optimization stops early once ``self.require_improvement``
        iterations pass without improvement.
        """
        header = "Training Vanilla VAE:"
        print(header)
        logging.debug(header)
        self.session.run(tf.global_variables_initializer())
        best_val_loss = 1e20
        last_improvement = 0

        t_start = time.time()
        cursor = 0

        for step in range(self.num_iterations):
            # Draw the next training batch and take one optimizer step.
            x_batch, _, cursor = get_next_batch(self.train_x, self.train_y,
                                                cursor, self.batch_size)
            summary, batch_loss, log_lik, _ = self.session.run(
                [self.merged, self.cost, self.loglik, self.optimizer],
                feed_dict={self.x: x_batch})
            self.train_writer.add_summary(summary, step)

            if step % 100 == 0 or step == self.num_iterations - 1:
                # Periodic validation pass.
                val_loss, val_log_lik = self.validation_loss(
                    images=self.valid_x)
                if val_loss < best_val_loss:
                    # Checkpoint the best-performing graph variables.
                    self.saver.save(sess=self.session,
                                    save_path=self.save_path)
                    best_val_loss = val_loss
                    last_improvement = step
                    marker = '*'
                else:
                    marker = ''

                status = ("Iteration: {}, Training:  Loss {}, log_lik {}"
                          " Validation: Loss {}, log_lik {} {}".format(
                              step + 1, int(batch_loss), int(log_lik),
                              int(val_loss), int(val_log_lik), marker))
                print(status)
                logging.debug(status)

            if step - last_improvement > self.require_improvement:
                stop_msg = "No improvement found in a while, stopping optimization."
                print(stop_msg)
                logging.debug(stop_msg)
                break

        # Report total wall-clock time.
        elapsed = time.time() - t_start
        time_msg = "Time usage: " + str(timedelta(seconds=int(round(elapsed))))
        print(time_msg)
        logging.debug(time_msg)
Example #2
0
    def train_neural_network(self):
        """Train the PCA MLP classifier.

        Runs mini-batch optimization, evaluating validation accuracy every
        100 iterations (and on the final one). The best-performing variables
        are checkpointed, and training stops early after
        ``self.require_improvement`` iterations without improvement.
        """
        print("Training PCA MLP:")
        logging.debug("Training PCA MLP:")
        self.session.run(tf.global_variables_initializer())
        best_acc = 0
        last_improvement = 0

        started = time.time()
        pos = 0

        for it in range(self.num_iterations):
            # One optimizer step on the next mini-batch.
            x_batch, y_batch, pos = get_next_batch(self.train_x, self.train_y,
                                                   pos, self.batch_size)
            summary, batch_loss, _ = self.session.run(
                [self.merged, self.cost, self.optimizer],
                feed_dict={self.x: x_batch, self.y: y_batch})
            self.train_writer.add_summary(summary, it)

            if it % 100 == 0 or it == self.num_iterations - 1:
                # Classification accuracy on the validation split.
                correct, _ = self.predict_cls(
                    images=self.valid_x,
                    labels=self.valid_y,
                    cls_true=convert_labels_to_cls(self.valid_y))
                acc_validation, _ = cls_accuracy(correct)
                if acc_validation > best_acc:
                    # Checkpoint the best-performing graph variables.
                    self.saver.save(sess=self.session,
                                    save_path=self.save_path)
                    best_acc = acc_validation
                    last_improvement = it
                    flag = '*'
                else:
                    flag = ''

                message = ("Iteration: {}, Training Loss: {}, "
                           " Validation Acc:{}, {}".format(
                               it + 1, batch_loss, acc_validation, flag))
                print(message)
                logging.debug(message)

            if it - last_improvement > self.require_improvement:
                stop_msg = "No improvement found in a while, stopping optimization."
                print(stop_msg)
                logging.debug(stop_msg)
                break

        # Report total wall-clock time.
        spent = time.time() - started
        timing = "Time usage: " + str(timedelta(seconds=int(round(spent))))
        print(timing)
        logging.debug(timing)
Example #3
0
    def train_neural_network(self):
        """Train the pre-trained semi-supervised VAE classifier.

        Restores the VAE weights from ``self.save_path`` (or trains the VAE
        first when ``self.restore_vae`` is false), then optimizes on a mixed
        batch plus separate labeled/unlabeled batches each iteration. The
        best graph (by validation accuracy) is checkpointed, and training
        stops early after ``self.require_improvement`` iterations without
        improvement.
        """
        if self.restore_vae:
            self.saver.restore(sess=self.session, save_path=self.save_path)
        else:
            self.train_vae()
        print("Training Pre_trained Semi_Supervised VAE:")
        logging.debug("Training Pre_trained Semi_Supervised VAE:")

        best_validation_accuracy = 0
        last_improvement = 0

        start_time = time.time()
        idx_labeled = 0
        idx_unlabeled = 0
        idx = 0

        for i in range(self.num_iterations):

            # Batch Training: one mixed batch, one labeled, one unlabeled.
            x_batch, _, idx = get_next_batch(self.train_x, self.train_y, idx,
                                             self.batch_size)
            x_l_batch, y_l_batch, idx_labeled = get_next_batch(
                self.train_x_l, self.train_l_y, idx_labeled,
                self.num_lab_batch)
            x_u_batch, _, idx_unlabeled = get_next_batch(
                self.train_u_x, self.train_u_y, idx_unlabeled,
                self.num_ulab_batch)
            feed_dict_train = {
                self.x: x_batch,
                self.x_lab: x_l_batch,
                self.y_lab: y_l_batch,
                self.x_unlab: x_u_batch
            }

            summary, batch_loss, _ = self.session.run(
                [self.merged, self.cost, self.optimizer],
                feed_dict=feed_dict_train)
            self.train_writer.add_summary(summary, i)

            if (i % 100 == 0) or (i == (self.num_iterations - 1)):
                # Calculate classification accuracy on the validation split.
                correct, _, log_lik = self.predict_cls(
                    images=self.valid_x,
                    labels=self.valid_y,
                    cls_true=convert_labels_to_cls(self.valid_y))
                acc_validation, _ = cls_accuracy(correct)
                if acc_validation > best_validation_accuracy:
                    # Save best-performing variables of the TensorFlow graph.
                    self.saver.save(sess=self.session,
                                    save_path=self.save_path)
                    # Update best validation accuracy.
                    best_validation_accuracy = acc_validation
                    last_improvement = i
                    improved_str = '*'
                else:
                    improved_str = ''

                print("Iteration: {}, Training Loss: {}, "
                      " Validation:  log_lik {},  Acc {}, {}".format(
                          i + 1, int(batch_loss), int(log_lik), acc_validation,
                          improved_str))
                logging.debug("Iteration: {}, Training Loss: {}, "
                              " Validation:  log_lik {},  Acc {}, {}".format(
                                  i + 1, int(batch_loss), int(log_lik),
                                  acc_validation, improved_str))
            if i - last_improvement > self.require_improvement:
                print(
                    "No improvement found in a while, stopping optimization.")
                # Fix: also log the early stop, matching the other loops.
                logging.debug(
                    "No improvement found in a while, stopping optimization.")
                # Break out from the for-loop.
                break
        # Ending time.
        end_time = time.time()
        time_dif = end_time - start_time
        print("Time usage: " + str(timedelta(seconds=int(round(time_dif)))))
        logging.debug("Time usage: " +
                      str(timedelta(seconds=int(round(time_dif)))))
Example #4
0
    def train_neural_network(self):
        """Train the Auxiliary VAE classifier on labeled + unlabeled batches.

        Each iteration takes one optimizer step and measures training-batch
        accuracy; every 100 iterations (and on the last one) the model is
        evaluated and checkpointed if it improved. Early stopping is disabled
        in this variant, although the last improvement is still tracked.

        NOTE(review): the "validation" accuracy here is computed on
        self.test_x/self.test_y — confirm this is intentional.
        """
        banner = "Training Auxiliary VAE:"
        print(banner)
        logging.debug(banner)
        self.session.run(tf.global_variables_initializer())

        best_acc = 0
        last_improvement = 0  # tracked for parity; no early stop below

        t0 = time.time()
        lab_pos = 0
        unlab_pos = 0

        for step in range(self.num_iterations):

            # Draw one labeled and one unlabeled batch.
            x_l_batch, y_l_batch, lab_pos = get_next_batch(
                self.train_x_l, self.train_l_y, lab_pos,
                self.num_lab_batch)
            x_u_batch, _, unlab_pos = get_next_batch(
                self.train_u_x, self.train_u_y, unlab_pos,
                self.num_ulab_batch)
            feed = {
                self.x_lab: x_l_batch,
                self.y_lab: y_l_batch,
                self.x_unlab: x_u_batch,
                self.is_training: True
            }

            summary, batch_loss, _ = self.session.run(
                [self.merged, self.cost, self.optimizer],
                feed_dict=feed)
            # Training-batch accuracy and marginal likelihood (every step).
            train_correct, _, batch_marg_lik_lab = self.predict_cls(
                images=x_l_batch,
                labels=y_l_batch,
                cls_true=convert_labels_to_cls(y_l_batch))
            acc_train, _ = cls_accuracy(train_correct)

            self.train_writer.add_summary(summary, step)

            if step % 100 == 0 or step == self.num_iterations - 1:
                # Evaluate on the held-out split (see NOTE in the docstring).
                correct, _, val_marg_lik = self.predict_cls(
                    images=self.test_x,
                    labels=self.test_y,
                    cls_true=convert_labels_to_cls(self.test_y))
                acc_validation, _ = cls_accuracy(correct)
                if acc_validation > best_acc:
                    # Checkpoint the best-performing graph variables.
                    self.saver.save(sess=self.session,
                                    save_path=self.save_path)
                    best_acc = acc_validation
                    last_improvement = step
                    mark = '*'
                else:
                    mark = ''

                report = ("Iteration: {}, Training Loss: {} Acc:{} marg_lik: {} "
                          " Validation Acc:{} marg_lik: {} , {}".format(
                              step + 1, int(batch_loss), acc_train,
                              batch_marg_lik_lab, acc_validation, val_marg_lik,
                              mark))
                print(report)
                logging.debug(report)
        # Report total wall-clock time.
        wall = time.time() - t0
        timing = "Time usage: " + str(timedelta(seconds=int(round(wall))))
        print(timing)
        logging.debug(timing)
    def train(self):
        """Run the Conv VAE optimization loop with per-epoch validation.

        Logs the hyper-parameters up front, takes one optimizer step per
        iteration, validates at every epoch boundary (and on the final
        iteration), checkpoints the graph whenever validation loss improves,
        and stops early after ``self.require_improvement`` iterations without
        improvement.

        Returns:
            tuple: ``(epochs, last_improvement)`` — completed epoch count and
            the iteration index of the last validation improvement.
        """
        train_print = "Training Conv VAE Model:"
        params_print = "Parameters: filter_sizes:{}, num_filters:{}, learning_rate:{}," \
                       " momentum: beta1={} beta2={}, batch_size:{}, batch_norm:{}," \
                       " latent_dim:{} num_of_batches:{}, keep_prob:{}, fc_size:{}, require_improvement:{}" \
            .format(self.filter_sizes, self.num_filters, self.learning_rate, self.beta1, self.beta2,
                    self.batch_size, self.batch_norm, self.latent_dim, self.num_batches, self.keep_prob,
                    self.fc_size, self.require_improvement)
        print(train_print)
        print(params_print)
        logging.debug(train_print)
        logging.debug(params_print)
        self.session.run(tf.global_variables_initializer())
        best_validation_loss = 1e20  # sentinel: any real validation loss beats this
        last_improvement = 0  # iteration index of the last improvement

        start_time = time.time()
        idx = 0  # cursor into the training set, advanced by get_next_batch
        epochs = 0
        for i in range(self.num_iterations):
            # Batch Training
            x_batch, _, idx = get_next_batch(self.train_x, self.train_y, idx,
                                             self.batch_size)
            summary, batch_loss, batch_log_lik, _ = self.session.run(
                [self.merged, self.cost, self.loglik, self.optimizer],
                feed_dict={self.x: x_batch})
            # Epoch detection: assumes get_next_batch yields idx exactly equal
            # to num_examples once per full pass — TODO confirm it cannot wrap
            # past the end and skip this equality.
            if idx == self.num_examples:
                epochs += 1
                is_epoch = True
            else:
                is_epoch = False
            self.train_writer.add_summary(summary, i)

            if (is_epoch) or (i == (self.num_iterations - 1)):
                validation_loss, val_log_lik = self.validation_loss(
                    images=self.valid_x)
                # Record per-epoch training/validation curves on the instance.
                self.train_log_lik.append(batch_log_lik)
                self.train_cost.append(batch_loss)
                self.validation_cost.append(validation_loss)
                self.validation_log_lik.append(val_log_lik)
                # Checkpoint whenever the validation loss improves.
                if validation_loss < best_validation_loss:
                    # Save best-performing variables of the TensorFlow graph.
                    self.saver.save(sess=self.session,
                                    save_path=self.save_path)
                    # Update best validation loss.
                    best_validation_loss = validation_loss
                    last_improvement = i
                    improved_str = '*'

                else:
                    improved_str = ''

                # NOTE(review): the console message reports "Epochs" while the
                # debug log reports "Iteration" for the same event — confirm
                # this asymmetry is intentional.
                print("Epochs: {}, Training:  Loss {}, batch_log_lik {}"
                      " Validation: Loss {}, batch_log_lik {} {}".format(
                          epochs, int(batch_loss), int(batch_log_lik),
                          int(validation_loss), int(val_log_lik),
                          improved_str))
                logging.debug(
                    "Iteration: {}, Training:  Loss {}, batch_log_lik {}"
                    " Validation: Loss {}, batch_log_lik {} {}".format(
                        i + 1, int(batch_loss), int(batch_log_lik),
                        int(validation_loss), int(val_log_lik), improved_str))
            if i - last_improvement > self.require_improvement:
                print(
                    "No improvement found in a while, stopping optimization.")
                logging.debug(
                    "No improvement found in a while, stopping optimization.")
                # Break out from the for-loop.
                break
        # Ending time.
        end_time = time.time()
        time_dif = end_time - start_time
        print("Time usage: " + str(timedelta(seconds=int(round(time_dif)))))
        logging.debug("Time usage: " +
                      str(timedelta(seconds=int(round(time_dif)))))
        return epochs, last_improvement