"../normalizedData/trainingSetY.npy", "../normalizedData/testSetX.npy", "../normalizedData/testSetY.npy") model = Model([40]) log_model = LogisticRegressionModel() soft_model = SoftmaxRegressionModel() plot_x = list() plot_y = list() plot_y_log = list() plot_y_soft = list() with tf.Session() as session: session.run(tf.global_variables_initializer()) epoch_index = 0 while epoch_index < epochs: samples, labels = batcher.get_batch(batch_size) model.train_model(session, samples, labels) log_model.train_model(session, samples, labels) soft_model.train_model(session, samples, labels) if batcher.epoch_finished(): batcher.reset_epoch() test_samples, test_labels = batcher.get_test_batch() fp = 0 fn = 0 pred = np.argmax(model.predict(session, test_samples), axis=1) for index, i in enumerate(np.argmax(test_labels, axis=1)): if pred[index] == 1 and i != pred[index]: fp += 1 if pred[index] == 0 and i != pred[index]: fn += 1
accuracy += model.get_accuracy(session, image_batches[i], label_batches[i]) accuracy /= len(image_batches) train_accuracy = 0 image_batches, label_batches = batcher.get_test_training_batches( 50) for i in range(len(image_batches)): train_accuracy += model.get_accuracy(session, image_batches[i], label_batches[i]) train_accuracy /= len(image_batches) accuracy_data.append(accuracy) train_accuracy_data.append(train_accuracy) print("Epoch %i \t| test_acc: %f | train_acc: %f | time: %f" % (epoch_index, accuracy, train_accuracy, time() - epoch_start_time)) saver.save(session, os.path.join("checkpoints/resnet_basic.ckpt")) batcher.prepare_epoch() step_index = 0 epoch_start_time = time() if epoch_index == epochs: break epoch_index += 1 images, labels = batcher.get_batch(batch_size) model.train_model(session, images, labels) # print("Step %i" % step_index) # step_index += 1 print("Training complete")
# Train a VAE on pre-generated data, reporting held-out (test) loss at
# every epoch boundary and every 500 training steps.
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TF C++ info/warning spam
from data_batcher import DataBatcher
from vae_model import VAEModel
from time import time
import numpy as np
import tensorflow as tf

print("Finding training data...")
batcher = DataBatcher("generated_data")

print("Building model...")
# NOTE(review): 50 is presumably the latent dimension and [40, 35, 30] the
# hidden layer sizes — confirm against VAEModel's constructor.
model = VAEModel(50, [40, 35, 30])

batch_size = 5000
training_steps = 200000


def _held_out_loss(session):
    """Return the model's current loss on a fresh test batch."""
    held_out = batcher.get_test_batch()
    return model.get_loss(session, inputs=held_out)


print("Starting training...")
with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    step = 0
    while step < training_steps:
        training_batch, epoch_complete = batcher.get_batch(batch_size)
        model.train_model(session, inputs=training_batch)
        # Report at epoch boundaries and on a fixed step cadence; both
        # conditions may fire on the same step, producing two lines.
        if epoch_complete:
            print("Epoch complete - loss: {}".format(_held_out_loss(session)))
        if step % 500 == 0:
            print("Step {} - loss: {}".format(step, _held_out_loss(session)))
        step += 1
batcher = DataBatcher("normalizedData/trainingSetX.npy", "normalizedData/trainingSetY.npy", "normalizedData/testSetX.npy", "normalizedData/testSetY.npy") epochs = 500 batch_size = 20 plot_x = list() plot_y = list() with tf.Session() as session: session.run(tf.global_variables_initializer()) epoch_index = 0 while epoch_index < epochs: samples, classes_labels = batcher.get_batch(batch_size) session.run(train, feed_dict={inputs: samples, labels: classes_labels}) if batcher.epoch_finished(): batcher.reset_epoch() test_samples, test_labels = batcher.get_test_batch() class_loss = session.run(loss, feed_dict={ inputs: test_samples, labels: test_labels }) acc = session.run(accuracy, feed_dict={ inputs: test_samples, labels: test_labels }) plot_x.append(epoch_index + 1)