print("Model restored!") else: # otherwise, set all parameters to a small random value sess.run(tf.initialize_all_variables()) # making sure read pointer is at zero dataloader.reset_read_pointer() # randomize data dataloader.randomize() dataset_size = dataloader.get_data_size() # create confusion matrix confusion_matrix = np.zeros((12,12)) # start feeding in the batches for i in range(dataset_size // dataloader.get_batch_size()): # loading the next available batch test_spectro_batch, test_labels_batch = dataloader.load_next_batch() # generate one-hot encoding from the ground-truth labels of the current batch test_one_hot_batch = utils.generate_one_hot(test_labels_batch, dataloader.get_num_classes()) # get the predicted labels predicted_labels = model.prediction.eval(feed_dict={model.x: test_spectro_batch, model.y_: test_one_hot_batch, model.keep_prob: 1.0}) # get the accuaacy and print it test_accuracy = model.accuracy.eval(feed_dict={model.x: test_spectro_batch, model.y_: test_one_hot_batch, model.keep_prob: 1.0}) print("Test accuracy %g" % test_accuracy) # populate confusion matrix for j in range(len(predicted_labels)): confusion_matrix[test_labels_batch[j]][predicted_labels[j]] += 1 # print the confusion matrix print(confusion_matrix)
# Begin training.
print("Starting training:")

# Total number of training examples; used to derive batches per epoch.
dataset_size = dataloader.get_data_size()
epochs = 30

for epoch in range(epochs):
    # Fresh pass over the data: rewind the read pointer and reshuffle
    # before every epoch.
    dataloader.reset_read_pointer()
    dataloader.randomize()
    print("Epoch number: %d" % epoch)

    batches_per_epoch = dataset_size // dataloader.get_batch_size()
    for batch_idx in range(batches_per_epoch):
        # Next mini-batch of spectrograms with their integer class labels.
        spectro_batch, labels_batch = dataloader.load_next_batch()
        # One-hot encode the ground-truth labels of the current batch.
        one_hot_batch = utils.generate_one_hot(labels_batch, dataloader.get_num_classes())

        # Report accuracy on this batch before taking the gradient step
        # (dropout disabled for the measurement: keep_prob 1.0).
        eval_feed = {model.x: spectro_batch, model.y_: one_hot_batch, model.keep_prob: 1.0}
        train_accuracy = model.accuracy.eval(feed_dict=eval_feed)
        print('mini batch %d, training accuracy %g' % (batch_idx, train_accuracy))

        # One optimizer step with dropout active (keep_prob 0.5).
        train_feed = {model.x: spectro_batch, model.y_: one_hot_batch, model.keep_prob: 0.5}
        model.train_step.run(feed_dict=train_feed)

# Persist the trained parameters once training completes.
if not os.path.exists('checkpoints'):
    os.makedirs('checkpoints')
saver.save(sess, checkpoint_file)
print("Model saved to %s" % checkpoint_file)