def quickrun3(operators, grabbed_vars=None, dir='probeview', feed_dict=None):
    sess = tf.Session()
    probe_stream = TFT.viewprep(sess, dir=dir)
    sess.run(tf.global_variables_initializer())
    # results[0] = operator outputs; results[1] = grabbed-variable values
    results = sess.run([operators, grabbed_vars], feed_dict=feed_dict)
    sess.close()
    TFT.show_results(results[1], grabbed_vars, dir)
    return results
def quickrun2(operators, grabbed_vars=None, dir='probeview'):
    sess = tf.Session()
    probe_stream = TFT.viewprep(sess, dir=dir)
    sess.run(tf.global_variables_initializer())
    # results = a list of output values, one from each operator
    results = sess.run([operators, grabbed_vars])
    sess.close()
    TFT.show_results(results[1], grabbed_vars, dir)
    return results
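# --- Usage sketch (illustrative, not part of the original file) ---
# A minimal example of calling quickrun3 on a toy graph; quickrun2 is the
# same minus feed_dict support. It assumes this file's module-level imports
# (tf = tensorflow, TFT = tflowtools) and that tflowtools is on the path.
# The names x, w, y and this helper are hypothetical.
def _demo_quickrun3():
    x = tf.placeholder(tf.float32, shape=(), name='x')  # scalar input
    w = tf.Variable(2.0, name='w')                      # trainable weight
    y = tf.multiply(w, x, name='y')                     # output operator
    # results[0] holds the operator outputs (e.g. [6.0]);
    # results[1] holds the grabbed-variable values (e.g. [2.0]).
    return quickrun3([y], grabbed_vars=[w], feed_dict={x: 3.0})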
print("Validation Error: ", self.validation_error_history[-1][1]) # Consider validation testing here or something # TFT.fireup_tensorboard(logdir='probeview') # Plots.line([errors, self.validation_error_history]) print("\nFinished Training") print("Training Cost: " + str(self.training_error_history[-1][1])) print("Training Error %: " + str(self.training_error_history[-1][1] * 100) + " %") print("Validation Error: " + str(self.validation_error_history[-1][1])) print("Validation Error %: " + str(self.validation_error_history[-1][1] * 100) + "%") # Plots.scatter([self.training_error_history, self.validation_error_history], # ["Training Error", "Validation Error"]) # Plots.plotWeights([self.grabbed_weigths_history]) TFT.viewprep(sess) Plots.line([self.training_error_history, self.validation_error_history], ["Training Cost", "Validation Error"]) print("\nResults for Training Set") self.do_testing(self.case_manager.get_training_cases()) print("\nResults for Testing Set") self.do_testing(self.case_manager.get_testing_cases()) if self.config.mbsize > 0: # Should run map test print("\nRunning Map Tests") map_batch_size = self.config.mbsize np.random.shuffle(case_list) # Select random cases for this minibatch cases = case_list[:map_batch_size] self.do_testing(cases, grabvars=self.grabvars, scenario="mapping")