def do_training(self, epochs=100, test_interval=10, show_interval=50):
    errors = []
    if test_interval:
        self.avg_vector_distances = []
    self.current_session = sess = TFT.gen_initialized_session()
    step = 0
    for i in range(epochs):
        error = 0
        grabvars = [self.error]
        # One training step per case; accumulate the error over the whole epoch.
        for c in self.cases:
            feeder = {self.input: [c[0]], self.target: [c[1]]}
            _, grabvals, _ = self.run_one_step([self.trainer], grabvars, step=step,
                                               show_interval=show_interval,
                                               session=sess, feed_dict=feeder)
            error += grabvals[0]
            step += 1
        errors.append(error)
        # Periodically test and record the average hidden-node vector distance.
        if test_interval and i % test_interval == 0:
            self.avg_vector_distances.append(
                calc_avg_vect_dist(self.do_testing(sess, scatter=False)))
    PLT.figure()
    TFT.simple_plot(errors, xtitle="Epoch", ytitle="Error", title="")
    if test_interval:
        PLT.figure()
        TFT.simple_plot(self.avg_vector_distances, xtitle='Epoch',
                        ytitle='Avg Hidden-Node Vector Distance', title='')
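# Note: calc_avg_vect_dist is not defined in this listing. Below is a minimal sketch,
# assuming do_testing returns one hidden-layer activation vector per test case and
# that the statistic of interest is the mean pairwise Euclidean distance between them.
import itertools  # assumed available; used only by this sketch

def calc_avg_vect_dist(vectors):
    # Mean pairwise Euclidean distance between the given activation vectors.
    pairs = list(itertools.combinations(vectors, 2))
    if not pairs:
        return 0.0
    return float(np.mean([np.linalg.norm(np.subtract(a, b)) for a, b in pairs]))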
def training_session(self, epochs, sess=None, dir="probeview", continued=False):
    self.roundup_probes()
    session = sess if sess else TFT.gen_initialized_session(dir=dir)
    self.current_session = session
    self.do_training(session, self.caseman.get_training_cases(), epochs, continued=continued)
def quickrun4(operators, grabbed_vars=None, dir='probeview', session=None,
              feed_dict=None, step=1, show_interval=1):
    sess = session if session else TFT.gen_initialized_session(dir=dir)
    results = sess.run([operators, grabbed_vars], feed_dict=feed_dict)
    if show_interval and (step % show_interval == 0):
        TFT.show_results(results[1], grabbed_vars, dir)
    return results[0], results[1], sess
def run_one_step(self, operators, grabbed_vars=None, probed_vars=None, dir='probeview',
                 session=None, feed_dict=None, step=1, show_interval=1):
    sess = session if session else TFT.gen_initialized_session(dir=dir)
    if probed_vars is not None:
        results = sess.run([operators, grabbed_vars, probed_vars], feed_dict=feed_dict)
        sess.probe_stream.add_summary(results[2], global_step=step)
    else:
        results = sess.run([operators, grabbed_vars], feed_dict=feed_dict)
    if show_interval and (step % show_interval == 0):
        self.display_grabvars(results[1], grabbed_vars, step=step)
    return results[0], results[1], sess
def tfex8(size=5, steps=50, tvect=None, learning_rate=0.5, showint=10):
    target = tvect if tvect is not None else np.ones((1, size))
    w = tf.Variable(np.random.uniform(-.1, .1, size=(size, size)), name='weights')  # weights applied to x
    b = tf.Variable(np.zeros((1, size)), name='bias')  # bias terms
    x = tf.placeholder(tf.float64, shape=(1, size), name='input')
    # Gather all weighted inputs, then apply the activation function.
    y = tf.sigmoid(tf.matmul(x, w) + b, name='out-sigmoid')
    error = tf.reduce_mean(tf.square(target - y))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_operator = optimizer.minimize(error)
    feeder = {x: np.random.uniform(-1, 1, size=(1, size))}
    sess = TFT.gen_initialized_session()
    for step in range(steps):
        quickrun4([training_operator], [w, b, y], session=sess, feed_dict=feeder,
                  step=step, show_interval=showint)
    TFT.close_session(sess)
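# Example usage, assuming the module-level imports used throughout this code
# (tensorflow as tf, numpy as np, tflowtools as TFT, matplotlib.pyplot as PLT):
if __name__ == '__main__':
    # Train the single-layer sigmoid net toward an all-ones target for 100 steps,
    # displaying the grabbed weights, biases and outputs every 20 steps.
    tfex8(size=5, steps=100, learning_rate=0.5, showint=20)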