def do_training(self, epochs=100, test_interval=10, show_interval=50):
    """Train the network case-by-case for `epochs` passes over self.cases.

    Accumulates the per-epoch sum of self.error into `errors`, optionally
    sampling the average hidden-node vector distance every `test_interval`
    epochs, and plots both histories at the end.

    :param epochs: number of full passes over the training cases.
    :param test_interval: epochs between do_testing() probes (falsy disables).
    :param show_interval: forwarded to run_one_step for grab-var display.
    """
    errors = []
    if test_interval:
        self.avg_vector_distances = []
    self.current_session = sess = TFT.gen_initialized_session()
    step = 0
    for epoch in range(epochs):
        epoch_error = 0
        grabvars = [self.error]
        # One training step per case (batch size 1).
        for case in self.cases:
            feeder = {self.input: [case[0]], self.target: [case[1]]}
            _, grabvals, _ = self.run_one_step([self.trainer], grabvars,
                                               step=step,
                                               show_interval=show_interval,
                                               session=sess,
                                               feed_dict=feeder)
            epoch_error += grabvals[0]
            step += 1
        errors.append(epoch_error)
        if test_interval and epoch % test_interval == 0:
            test_result = self.do_testing(sess, scatter=False)
            self.avg_vector_distances.append(calc_avg_vect_dist(test_result))
    PLT.figure()
    TFT.simple_plot(errors, xtitle="Epoch", ytitle="Error", title="")
    if test_interval:
        PLT.figure()
        TFT.simple_plot(self.avg_vector_distances,
                        xtitle='Epoch',
                        ytitle='Avg Hidden-Node Vector Distance',
                        title='')
def run_one_step(self, operators, grabbed_vars=None, probed_vars=None, dir='probeview', session=None, feed_dict=None, step=1, show_interval=1):
    """Execute one session step, optionally writing probe summaries.

    Runs `operators` plus `grabbed_vars` (and `probed_vars` when given) in a
    single sess.run; probe results go to sess.probe_stream. Grabbed values are
    displayed every `show_interval` steps.  Returns (operator results,
    grabbed values, session).
    """
    if session:
        sess = session
    else:
        sess = TFT.gen_initialized_session(dir=dir)
    if probed_vars is None:
        results = sess.run([operators, grabbed_vars], feed_dict=feed_dict)
    else:
        results = sess.run([operators, grabbed_vars, probed_vars],
                           feed_dict=feed_dict)
        sess.probe_stream.add_summary(results[2], global_step=step)
    if show_interval and step % show_interval == 0:
        self.display_grabvars(results[1], grabbed_vars, step=step)
    return results[0], results[1], sess
def run_one_step(self, operators, grabbed_vars=None, probed_vars=None, dir='probeview', session=None, feed_dict=None, step=1):
    """Execute one session step; write probe summaries when probed_vars given.

    Returns (operator results, grabbed values, session).  A fresh session is
    created under `dir` when none is supplied.
    """
    if session:
        sess = session
    else:
        sess = TFT.gen_initialized_session(dir=dir)
    if probed_vars is None:
        results = sess.run([operators, grabbed_vars], feed_dict=feed_dict)
    else:
        results = sess.run([operators, grabbed_vars, probed_vars],
                           feed_dict=feed_dict)
        sess.probe_stream.add_summary(results[2], global_step=step)
    return results[0], results[1], sess
def run_one_step(self, operators, grabbed_vars=None, probed_vars=None, dir="probeview", session=None, feed_dict=None, step=1, display_interval=1, testing = False):
    """Execute one session step, with optional probing and periodic display.

    Grabbed values are displayed every `display_interval` steps.  Returns
    (operator results, grabbed values, session).
    NOTE(review): `testing` is accepted but not read here — kept for
    interface compatibility with callers; confirm whether it is still needed.
    """
    if session:
        sess = session
    else:
        sess = TFT.gen_initialized_session(dir=dir)
    if probed_vars is None:
        results = sess.run([operators, grabbed_vars], feed_dict=feed_dict)
    else:
        results = sess.run([operators, grabbed_vars, probed_vars],
                           feed_dict=feed_dict)
        sess.probe_stream.add_summary(results[2], global_step=step)
    if display_interval and step % display_interval == 0:
        self.display_grabvars(results[1], grabbed_vars, step=step)
    return results[0], results[1], sess
def training_session(self, epochs, sess=None, dir="probeview", continued=False):
    """Set up (or reuse) a session and run do_training for `epochs` epochs.

    :param epochs: number of training epochs, forwarded to do_training.
    :param sess: existing session to reuse; a new one is created when falsy.
    :param dir: tensorboard/probe directory for a freshly created session.
    :param continued: forwarded to do_training (resume flag).
    """
    session = sess if sess else TFT.gen_initialized_session(dir=dir)
    self.current_session = session
    # BUG FIX: roundup_probes() was called before the session existed; the
    # other training_session variant in this file documents that it must come
    # AFTER session creation, else the graph is missing from tensorboard.
    self.roundup_probes()
    self.do_training(session, self.caseman.get_training_cases(), epochs,
                     continued=continued)
def quickrun4(operators, grabbed_vars=None, dir='probeview', session=None, feed_dict=None, step=1, show_interval=1):
    """Run `operators` (and fetch `grabbed_vars`) for one step in a session.

    Shows the grabbed values every `show_interval` steps via TFT.show_results.
    Returns (operator results, grabbed values, session).
    """
    sess = session if session else TFT.gen_initialized_session(dir=dir)
    results = sess.run([operators, grabbed_vars], feed_dict=feed_dict)
    # BUG FIX: original tested `(show_interval and step % show_interval) == 0`.
    # With show_interval falsy that short-circuits to `0 == 0` (True), so
    # results were shown precisely when display was meant to be disabled.
    if show_interval and (step % show_interval == 0):
        TFT.show_results(results[1], grabbed_vars, dir)
    return results[0], results[1], sess
def training_session(self, steps, sess=None, dir="probeview", continued=False):
    """Prepare a session, register probes, and launch training.

    :param steps: number of training steps, forwarded to do_training.
    :param sess: existing session to reuse; a new one is created when falsy.
    :param dir: tensorboard/probe directory for a freshly created session.
    :param continued: forwarded to do_training (resume flag).
    """
    if sess:
        session = sess
    else:
        session = TFT.gen_initialized_session(dir=dir)
    self.current_session = session
    # Must come AFTER the session is created, else the graph does not
    # appear in tensorboard.
    self.roundup_probes()
    self.do_training(session, self.caseman.get_training_cases(), steps,
                     continued=continued)
def do_training(self, epochs=100, test_interval=10, show_interval=50, mbs=100):
    """Train for `epochs` epochs over self.training_cases in minibatches.

    Every `test_interval` epochs runs do_testing on the training set, and on
    the validation/testing sets when those are non-empty; finally plots the
    train/validation error histories.  self.train_error / self.val_error are
    (re)initialized here — presumably populated by do_testing (TODO confirm).

    :param epochs: number of passes over the training cases.
    :param test_interval: epochs between testing probes (falsy disables).
    :param show_interval: forwarded to run_one_step for grab-var display.
    :param mbs: minibatch size.
    """
    errors = []
    self.val_error = []
    self.train_error = []
    if test_interval:
        self.avg_vector_distances = []
    self.current_session = sess = TFT.gen_initialized_session()
    for i in range(epochs):
        self.current_epoch = i
        error = 0
        grabvars = [self.error]
        step = self.global_step + i
        ncases = len(self.training_cases)
        # FIX: removed unused local `nmb` (minibatch count was computed but
        # never read).
        for cstart in range(0, ncases, mbs):
            cend = min(ncases, cstart + mbs)
            minibatch = self.training_cases[cstart:cend]
            feeder = {
                self.input: [c[0] for c in minibatch],
                self.target: [c[1] for c in minibatch]
            }
            _, grabvals, _ = self.run_one_step([self.trainer], grabvars,
                                               step=step,
                                               show_interval=show_interval,
                                               session=sess,
                                               feed_dict=feeder)
            error += grabvals[0]
        errors.append([i, error])  # (epoch, summed minibatch error)
        if (test_interval and i % test_interval == 0):
            self.do_testing(sess, scatter=False,
                            mbs=len(self.training_cases), testset="training")
            if (len(self.validation_cases) != 0):
                self.do_testing(sess, scatter=False,
                                mbs=len(self.validation_cases),
                                testset="validation")
    if (len(self.testing_cases) != 0):
        self.do_testing(sess, scatter=False, mbs=mbs, testset="testing")
    #TFT.simple_plot(errors,xtitle="Epoch",ytitle="Error",title="")
    TFT.plot_training_history(self.train_error, self.val_error)
def loadParams(self, layerDims, loadPath, globalStep):
    """Rebuild an ANET and restore its weights/biases from a checkpoint.

    Creates a fresh ANET with `layerDims`, opens a new session, gathers the
    'wgt' and 'bias' variables of every layer module into a tf.train.Saver,
    and restores them from `loadPath` suffixed with `globalStep`.
    """
    self.anet = ANET(layer_dims=layerDims,
                     softmax=True,
                     case_manager=CaseManager([]))
    session = TFT.gen_initialized_session(dir="probeview")
    self.anet.current_session = session
    # Collect weight and bias variables, layer by layer (renamed from
    # `vars`, which shadowed the builtin).
    state_vars = []
    for module in self.anet.layer_modules:
        state_vars.append(module.getvar('wgt'))
        state_vars.append(module.getvar('bias'))
    self.anet.state_saver = tf.train.Saver(state_vars)
    checkpoint = loadPath + "-" + str(globalStep)
    self.anet.state_saver.restore(self.anet.current_session, checkpoint)
def runOneStep(self, operators, grabbedVars = None, probedVars = None, dir = 'probeview', session = None, feedDict = None, step = 1, showInterval = 1):
    """Execute one session step, optionally probing and displaying variables.

    Returns (operator results, grabbed values, session).  A fresh session is
    created under `dir` when none is supplied.
    """
    sess = session if session else tft.gen_initialized_session(dir=dir)
    # BUG FIX: condition referenced `probed_vars`, an undefined name (the
    # parameter is camelCase `probedVars`) — this raised NameError.
    if probedVars is not None:
        results = sess.run([operators, grabbedVars, probedVars],
                           feed_dict=feedDict)
        sess.probe_stream.add_summary(results[2], global_step=step)
    else:
        # BUG FIX: original also fetched probedVars (None here) from
        # sess.run, which TF rejects; now matches the two-fetch pattern of
        # the sibling run_one_step implementations.
        results = sess.run([operators, grabbedVars], feed_dict=feedDict)
    if showInterval and (step % showInterval == 0):
        self.displayGrabvars(results[1], grabbedVars, step=step)
    return results[0], results[1], sess
def tfex8(size=5, steps=50, tvect=None, learning_rate=0.5, showint=10):
    """Demo: train a single sigmoid layer toward a target vector via SGD.

    Builds weights, bias, input placeholder and a sigmoid output, minimizes
    mean squared error to `tvect` (default: all-ones), and steps the
    optimizer `steps` times on one fixed random input, displaying grabbed
    variables every `showint` steps via quickrun4.

    :param size: dimensionality of the (1, size) input/target vectors.
    :param steps: number of gradient-descent steps.
    :param tvect: optional (1, size) target; defaults to np.ones.
    :param learning_rate: GradientDescentOptimizer learning rate.
    :param showint: display interval forwarded to quickrun4.
    """
    # BUG FIX: was `tvect if tvect else ...` — truthiness of a multi-element
    # numpy array raises ValueError ("ambiguous"); test identity instead.
    target = tvect if tvect is not None else np.ones((1, size))
    w = tf.Variable(np.random.uniform(-.1, .1, size=(size, size)),
                    name='weights')  # weights applied to x.
    b = tf.Variable(np.zeros((1, size)), name='bias')  # bias terms
    x = tf.placeholder(tf.float64, shape=(1, size), name='input')
    # Gather all weighted inputs, then apply activation function
    y = tf.sigmoid(tf.matmul(x, w) + b, name='out-sigmoid')
    error = tf.reduce_mean(tf.square(target - y))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_operator = optimizer.minimize(error)
    feeder = {x: np.random.uniform(-1, 1, size=(1, size))}
    sess = TFT.gen_initialized_session()
    for step in range(steps):
        quickrun4([training_operator], [w, b, y], session=sess,
                  feed_dict=feeder, step=step, show_interval=showint)
    TFT.close_session(sess)
def do_training(self, epochs=100, test_interval=10, show_interval=50, mbs=100):
    """Train for `epochs` epochs over self.cases in minibatches of `mbs`.

    Sums the minibatch errors per epoch, periodically runs do_testing, and
    plots the error history (plus avg hidden-node vector distances — NOTE
    (review): nothing in this function appends to self.avg_vector_distances;
    presumably do_testing does — confirm, else the second plot is empty).

    :param epochs: number of passes over the cases.
    :param test_interval: epochs between do_testing probes (falsy disables).
    :param show_interval: forwarded to run_one_step for grab-var display.
    :param mbs: minibatch size.
    """
    errors = []
    if test_interval:
        self.avg_vector_distances = []
    self.current_session = sess = TFT.gen_initialized_session()
    for i in range(epochs):
        error = 0
        grabvars = [self.error]
        step = self.global_step + i
        ncases = len(self.cases)
        # FIX: removed unused local `nmb` (minibatch count was computed but
        # never read).
        for cstart in range(0, ncases, mbs):
            cend = min(ncases, cstart + mbs)
            minibatch = self.cases[cstart:cend]
            feeder = {
                self.input: [c[0] for c in minibatch],
                self.target: [c[1] for c in minibatch]
            }
            _, grabvals, _ = self.run_one_step([self.trainer], grabvars,
                                               step=step,
                                               show_interval=show_interval,
                                               session=sess,
                                               feed_dict=feeder)
            error += grabvals[0]
        errors.append(error)
        if (test_interval and i % test_interval == 0):
            self.do_testing(sess, scatter=False, mbs=mbs)
    PLT.figure()
    TFT.simple_plot(errors, xtitle="Epoch", ytitle="Error", title="")
    if test_interval:
        PLT.figure()
        TFT.simple_plot(self.avg_vector_distances,
                        xtitle='Epoch',
                        ytitle='Avg Hidden-Node Vector Distance',
                        title='')
def run_one_step(self, operators, grabbed_vars=None, probed_vars=None, dir='probeview', session=None, feed_dict=None, step=1, show_interval=1, mapping=False, dendrogram=False, onezero=False, komma=False, punktum=False, decimals=1, dendro_time=30, leaf_font_size=None):
    """Execute one session step with optional probing, mapping display and
    dendrogram rendering.

    Grabbed values are displayed every `show_interval` steps, and again
    (unconditionally) when `mapping` is set; `dendrogram` triggers
    display_dendrograms with the formatting flags.  Returns
    (operator results, grabbed values, session).
    """
    if session:
        sess = session
    else:
        sess = TFT.gen_initialized_session(dir=dir)
    if probed_vars is None:
        results = sess.run([operators, grabbed_vars], feed_dict=feed_dict)
    else:
        results = sess.run([operators, grabbed_vars, probed_vars],
                           feed_dict=feed_dict)
        sess.probe_stream.add_summary(results[2], global_step=step)
    if show_interval and step % show_interval == 0:
        self.display_grabvars(results[1], grabbed_vars, step=step)
    if mapping:
        # NOTE(review): this can display the same grab-vars a second time in
        # the same step (also shown above when the interval hits) — confirm
        # that double display is intended.
        self.display_grabvars(results[1], grabbed_vars, step=step)
    if dendrogram:
        self.display_dendrograms(results[1], grabbed_vars, step=step,
                                 onezero=onezero, komma=komma,
                                 punktum=punktum, decimals=decimals,
                                 sleep_time=dendro_time,
                                 leaf_font_size=leaf_font_size)
    return results[0], results[1], sess
def setupSession(self, sess=None, dir="probeview"):
    """Adopt an existing session or create a fresh one, register probes,
    and store the result on self.current_session."""
    if sess:
        session = sess
    else:
        session = TFT.gen_initialized_session(dir=dir)
    self.roundup_probes()
    self.current_session = session