def test_1(self):
    """Single passthrough: run one MNIST training prediction through tdb.debug."""
    # build_model() returns input placeholders, prediction tensors, weight
    # variables, and training ops, in this fixed order.
    (train_data_node, train_labels_node, validation_data_node, test_data_node,
     # predictions
     train_prediction, validation_prediction, test_prediction,
     # weights
     conv1_weights, conv2_weights, fc1_weights, fc2_weights,
     # training
     optimizer, loss, learning_rate, summaries) = mnist.build_model()
    with tf.Session() as s:
        tf.initialize_all_variables().run()
        print('Variables initialized')
        step = 0
        # NOTE(review): data_file is presumably a module-level path to a saved
        # .npz batch — confirm it is defined elsewhere in this file.
        with np.load(data_file) as data:
            # Arrays are extracted while the NpzFile is still open.
            feed_dict = {
                train_data_node: data['batch_data'],
                train_labels_node: data['batch_labels']
            }
        evals = [train_prediction]
        # No breakpoints: the debugger should run straight to completion.
        status, result = tdb.debug(evals, feed_dict=feed_dict,
                                   breakpoints=None, break_immediately=False,
                                   session=s)
        self.assertEqual(status, tdb.FINISHED)
def test_1(self):
    """Single passthrough: run one MNIST training prediction through tdb.debug."""
    # build_model() returns input placeholders, prediction tensors, weight
    # variables, and training ops, in this fixed order.
    (train_data_node, train_labels_node, validation_data_node, test_data_node,
     # predictions
     train_prediction, validation_prediction, test_prediction,
     # weights
     conv1_weights, conv2_weights, fc1_weights, fc2_weights,
     # training
     optimizer, loss, learning_rate, summaries) = mnist.build_model()
    with tf.Session() as s:
        tf.initialize_all_variables().run()
        print('Variables initialized')
        step = 0
        # Fixed batch fixture; assumed to live in the working directory —
        # TODO(review): confirm "mnist_0.npz" is provisioned before tests run.
        with np.load("mnist_0.npz") as data:
            # Arrays are extracted while the NpzFile is still open.
            feed_dict = {
                train_data_node: data['batch_data'],
                train_labels_node: data['batch_labels']
            }
        evals = [train_prediction]
        # No breakpoints: the debugger should run straight to completion.
        status, result = tdb.debug(evals, feed_dict=feed_dict,
                                   breakpoints=None, break_immediately=False,
                                   session=s)
        self.assertEqual(status, tdb.FINISHED)
def test_2(self):
    """MNIST training step with live weight-visualization plot ops.

    Builds the MNIST model, attaches one tdb plot op per weight tensor,
    then runs a single debug step evaluating the optimizer together with
    the plots. (Removed: commented-out pdb scaffolding and a dead first
    `evals` assignment that was immediately overwritten.)
    """
    (train_data_node, train_labels_node, validation_data_node, test_data_node,
     # predictions
     train_prediction, validation_prediction, test_prediction,
     # weights
     conv1_weights, conv2_weights, fc1_weights, fc2_weights,
     # training
     optimizer, loss, learning_rate, summaries) = mnist.build_model()
    s = tf.InteractiveSession()
    tf.initialize_all_variables().run()
    # Use the same input batch every run so this test is deterministic.
    # NOTE(review): data_file is presumably a module-level .npz path — confirm.
    with np.load(data_file) as data:
        a = data['batch_data']
        b = data['batch_labels']
    feed_dict = {
        train_data_node: a,
        train_labels_node: b
    }
    # Attach plot nodes: conv weights get the conv visualizer, fc weights
    # the fully-connected one.
    g = tf.get_default_graph()
    p1 = tdb.plot_op(viz.viz_conv_weights, inputs=[g.as_graph_element(conv1_weights)])
    p2 = tdb.plot_op(viz.viz_conv_weights, inputs=[g.as_graph_element(conv2_weights)])
    p3 = tdb.plot_op(viz.viz_fc_weights, inputs=[g.as_graph_element(fc1_weights)])
    p4 = tdb.plot_op(viz.viz_fc_weights, inputs=[g.as_graph_element(fc2_weights)])
    evals = [optimizer, loss, learning_rate, train_prediction, p1, p2, p3, p4]
    status, result = tdb.debug(evals, feed_dict=feed_dict, session=s)
def test_2(self):
    """MNIST training step with live weight-visualization plot ops.

    Builds the MNIST model, attaches one tdb plot op per weight tensor,
    then runs a single debug step evaluating the optimizer together with
    the plots. (Removed: commented-out pdb scaffolding and a dead first
    `evals` assignment that was immediately overwritten.)
    """
    (train_data_node, train_labels_node, validation_data_node, test_data_node,
     # predictions
     train_prediction, validation_prediction, test_prediction,
     # weights
     conv1_weights, conv2_weights, fc1_weights, fc2_weights,
     # training
     optimizer, loss, learning_rate, summaries) = mnist.build_model()
    s = tf.InteractiveSession()
    tf.initialize_all_variables().run()
    # Use the same input batch every run so this test is deterministic.
    # TODO(review): confirm the "mnist_0.npz" fixture exists in the CWD.
    with np.load("mnist_0.npz") as data:
        a = data['batch_data']
        b = data['batch_labels']
    feed_dict = {
        train_data_node: a,
        train_labels_node: b
    }
    # Attach plot nodes: conv weights get the conv visualizer, fc weights
    # the fully-connected one.
    g = tf.get_default_graph()
    p1 = tdb.plot_op(viz.viz_conv_weights, inputs=[g.as_graph_element(conv1_weights)])
    p2 = tdb.plot_op(viz.viz_conv_weights, inputs=[g.as_graph_element(conv2_weights)])
    p3 = tdb.plot_op(viz.viz_fc_weights, inputs=[g.as_graph_element(fc1_weights)])
    p4 = tdb.plot_op(viz.viz_fc_weights, inputs=[g.as_graph_element(fc2_weights)])
    evals = [optimizer, loss, learning_rate, train_prediction, p1, p2, p3, p4]
    status, result = tdb.debug(evals, feed_dict=feed_dict, session=s)
def test_2(self):
    """See TestDebuggingTF.test_2 — same scenario on the HyperTree graph."""
    graph = build_graph_ht()
    target = graph[2]  # c, the penultimate value
    # Pause immediately, before anything executes.
    status, result = tdb.debug([target], feed_dict=None, breakpoints=None,
                               break_immediately=True)
    self.assertEqual(status, tdb.PAUSED)
    # Resume and run to completion.
    status, result = tdb.c()
    self.assertEqual(status, tdb.FINISHED)
    self.assertEqual(result[0], 5)  # check that c = 5
def test_1(self):
    """ht -> tf: a python_op output is consumed by a TensorFlow op."""
    lhs = tf.constant(2)
    rhs = tf.constant(3)
    # Wrap the pure-Python adder; its placeholder output bridges back into TF.
    total = tdb.python_op(myadd, inputs=[lhs, rhs],
                          outputs=[tf.placeholder(tf.int32)])  # a + b
    negated = tf.neg(total)
    status, result = tdb.debug([negated], feed_dict=None, breakpoints=None,
                               break_immediately=False)
    self.assertEqual(status, tdb.FINISHED)
    self.assertEqual(result[0], -5)  # -(2 + 3)
def test_3(self):
    """Breakpoint on an intermediate node: pause first, then run to the end."""
    # Build the TensorFlow graph as usual.
    nodes = build_graph_tf()
    c = nodes[2]
    d = nodes[4]
    status, result = tdb.debug(d, feed_dict=None, breakpoints=[c],
                               break_immediately=False)
    # Paused at the breakpoint — no results are available yet.
    self.assertEqual(status, tdb.PAUSED)
    self.assertEqual(result, None)
    status, result = tdb.c()
    self.assertEqual(status, tdb.FINISHED)
    self.assertEqual(result[0], -5)
def test_2(self):
    """Single eval of the penultimate node, breaking immediately.

    Verifies (fixed docstring typos: "pentultimate"/"breka") that the
    execution order does NOT contain d or c2.
    """
    a, b, c, c2, d = build_graph_tf()
    status, result = tdb.debug([c], feed_dict=None, breakpoints=None,
                               break_immediately=True)
    self.assertEqual(status, tdb.PAUSED)
    status, result = tdb.c()  # continue
    self.assertEqual(status, tdb.FINISHED)
    self.assertEqual(result[0], 5)  # check that c = 5
def test_2(self):
    """tf -> ht: a TensorFlow tensor feeds a python_op."""
    x = tf.constant(2)
    y = tf.constant(3)
    summed = tf.add(x, y)
    negated = tdb.python_op(myneg, inputs=[summed],
                            outputs=[tf.placeholder(tf.int32)])
    status, result = tdb.debug([negated], feed_dict=None, breakpoints=None,
                               break_immediately=False)
    self.assertEqual(status, tdb.FINISHED)
    self.assertEqual(result[0], -5)
    # Cached values remain retrievable by node and by node name alike.
    self.assertEqual(tdb.get_value(negated), -5)
    self.assertEqual(tdb.get_value(negated.name), -5)
def test_1(self):
    """See TestDebuggingTF.test_1 — same scenario on the HyperTree graph.

    Fixed: trailing comments previously mislabelled the indices — in
    evals = [a, b, c, c2, d], result[3] is c2 and result[4] is d.
    """
    # Construct the graph as usual.
    a, b, c, c2, d = build_graph_ht()
    evals = [a, b, c, c2, d]
    status, result = tdb.debug(evals, feed_dict=None, breakpoints=None,
                               break_immediately=False)
    self.assertEqual(status, tdb.FINISHED)
    # result is ordered like evals: [a, b, c, c2, d]
    self.assertEqual(result[0], 2)   # a = 2
    self.assertEqual(result[1], 3)   # b = 3
    self.assertEqual(result[2], 5)   # c = 5
    self.assertEqual(result[4], -5)  # d = -5 (d = neg(c); see test_3)
    self.assertEqual(result[3], 6)   # c2 = 6
def test_3(self):
    """Debugging with a breakpoint set on c while evaluating d."""
    # Construct the TensorFlow graph as usual.
    a, b, c, c2, d = build_graph_tf()
    status, result = tdb.debug(
        d, feed_dict=None, breakpoints=[c], break_immediately=False)
    # Hitting the breakpoint pauses execution with no result yet.
    self.assertEqual(status, tdb.PAUSED)
    self.assertEqual(result, None)
    # Continue past the breakpoint to completion.
    status, result = tdb.c()
    self.assertEqual(status, tdb.FINISHED)
    self.assertEqual(result[0], -5)
def test_2(self):
    """Single eval of the penultimate node, breaking immediately.

    Verifies (fixed docstring typos: "pentultimate"/"breka") that the
    execution order does NOT contain d or c2.
    """
    a, b, c, c2, d = build_graph_tf()
    status, result = tdb.debug([c], feed_dict=None, breakpoints=None,
                               break_immediately=True)
    self.assertEqual(status, tdb.PAUSED)
    status, result = tdb.c()  # continue
    self.assertEqual(status, tdb.FINISHED)
    self.assertEqual(result[0], 5)  # check that c = 5
def test_1(self):
    """Debug a pure TensorFlow graph end to end.

    No breakpoints, all nodes evaluated; tdb should automatically build
    an InteractiveSession and create a HyperTree.

    Fixed: trailing comments contradicted the asserted values and
    mislabelled indices — in evals = [a, b, c, c2, d], result[3] is c2
    and result[4] is d.
    """
    # Construct the TensorFlow graph as usual.
    a, b, c, c2, d = build_graph_tf()
    evals = [a, b, c, c2, d]
    status, result = tdb.debug(evals, feed_dict=None, breakpoints=None,
                               break_immediately=False)
    self.assertEqual(status, tdb.FINISHED)
    self.assertEqual(result[0], 2)   # a = 2
    self.assertEqual(result[1], 3)   # b = 3
    self.assertEqual(result[2], 5)   # c = 5
    self.assertEqual(result[4], -5)  # d = -5 (d = neg(c); see test_3)
    self.assertEqual(result[3], 6)   # c2 = 6
def test_1(self):
    """ht->tf: hybrid graph where a python_op result flows into tf.neg."""
    two = tf.constant(2)
    three = tf.constant(3)
    # myadd runs in Python; the int32 placeholder carries its result into TF.
    c = tdb.python_op(myadd, inputs=[two, three],
                      outputs=[tf.placeholder(tf.int32)])  # a+b
    d = tf.neg(c)
    status, result = tdb.debug(
        [d], feed_dict=None, breakpoints=None, break_immediately=False)
    self.assertEqual(status, tdb.FINISHED)
    self.assertEqual(result[0], -5)
def test_2(self):
    """tf -> ht: feed a TF tensor into a python_op and read values back."""
    a = tf.constant(2)
    b = tf.constant(3)
    c = tf.add(a, b)
    # myneg runs in Python on the evaluated value of c.
    d = tdb.python_op(myneg, inputs=[c], outputs=[tf.placeholder(tf.int32)])
    status, result = tdb.debug(
        [d], feed_dict=None, breakpoints=None, break_immediately=False)
    self.assertEqual(status, tdb.FINISHED)
    self.assertEqual(result[0], -5)
    # get_value works with either the node object or its string name.
    for handle in (d, d.name):
        self.assertEqual(tdb.get_value(handle), -5)
def test_1(self):
    """Debug a pure TensorFlow graph end to end.

    No breakpoints, all nodes evaluated; tdb should automatically build
    an InteractiveSession and create a HyperTree.

    Fixed: trailing comments contradicted the asserted values and
    mislabelled indices — in evals = [a, b, c, c2, d], result[3] is c2
    and result[4] is d.
    """
    # Construct the TensorFlow graph as usual.
    a, b, c, c2, d = build_graph_tf()
    evals = [a, b, c, c2, d]
    status, result = tdb.debug(evals, feed_dict=None, breakpoints=None,
                               break_immediately=False)
    self.assertEqual(status, tdb.FINISHED)
    self.assertEqual(result[0], 2)   # a = 2
    self.assertEqual(result[1], 3)   # b = 3
    self.assertEqual(result[2], 5)   # c = 5
    self.assertEqual(result[4], -5)  # d = -5 (d = neg(c); see test_3)
    self.assertEqual(result[3], 6)   # c2 = 6
# for f in files: # print(f) # urllib.urlretrieve(base_url+f, download_dir+f) # return the TF nodes corresponding to graph input placeholders (train_data, train_labels, validation_data, validation_labels, test_data, test_labels) = mnist.get_data(download_dir) # start the TensorFlow session that will be used to evaluate the graph s = tf.InteractiveSession() tf.initialize_all_variables().run() BATCH_SIZE = 64 NUM_EPOCHS = 5 TRAIN_SIZE = 10000 for step in range(NUM_EPOCHS * TRAIN_SIZE // BATCH_SIZE): offset = (step * BATCH_SIZE) % (TRAIN_SIZE - BATCH_SIZE) batch_data = train_data[offset:(offset + BATCH_SIZE), :, :, :] batch_labels = train_labels[offset:(offset + BATCH_SIZE)] feed_dict = {train_data_node: batch_data, train_labels_node: batch_labels} # run training node and visualization node status, result = tdb.debug([optimizer, p0], feed_dict=feed_dict, session=s) if step % 10 == 0: status, result = tdb.debug([loss, p1, p2, p3, p4, ploss], feed_dict=feed_dict, breakpoints=None, break_immediately=False, session=s) print('loss: %f' % (result[0]))