def testConstructWrapperWithExistingEmptyDumpRoot(self):
  os.mkdir(self._tmp_dir)
  self.assertTrue(os.path.isdir(self._tmp_dir))

  local_cli.LocalCLIDebugWrapperSession(
      session.Session(), dump_root=self._tmp_dir, log_usage=False)
def testConstructWrapperWithExistingFileDumpRoot(self):
  os.mkdir(self._tmp_dir)
  file_path = os.path.join(self._tmp_dir, "foo")
  open(file_path, "a").close()  # Create the file.
  self.assertTrue(os.path.isfile(file_path))

  with self.assertRaisesRegexp(
      ValueError, "dump_root path points to a file"):
    local_cli.LocalCLIDebugWrapperSession(
        session.Session(), dump_root=file_path)
def testConstructWrapperWithExistingNonEmptyDumpRoot(self):
  os.mkdir(self._tmp_dir)
  dir_path = os.path.join(self._tmp_dir, "foo")
  os.mkdir(dir_path)
  self.assertTrue(os.path.isdir(dir_path))

  with self.assertRaisesRegexp(
      ValueError, "dump_root path points to a non-empty directory"):
    local_cli.LocalCLIDebugWrapperSession(
        session.Session(), dump_root=self._tmp_dir)
def main(_):
  sess = tf.Session()

  # Construct the TensorFlow network: a chain of adds that computes a
  # Fibonacci-like sequence of tensors.
  n0 = tf.Variable(np.ones([FLAGS.tensor_size] * 2), name="node_00")
  n1 = tf.Variable(np.ones([FLAGS.tensor_size] * 2), name="node_01")

  if FLAGS.length > 100:
    raise ValueError("n is too big.")
  for i in xrange(2, FLAGS.length):
    n0, n1 = n1, tf.add(n0, n1, name="node_%.2d" % i)

  sess.run(tf.initialize_all_variables())

  # Wrap the TensorFlow Session object for debugging.
  sess = local_cli.LocalCLIDebugWrapperSession(sess)

  sess.run(n1)
def testConstructWrapper(self):
  local_cli.LocalCLIDebugWrapperSession(session.Session(), log_usage=False)
def main(_):
  # Import data.
  mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

  def feed_dict(train):
    if train:
      xs, ys = mnist.train.next_batch(FLAGS.train_batch_size,
                                      fake_data=False)
    else:
      xs, ys = mnist.test.images, mnist.test.labels

    return {x: xs, y_: ys}

  sess = tf.InteractiveSession()

  # Create the MNIST neural network graph.

  # Input placeholders.
  with tf.name_scope("input"):
    x = tf.placeholder(
        tf.float32, [None, IMAGE_SIZE * IMAGE_SIZE], name="x-input")
    y_ = tf.placeholder(tf.float32, [None, NUM_LABELS], name="y-input")

  def weight_variable(shape):
    """Create a weight variable with appropriate initialization."""
    initial = tf.truncated_normal(shape, stddev=0.1, seed=RAND_SEED)
    return tf.Variable(initial)

  def bias_variable(shape):
    """Create a bias variable with appropriate initialization."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

  def nn_layer(input_tensor, input_dim, output_dim, layer_name,
               act=tf.nn.relu):
    """Reusable code for making a simple neural net layer."""
    # Adding a name scope ensures logical grouping of the layers in the graph.
    with tf.name_scope(layer_name):
      # This Variable will hold the state of the weights for the layer.
      with tf.name_scope("weights"):
        weights = weight_variable([input_dim, output_dim])
      with tf.name_scope("biases"):
        biases = bias_variable([output_dim])
      with tf.name_scope("Wx_plus_b"):
        preactivate = tf.matmul(input_tensor, weights) + biases

      activations = act(preactivate)
      return activations

  hidden = nn_layer(x, IMAGE_SIZE**2, HIDDEN_SIZE, "hidden")
  y = nn_layer(hidden, HIDDEN_SIZE, NUM_LABELS, "softmax", act=tf.nn.softmax)

  with tf.name_scope("cross_entropy"):
    # The following line is the culprit of the bad numerical values that
    # appear during training of this graph. Log of zero gives inf, which is
    # first seen in the intermediate tensor "cross_entropy/Log:0" during the
    # 4th run() call. A multiplication of the inf values with zeros leads to
    # nans, which are first seen in "cross_entropy/mul:0".
    #
    # You can use clipping to fix this issue, e.g.,
    #   diff = y_ * tf.log(tf.clip_by_value(y, 1e-8, 1.0))
    diff = y_ * tf.log(y)
    with tf.name_scope("total"):
      cross_entropy = -tf.reduce_mean(diff)

  with tf.name_scope("train"):
    train_step = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(
        cross_entropy)

  with tf.name_scope("accuracy"):
    with tf.name_scope("correct_prediction"):
      correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    with tf.name_scope("accuracy"):
      accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

  sess.run(tf.initialize_all_variables())

  if FLAGS.debug:
    sess = local_cli.LocalCLIDebugWrapperSession(sess)
    sess.add_tensor_filter("has_inf_or_nan", debug_data.has_inf_or_nan)

  # At this point, sess is a debug wrapper around the actual Session if
  # FLAGS.debug is true. In that case, calling run() will launch the CLI.
  for i in range(FLAGS.max_steps):
    acc = sess.run(accuracy, feed_dict=feed_dict(False))
    print("Accuracy at step %d: %s" % (i, acc))

    sess.run(train_step, feed_dict=feed_dict(True))
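For reference, here is a minimal, self-contained sketch (not part of the original example) of the clipping fix suggested in the cross_entropy comment above; the tensor values are hypothetical, chosen to trigger log(0) directly.

import tensorflow as tf

y_ = tf.constant([[0.0, 1.0]])  # One-hot true label.
y = tf.constant([[1.0, 0.0]])  # Pathological prediction: zero probability
                               # assigned to the true class.

# log(0) = -inf, so the unclipped cross entropy evaluates to inf.
unsafe = -tf.reduce_mean(y_ * tf.log(y))
# Clipping y away from zero keeps the log, and hence the loss, finite.
safe = -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(y, 1e-8, 1.0)))

with tf.Session() as sess:
  print(sess.run([unsafe, safe]))  # Approximately [inf, 9.21].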