def testRunWithError(self):
  """Test the debug tensor dumping when error occurs in graph runtime."""

  with session.Session() as sess:
    ph = tf.placeholder(tf.float32, name="mismatch/ph")
    x = tf.transpose(ph, name="mismatch/x")
    m = constant_op.constant(
        np.array([[1.0, 2.0]], dtype=np.float32), name="mismatch/m")
    y = math_ops.matmul(m, x, name="mismatch/y")

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls=self._debug_urls())

    with self.assertRaises(errors.OpError):
      sess.run(y,
               options=run_options,
               feed_dict={ph: np.array([[-3.0], [0.0]])})

    dump = debug_data.DebugDumpDir(self._dump_root)
    self.assertFalse(dump.loaded_partition_graphs())

    m_dumps = dump.watch_key_to_data("mismatch/m:0:DebugIdentity")
    self.assertEqual(1, len(m_dumps))
    self.assertAllClose(np.array([[1.0, 2.0]]), m_dumps[0].get_tensor())

    x_dumps = dump.watch_key_to_data("mismatch/x:0:DebugIdentity")
    self.assertEqual(1, len(x_dumps))
    self.assertAllClose(np.array([[-3.0, 0.0]]), x_dumps[0].get_tensor())
def setUpClass(cls):
  cls._dump_root = tempfile.mkdtemp()

  with session.Session() as sess:
    # 2400 elements should exceed the default threshold (2000).
    x = constant_op.constant(np.zeros([300, 8]), name="large_tensors/x")

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls="file://%s" % cls._dump_root)

    # Invoke Session.run().
    run_metadata = config_pb2.RunMetadata()
    sess.run(x, options=run_options, run_metadata=run_metadata)

  cls._debug_dump = debug_data.DebugDumpDir(
      cls._dump_root, partition_graphs=run_metadata.partition_graphs)

  # Construct the analyzer.
  cls._analyzer = analyzer_cli.DebugAnalyzer(cls._debug_dump)

  # Construct the handler registry.
  cls._registry = debugger_cli_common.CommandHandlerRegistry()

  # Register command handler.
  cls._registry.register_command_handler(
      "print_tensor",
      cls._analyzer.print_tensor,
      cls._analyzer.get_help("print_tensor"),
      prefix_aliases=["pt"])
def before_run(self, run_context):
  if not self._wrapper_initialized:
    dumping_wrapper.DumpingDebugWrapperSession.__init__(
        self,
        run_context.session,
        self._session_root,
        watch_fn=self._watch_fn,
        log_usage=self._log_usage)
    self._wrapper_initialized = True

  self._run_call_count += 1

  (debug_urls, debug_ops, node_name_regex_whitelist,
   op_type_regex_whitelist) = self._prepare_run_watch_config(
       run_context.original_args.fetches, run_context.original_args.feed_dict)

  run_options = config_pb2.RunOptions()
  debug_utils.watch_graph(
      run_options,
      run_context.session.graph,
      debug_urls=debug_urls,
      debug_ops=debug_ops,
      node_name_regex_whitelist=node_name_regex_whitelist,
      op_type_regex_whitelist=op_type_regex_whitelist)

  run_args = session_run_hook.SessionRunArgs(
      None, feed_dict=None, options=run_options)

  return run_args
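# A minimal usage sketch for a hook with the before_run() above: the hook
# lazily initializes its DumpingDebugWrapperSession state on the first
# before_run() call, then attaches debug watches to every run(). The import
# path and class name (DumpingDebugHook) follow the tfdbg wrappers of the
# same era but should be treated as assumptions; the session_root path and
# train_op are illustrative.
from tensorflow.python.debug.wrappers import hooks
from tensorflow.python.training import monitored_session

dump_hook = hooks.DumpingDebugHook("/tmp/tfdbg_session_dumps")
with monitored_session.MonitoredSession(hooks=[dump_hook]) as mon_sess:
  mon_sess.run(train_op)  # train_op is assumed to be defined elsewhere.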
def _decorate_run_options(self,
                          run_options,
                          debug_urls,
                          debug_ops="DebugIdentity",
                          node_name_regex_whitelist=None,
                          op_type_regex_whitelist=None):
  """Modify a RunOptions object for debug tensor watching.

  Specifies request for outputting partition graphs. Adds
  debug_tensor_watch_opts with proper debug URLs.

  Args:
    run_options: (RunOptions) the modified RunOptions object.
    debug_urls: (list of str) debug URLs to be entered in
      run_options.debug_tensor_watch_opts.
    debug_ops: (str or list of str) debug op(s) to be used by the debugger.
    node_name_regex_whitelist: Regular-expression whitelist for node name.
    op_type_regex_whitelist: Regular-expression whitelist for op type.
  """

  run_options.output_partition_graphs = True
  debug_utils.watch_graph(
      run_options,
      self._sess.graph,
      debug_urls=debug_urls,
      debug_ops=debug_ops,
      node_name_regex_whitelist=node_name_regex_whitelist,
      op_type_regex_whitelist=op_type_regex_whitelist)
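# A self-contained sketch of the same decoration applied directly to a
# Session.run() call, without the wrapper class (TF 1.x-era APIs; the graph,
# node names, and dump path are illustrative assumptions).
import tensorflow as tf
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.debug import debug_utils

with tf.Session() as sess:
  a = tf.Variable([1.0, 2.0], name="demo/a")
  b = tf.add(a, a, name="demo/b")
  sess.run(a.initializer)

  run_options = config_pb2.RunOptions(output_partition_graphs=True)
  debug_utils.watch_graph(
      run_options,
      sess.graph,
      debug_ops=["DebugIdentity"],
      debug_urls=["file:///tmp/tfdbg_demo"],
      node_name_regex_whitelist=r"demo/.*")
  sess.run(b, options=run_options)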
def testWatchGraph_allNodes(self):
  debug_utils.watch_graph(
      self._run_options,
      self._graph,
      debug_ops=["DebugIdentity", "DebugNanCount"],
      debug_urls="file:///tmp/tfdbg_1")

  self.assertEqual(self._expected_num_nodes,
                   len(self._run_options.debug_tensor_watch_opts))

  # Verify that each node in the graph with at least one output tensor has
  # a debug tensor watch.
  node_names = self._verify_watches(
      self._run_options.debug_tensor_watch_opts, 0,
      ["DebugIdentity", "DebugNanCount"], ["file:///tmp/tfdbg_1"])

  # Verify the node names.
  self.assertTrue("a1_init" in node_names)
  self.assertTrue("a1" in node_names)
  self.assertTrue("a1/Assign" in node_names)
  self.assertTrue("a1/read" in node_names)
  self.assertTrue("b_init" in node_names)
  self.assertTrue("b" in node_names)
  self.assertTrue("b/Assign" in node_names)
  self.assertTrue("b/read" in node_names)
  self.assertTrue("c" in node_names)
  self.assertTrue("p1" in node_names)
  self.assertTrue("s" in node_names)
def testWatchingOutputSlotWithoutOutgoingEdge(self):
  """Test watching output slots not attached to any outgoing edges."""

  with session.Session() as sess:
    u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
    u = constant_op.constant(u_init_val, shape=[2, 2], name="u")

    # Create a control edge from a node with an output: From u to z.
    # Node u will get executed only because of the control edge. The output
    # tensor u:0 is not attached to any outgoing edge in the graph. This
    # test checks that the debugger can watch such a tensor.
    with ops.control_dependencies([u]):
      z = control_flow_ops.no_op(name="z")

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls=self._debug_urls())

    run_metadata = config_pb2.RunMetadata()
    sess.run(z, options=run_options, run_metadata=run_metadata)

    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)

    # Assert that the DebugIdentity watch on u works properly.
    self.assertEqual(1, len(dump.dumped_tensor_data))
    datum = dump.dumped_tensor_data[0]
    self.assertEqual("u", datum.node_name)
    self.assertEqual(0, datum.output_slot)
    self.assertEqual("DebugIdentity", datum.debug_op)
    self.assertAllClose([[5.0, 3.0], [-1.0, 0.0]], datum.get_tensor())
def _session_run_for_graph_structure_lookup(self):
  with session.Session() as sess:
    u_name = "testDumpGraphStructureLookup/u"
    v_name = "testDumpGraphStructureLookup/v"
    w_name = "testDumpGraphStructureLookup/w"

    u_init = constant_op.constant([2.0, 4.0])
    u = variables.Variable(u_init, name=u_name)
    v = math_ops.add(u, u, name=v_name)
    w = math_ops.add(v, v, name=w_name)

    u.initializer.run()

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls=self._debug_urls())

    run_metadata = config_pb2.RunMetadata()
    sess.run(w, options=run_options, run_metadata=run_metadata)

    self.assertEqual(self._expected_partition_graph_count,
                     len(run_metadata.partition_graphs))

    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)

  return u_name, v_name, w_name, dump
def testDebugNumericSummaryOnUninitializedTensorGivesCorrectResult(self):
  with session.Session() as sess:
    a = variables.Variable(
        [42], dtype=np.float32, name="numeric_summary_uninit/a")

    run_metadata = config_pb2.RunMetadata()
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugNumericSummary"],
        debug_urls=self._debug_urls())

    sess.run(a.initializer, options=run_options, run_metadata=run_metadata)

    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)
    self.assertTrue(dump.loaded_partition_graphs())

    # DebugNumericSummary output should reflect the uninitialized state of
    # the watched tensor.
    numeric_summary = dump.get_tensors(
        "numeric_summary_uninit/a", 0, "DebugNumericSummary")[0]
    self.assertAllClose(
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], numeric_summary[0:8])
    self.assertTrue(np.isinf(numeric_summary[8]))
    self.assertGreater(numeric_summary[8], 0.0)
    self.assertTrue(np.isinf(numeric_summary[9]))
    self.assertLess(numeric_summary[9], 0.0)
    self.assertTrue(np.isnan(numeric_summary[10]))
    self.assertTrue(np.isnan(numeric_summary[11]))
def testDebugNumericSummaryOnInitializedTensorGivesCorrectResult(self):
  with session.Session() as sess:
    a = variables.Variable(
        [
            np.nan, np.nan, 0.0, 0.0, 0.0, -1.0, -3.0, 3.0, 7.0, -np.inf,
            -np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.nan, np.nan
        ],
        dtype=np.float32,
        name="numeric_summary/a")
    b = variables.Variable(
        [0.0] * 18, dtype=np.float32, name="numeric_summary/b")
    c = math_ops.add(a, b, name="numeric_summary/c")

    sess.run(variables.global_variables_initializer())

    run_metadata = config_pb2.RunMetadata()
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugNumericSummary"],
        debug_urls=self._debug_urls())

    sess.run(c, options=run_options, run_metadata=run_metadata)

    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)
    self.assertTrue(dump.loaded_partition_graphs())

    self.assertAllClose([[
        1.0, 18.0, 2.0, 2.0, 3.0, 2.0, 5.0, 4.0, -3.0, 7.0, 0.85714286,
        8.97959184
    ]], dump.get_tensors("numeric_summary/a/read", 0, "DebugNumericSummary"))
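# The 12-element vector asserted above matches what can be inferred from the
# two DebugNumericSummary tests (this layout is a reading of the test values,
# not an authoritative spec): [is_initialized, element_count, nan_count,
# neg_inf_count, neg_count, zero_count, pos_count, pos_inf_count, min, max,
# mean, variance]. A quick consistency check on that interpretation:
summary = [1.0, 18.0, 2.0, 2.0, 3.0, 2.0, 5.0, 4.0, -3.0, 7.0, 0.85714286,
           8.97959184]
# 2 NaNs + 2 -Infs + 3 negatives + 2 zeros + 5 positives + 4 +Infs == 18
# elements, with min/max/mean/variance taken over the 7 finite elements.
assert sum(summary[2:8]) == summary[1]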
def testDumpCausalityCheck(self):
  with session.Session() as sess:
    u_name = "testDumpCausalityCheck/u"
    v_name = "testDumpCausalityCheck/v"
    w_name = "testDumpCausalityCheck/w"

    u_init = constant_op.constant([2.0, 4.0])
    u = variables.Variable(u_init, name=u_name)
    v = math_ops.add(u, u, name=v_name)
    w = math_ops.add(v, v, name=w_name)

    u.initializer.run()

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls="file://%s" % self._dump_root)

    run_metadata = config_pb2.RunMetadata()
    sess.run(w, options=run_options, run_metadata=run_metadata)

    self.assertEqual(self._expected_partition_graph_count,
                     len(run_metadata.partition_graphs))

    # First, loading the original dump without supplying the
    # partition_graphs should not cause a RuntimeError: validation occurs
    # only when partition_graphs are loaded.
    debug_data.DebugDumpDir(self._dump_root)

    # Now, loading the original dump with partition graphs supplied should
    # succeed. The validation should pass quietly.
    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)

    # Get the dump file names and compute their timestamps.
    self.assertEqual(
        1, len(dump.get_tensor_file_paths(u_name, 0, "DebugIdentity")))
    u_file_path = dump.get_tensor_file_paths(u_name, 0, "DebugIdentity")[0]

    self.assertEqual(
        1, len(dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")))
    v_file_path = dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")[0]

    u_timestamp = int(u_file_path[u_file_path.rindex("_") + 1:])
    v_timestamp = int(v_file_path[v_file_path.rindex("_") + 1:])

    # Swap the timestamps.
    new_u_file_path = u_file_path[:u_file_path.rindex(
        "_")] + "_%d" % v_timestamp
    new_v_file_path = v_file_path[:v_file_path.rindex(
        "_")] + "_%d" % u_timestamp

    os.rename(u_file_path, new_u_file_path)
    os.rename(v_file_path, new_v_file_path)

    # Load the dump directory again. Now a ValueError is expected to be
    # raised due to the timestamp swap.
    with self.assertRaisesRegexp(ValueError, "Causality violated"):
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)

    # Loading the dump directory with kwarg "validate" set explicitly to
    # False should get rid of the error.
    dump = debug_data.DebugDumpDir(
        self._dump_root,
        partition_graphs=run_metadata.partition_graphs,
        validate=False)
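# A tiny illustration of the timestamp-suffix parsing the causality test
# relies on. The exact prefix layout of a dump file name is an assumption
# here (the example path is made up); what the test depends on is only that
# the file name ends in "_<timestamp>", so swapping the suffixes of two
# files is enough to simulate a causality violation.
dump_file_path = "/tmp/tfdbg/u_0_DebugIdentity_1484604657209386"
timestamp = int(dump_file_path[dump_file_path.rindex("_") + 1:])
assert timestamp == 1484604657209386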
def setUpClass(cls):
  cls._dump_root = tempfile.mkdtemp()

  cls._is_gpu_available = test.is_gpu_available()
  if cls._is_gpu_available:
    cls._main_device = "/job:localhost/replica:0/task:0/gpu:0"
  else:
    cls._main_device = "/job:localhost/replica:0/task:0/cpu:0"

  with session.Session() as sess:
    x_init_val = np.array([5.0, 3.0])
    x_init = constant_op.constant(x_init_val, shape=[2])
    x = variables.Variable(x_init, name="control_deps/x")

    y = math_ops.add(x, x, name="control_deps/y")
    y = control_flow_ops.with_dependencies(
        [x], y, name="control_deps/ctrl_dep_y")

    z = math_ops.mul(x, y, name="control_deps/z")
    z = control_flow_ops.with_dependencies(
        [x, y], z, name="control_deps/ctrl_dep_z")

    x.initializer.run()

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls="file://%s" % cls._dump_root)

    # Invoke Session.run().
    run_metadata = config_pb2.RunMetadata()
    sess.run(z, options=run_options, run_metadata=run_metadata)

  debug_dump = debug_data.DebugDumpDir(
      cls._dump_root, partition_graphs=run_metadata.partition_graphs)

  # Construct the analyzer.
  analyzer = analyzer_cli.DebugAnalyzer(debug_dump)

  # Construct the handler registry.
  cls._registry = debugger_cli_common.CommandHandlerRegistry()

  # Register command handlers.
  cls._registry.register_command_handler(
      "node_info",
      analyzer.node_info,
      analyzer.get_help("node_info"),
      prefix_aliases=["ni"])
  cls._registry.register_command_handler(
      "list_inputs",
      analyzer.list_inputs,
      analyzer.get_help("list_inputs"),
      prefix_aliases=["li"])
  cls._registry.register_command_handler(
      "list_outputs",
      analyzer.list_outputs,
      analyzer.get_help("list_outputs"),
      prefix_aliases=["lo"])
def setUpClass(cls):
  cls._dump_root = tempfile.mkdtemp()

  cls._is_gpu_available = test.is_gpu_available()
  if cls._is_gpu_available:
    cls._main_device = "/job:localhost/replica:0/task:0/gpu:0"
  else:
    cls._main_device = "/job:localhost/replica:0/task:0/cpu:0"

  with session.Session() as sess:
    u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
    v_init_val = np.array([[2.0], [-1.0]])

    u_name = "simple_mul_add/u"
    v_name = "simple_mul_add/v"

    u_init = constant_op.constant(u_init_val, shape=[2, 2])
    u = variables.Variable(u_init, name=u_name)
    v_init = constant_op.constant(v_init_val, shape=[2, 1])
    v = variables.Variable(v_init, name=v_name)

    w = math_ops.matmul(u, v, name="simple_mul_add/matmul")
    x = math_ops.add(w, w, name="simple_mul_add/add")

    u.initializer.run()
    v.initializer.run()

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls="file://%s" % cls._dump_root)

    # Invoke Session.run().
    run_metadata = config_pb2.RunMetadata()
    sess.run(x, options=run_options, run_metadata=run_metadata)

  debug_dump = debug_data.DebugDumpDir(
      cls._dump_root, partition_graphs=run_metadata.partition_graphs)

  # Construct the analyzer.
  analyzer = analyzer_cli.DebugAnalyzer(debug_dump)

  # Construct the handler registry.
  cls._registry = debugger_cli_common.CommandHandlerRegistry()

  # Register command handlers.
  cls._registry.register_command_handler(
      "list_tensors",
      analyzer.list_tensors,
      analyzer.get_help("list_tensors"),
      prefix_aliases=["lt"])
  cls._registry.register_command_handler(
      "node_info",
      analyzer.node_info,
      analyzer.get_help("node_info"),
      prefix_aliases=["ni"])
def testFindNodesWithBadTensorValues(self):
  with session.Session() as sess:
    u_name = "testFindNodesWithBadTensorValues/u"
    v_name = "testFindNodesWithBadTensorValues/v"
    w_name = "testFindNodesWithBadTensorValues/w"
    x_name = "testFindNodesWithBadTensorValues/x"
    y_name = "testFindNodesWithBadTensorValues/y"
    z_name = "testFindNodesWithBadTensorValues/z"

    u_init = constant_op.constant([2.0, 4.0])
    u = variables.Variable(u_init, name=u_name)
    v_init = constant_op.constant([2.0, 1.0])
    v = variables.Variable(v_init, name=v_name)

    # Expected output: [0.0, 3.0]
    w = math_ops.sub(u, v, name=w_name)

    # Expected output: [inf, 1.3333]
    x = math_ops.div(u, w, name=x_name)

    # Expected output: [nan, 4.0]
    y = math_ops.mul(w, x, name=y_name)

    z = math_ops.mul(y, y, name=z_name)

    u.initializer.run()
    v.initializer.run()

    run_options = config_pb2.RunOptions()
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls="file://%s" % self._dump_root)

    run_metadata = config_pb2.RunMetadata()
    sess.run(z, options=run_options, run_metadata=run_metadata)

    dump = debug_data.DebugDumpDir(self._dump_root)

    def has_bad_value(_, tensor):
      return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))

    # Find all "offending tensors".
    bad_data = dump.find(has_bad_value)

    # Verify that the nodes with bad values are caught through running find
    # on the debug dump.
    self.assertEqual(3, len(bad_data))
    self.assertEqual(x_name, bad_data[0].node_name)
    self.assertEqual(y_name, bad_data[1].node_name)
    self.assertEqual(z_name, bad_data[2].node_name)

    # Test first_n kwarg of find(): Find the first offending tensor.
    first_bad_datum = dump.find(has_bad_value, first_n=1)
    self.assertEqual(1, len(first_bad_datum))
    self.assertEqual(x_name, first_bad_datum[0].node_name)
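# As the test above shows, the predicate passed to DebugDumpDir.find()
# receives (datum, tensor) and returns a bool. A sketch of a stricter
# variant that also flags very large magnitudes (the 1e10 threshold is an
# arbitrary illustrative choice, not part of the API):
import numpy as np

def has_bad_or_huge_value(_, tensor):
  return (np.any(np.isnan(tensor)) or np.any(np.isinf(tensor)) or
          np.any(np.abs(tensor) > 1e10))

# Usage mirrors the test: bad_data = dump.find(has_bad_or_huge_value)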
def testWatchGraph_opTypeWhitelist(self):
  debug_utils.watch_graph(
      self._run_options,
      self._graph,
      debug_urls="file:///tmp/tfdbg_1",
      op_type_regex_whitelist="(Variable|MatMul)")

  node_names = self._verify_watches(
      self._run_options.debug_tensor_watch_opts, 0, ["DebugIdentity"],
      ["file:///tmp/tfdbg_1"])
  self.assertEqual(sorted(["a1", "b", "p1"]), sorted(node_names))
def _decorate_options_for_debug(self, options, graph):
  """Modify RunOptions.debug_options.debug_tensor_watch_opts for debugging.

  Args:
    options: (config_pb2.RunOptions) The RunOptions instance to be modified.
    graph: A TensorFlow Graph object.
  """

  debug_utils.watch_graph(
      options, graph, debug_urls=self._get_run_debug_urls())
  options.output_partition_graphs = True
def testWatchGraph_nodeNameAndOpTypeWhitelists(self):
  debug_utils.watch_graph(
      self._run_options,
      self._graph,
      debug_urls="file:///tmp/tfdbg_1",
      node_name_regex_whitelist="([a-z]+1$)",
      op_type_regex_whitelist="(MatMul)")

  node_names = self._verify_watches(
      self._run_options.debug_tensor_watch_opts, 0, ["DebugIdentity"],
      ["file:///tmp/tfdbg_1"])
  self.assertEqual(["p1"], node_names)
def testWatchingVariableUpdateOpsSeesUpdatedValues(self):
  """Watch output slots on Variable-updating ops, with no emitted edges."""

  with session.Session() as sess:
    u_init = constant_op.constant(10.0)
    u = variables.Variable(u_init, name="gdo/u")
    v_init = constant_op.constant(20.0)
    v = variables.Variable(v_init, name="gdo/v")

    w = math_ops.multiply(u, v, name="gdo/w")

    # gdo stands for GradientDescentOptimizer.
    train_op = gradient_descent.GradientDescentOptimizer(
        learning_rate=0.1).minimize(w, name="gdo/train")

    u.initializer.run()
    v.initializer.run()

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls=self._debug_urls())

    run_metadata = config_pb2.RunMetadata()
    sess.run(train_op, options=run_options, run_metadata=run_metadata)

    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)

    update_u_data = dump.watch_key_to_data(
        "gdo/train/update_gdo/u/ApplyGradientDescent:0:DebugIdentity")
    self.assertEqual(1, len(update_u_data))

    # Gradient descent on u: w = u * v, so dw / du = v.
    # Updated value of u should be:
    #   10.0 - learning_rate * v = 10.0 - 0.1 * 20.0 = 8.0
    self.assertAllClose(8.0, update_u_data[0].get_tensor())

    update_v_data = dump.watch_key_to_data(
        "gdo/train/update_gdo/v/ApplyGradientDescent:0:DebugIdentity")
    self.assertEqual(1, len(update_v_data))

    # Gradient descent on v: w = u * v, so dw / dv = u.
    # Updated value of v should be:
    #   20.0 - learning_rate * u = 20.0 - 0.1 * 10.0 = 19.0
    self.assertAllClose(19.0, update_v_data[0].get_tensor())

    # Verify that the Variables u and v are updated properly.
    self.assertAllClose(8.0, sess.run(u))
    self.assertAllClose(19.0, sess.run(v))
def testWatchingUnconnectedOutputTensor(self):
  """Watch an output slot not emitting any edges.

  (Not even control edges from the node.)
  """

  with session.Session() as sess:
    x_init = constant_op.constant([2, 2, 3, 5, 5])
    x = variables.Variable(x_init, name="unconnected/x")

    # The UniqueOp (tf.unique) has two output slots. Use only slot 0 in the
    # graph. Let the debugger watch the unused slot 1.
    unique_x, _ = tf.unique(x, name="unconnected/unique_x")
    y = tf.add(unique_x, [0, 1, 2], name="unconnected/y")

    x.initializer.run()

    # Verify that only slot 0 of unique_x has recipients, while slot 1 of
    # the same node does not have recipients.
    unique_x_slot_0_recipients = []
    unique_x_slot_1_recipients = []
    for op in sess.graph.get_operations():
      for inp in op.inputs:
        if inp.name == "unconnected/unique_x:0":
          unique_x_slot_0_recipients.append(op.name)
        elif inp.name == "unconnected/unique_x:1":
          unique_x_slot_1_recipients.append(op.name)

    self.assertEqual(["unconnected/y"], unique_x_slot_0_recipients)
    self.assertEqual([], unique_x_slot_1_recipients)

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls=self._debug_urls())

    run_metadata = config_pb2.RunMetadata()
    result = sess.run(y, options=run_options, run_metadata=run_metadata)
    self.assertAllClose([2, 4, 7], result)

    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)

    # Assert that the connected slot (slot 0) is dumped properly.
    unique_x_slot_0_dumps = dump.watch_key_to_data(
        "unconnected/unique_x:0:DebugIdentity")
    self.assertEqual(1, len(unique_x_slot_0_dumps))
    self.assertEqual("unconnected/unique_x",
                     unique_x_slot_0_dumps[0].node_name)
    self.assertEqual(0, unique_x_slot_0_dumps[0].output_slot)
    self.assertAllClose([2, 3, 5], unique_x_slot_0_dumps[0].get_tensor())

    # Assert that the unconnected slot (slot 1) is dumped properly.
    unique_x_slot_1_dumps = dump.watch_key_to_data(
        "unconnected/unique_x:1:DebugIdentity")
    self.assertEqual(1, len(unique_x_slot_1_dumps))
    self.assertEqual("unconnected/unique_x",
                     unique_x_slot_1_dumps[0].node_name)
    self.assertEqual(1, unique_x_slot_1_dumps[0].output_slot)
    self.assertAllClose([0, 0, 1, 2, 2],
                        unique_x_slot_1_dumps[0].get_tensor())
def testWatchingVariableUpdateOps(self):
  """Watch output slots on Variable-updating ops, with no emitted edges."""

  with session.Session() as sess:
    u_init = constant_op.constant(10.0)
    u = variables.Variable(u_init, name="gdo/u")
    v_init = constant_op.constant(20.0)
    v = variables.Variable(v_init, name="gdo/v")

    w = math_ops.mul(u, v, name="gdo/w")

    # gdo stands for GradientDescentOptimizer.
    train_op = tf.train.GradientDescentOptimizer(
        learning_rate=0.1).minimize(w, name="gdo/train")

    u.initializer.run()
    v.initializer.run()

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls=self._debug_urls())

    run_metadata = config_pb2.RunMetadata()
    sess.run(train_op, options=run_options, run_metadata=run_metadata)

    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)

    update_u_data = dump.watch_key_to_data(
        "gdo/train/update_gdo/u/ApplyGradientDescent:0:DebugIdentity")
    self.assertEqual(1, len(update_u_data))

    # Gradient descent on u: w = u * v, so dw / du = v.
    # Updated value of u should be:
    #   10.0 - learning_rate * v = 10.0 - 0.1 * 20.0 = 8.0
    self.assertAllClose(8.0, update_u_data[0].get_tensor())

    update_v_data = dump.watch_key_to_data(
        "gdo/train/update_gdo/v/ApplyGradientDescent:0:DebugIdentity")
    self.assertEqual(1, len(update_v_data))

    # Gradient descent on v: w = u * v, so dw / dv = u.
    # Updated value of v should be:
    #   20.0 - learning_rate * u = 20.0 - 0.1 * 10.0 = 19.0
    self.assertAllClose(19.0, update_v_data[0].get_tensor())

    # Verify that the Variables u and v are updated properly.
    self.assertAllClose(8.0, sess.run(u))
    self.assertAllClose(19.0, sess.run(v))
def testWatchGraph_nodeNameWhitelist(self):
  debug_utils.watch_graph(
      self._run_options,
      self._graph,
      debug_urls="file:///tmp/tfdbg_1",
      node_name_regex_whitelist="(a1$|a1_init$|a1/.*|p1$)")

  node_names = self._verify_watches(
      self._run_options.debug_tensor_watch_opts, 0, ["DebugIdentity"],
      ["file:///tmp/tfdbg_1"])
  self.assertEqual(
      sorted(["a1_init", "a1", "a1/Assign", "a1/read", "p1"]),
      sorted(node_names))
def testLookUpNodePythonTracebackWorks(self):
  with session.Session() as sess:
    u_init = constant_op.constant(10.0)
    u = variables.Variable(u_init, name="traceback/u")
    v_init = constant_op.constant(20.0)
    v = variables.Variable(v_init, name="traceback/v")

    w = math_ops.multiply(u, v, name="traceback/w")

    sess.run(variables.global_variables_initializer())

    run_metadata = config_pb2.RunMetadata()
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options, sess.graph, debug_urls=self._debug_urls())

    sess.run(w, options=run_options, run_metadata=run_metadata)

    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)

    # Prior to setting the Python graph, attempts to do traceback lookup
    # should lead to exceptions.
    with self.assertRaisesRegexp(
        LookupError, "Python graph is not available for traceback lookup"):
      dump.node_traceback("traceback/w")

    dump.set_python_graph(sess.graph)

    # After setting the Python graph, attempts to look up nonexistent nodes
    # should lead to exceptions.
    with self.assertRaisesRegexp(
        KeyError, r"Cannot find node \"foo\" in Python graph"):
      dump.node_traceback("foo")

    # Lookup should work with node name input.
    traceback = dump.node_traceback("traceback/w")
    self.assertIsInstance(traceback, list)
    self.assertGreater(len(traceback), 0)
    for trace in traceback:
      self.assertIsInstance(trace, tuple)

    # Lookup should also work with tensor name input.
    traceback = dump.node_traceback("traceback/w:0")
    self.assertIsInstance(traceback, list)
    self.assertGreater(len(traceback), 0)
    for trace in traceback:
      self.assertIsInstance(trace, tuple)
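# A sketch of consuming the traceback entries checked above. Judging from
# the tuple assertions in the test, each trace is assumed to follow the
# (file_name, line_number, function_name, text) convention of Python's
# traceback.extract_stack(); treat that layout as an assumption, not a
# documented contract of node_traceback().
for trace in dump.node_traceback("traceback/w"):
  file_name, line_number, function_name, text = trace
  print("%s:%d (%s): %s" % (file_name, line_number, function_name, text))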
def _decorate_run_options(self, run_options, debug_urls):
  """Modify a RunOptions object for debug tensor watching.

  Specifies request for outputting partition graphs. Adds
  debug_tensor_watch_opts with proper debug URLs.

  Args:
    run_options: (RunOptions) the modified RunOptions object.
    debug_urls: (list of str) debug URLs to be entered in
      run_options.debug_tensor_watch_opts.
  """

  run_options.output_partition_graphs = True
  debug_utils.watch_graph(
      run_options, self._sess.graph, debug_urls=debug_urls)
def testDebugQueueOpsDoesNotErrorOut(self):
  with session.Session() as sess:
    q = data_flow_ops.FIFOQueue(3, "float", name="fifo_queue")
    q_init = q.enqueue_many(([101.0, 202.0, 303.0],), name="enqueue_many")

    run_metadata = config_pb2.RunMetadata()
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options, sess.graph, debug_urls=self._debug_urls())

    sess.run(q_init, options=run_options, run_metadata=run_metadata)

    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)
    self.assertTrue(dump.loaded_partition_graphs())

    self.assertIsNone(dump.get_tensors("fifo_queue", 0, "DebugIdentity")[0])
    self.assertAllClose(
        [101.0, 202.0, 303.0],
        dump.get_tensors("enqueue_many/component_0", 0, "DebugIdentity")[0])
def testDebuggingDuringOpError(self):
  """Test the debug tensor dumping when error occurs in graph runtime."""

  with session.Session() as sess:
    ph = array_ops.placeholder(dtypes.float32, name="mismatch/ph")
    x = array_ops.transpose(ph, name="mismatch/x")
    m = constant_op.constant(
        np.array([[1.0, 2.0]], dtype=np.float32), name="mismatch/m")
    y = math_ops.matmul(m, x, name="mismatch/y")

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls=self._debug_urls())

    with self.assertRaises(errors.OpError):
      sess.run(y,
               options=run_options,
               feed_dict={ph: np.array([[-3.0], [0.0]])})

    dump = debug_data.DebugDumpDir(self._dump_root)

    # Despite the fact that the run() call errored out and partition_graphs
    # are not available via run_metadata, the partition graphs should still
    # have been loaded from the dump directory.
    self.assertTrue(dump.loaded_partition_graphs())

    m_dumps = dump.watch_key_to_data("mismatch/m:0:DebugIdentity")
    self.assertEqual(1, len(m_dumps))
    self.assertAllClose(np.array([[1.0, 2.0]]), m_dumps[0].get_tensor())

    x_dumps = dump.watch_key_to_data("mismatch/x:0:DebugIdentity")
    self.assertEqual(1, len(x_dumps))
    self.assertAllClose(np.array([[-3.0, 0.0]]), x_dumps[0].get_tensor())
# tfdbg> /0\.000
# tfdbg> ni -t cross_entropy/Log
# tfdbg> quit
#
# python -m tensorflow.python.debug.examples.debug_mnist --debug

from tensorflow.python.debug import debug_utils

# ... Code where your session and graph are set up ...

run_options = tf.RunOptions()
debug_utils.watch_graph(
    run_options,
    session.graph,
    debug_urls=["file:///shared/storage/location/tfdbg_dumps_1"])
# Be sure to use different directories for different run() calls.

session.run(fetches, feed_dict=feeds, options=run_options)

# python -m tensorflow.python.debug.cli.offline_analyzer \
#     --dump_dir=/shared/storage/location/tfdbg_dumps_1

# Let your BUILD target depend on "//tensorflow/python/debug:debug_py".
# (You don't need to worry about the BUILD dependency if you are using a
# pip install of open-source TensorFlow.)
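# A sketch of the "different directories for different run() calls" advice
# above: give each Session.run() its own dump directory so the offline
# analyzer can inspect them separately. The loop and directory naming are
# illustrative; session, fetches, and feeds come from the surrounding code.
for step in range(3):
  run_options = tf.RunOptions()
  debug_utils.watch_graph(
      run_options,
      session.graph,
      debug_urls=["file:///shared/storage/location/tfdbg_dumps_%d" % step])
  session.run(fetches, feed_dict=feeds, options=run_options)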
def testDumpGraphStructureLookup(self):
  # TODO(cais): Separate this test into multiple test methods.
  with session.Session() as sess:
    u_name = "testDumpGraphStructureLookup/u"
    v_name = "testDumpGraphStructureLookup/v"
    w_name = "testDumpGraphStructureLookup/w"

    u_init = constant_op.constant([2.0, 4.0])
    u = variables.Variable(u_init, name=u_name)
    v = math_ops.add(u, u, name=v_name)
    w = math_ops.add(v, v, name=w_name)

    u.initializer.run()

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls="file://%s" % self._dump_root)

    run_metadata = config_pb2.RunMetadata()
    sess.run(w, options=run_options, run_metadata=run_metadata)

    self.assertEqual(self._expected_partition_graph_count,
                     len(run_metadata.partition_graphs))

    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)

    u_read_name = u_name + "/read"

    # Test node name list lookup of the DebugDumpDir object.
    node_names = dump.nodes()
    self.assertTrue(u_name in node_names)
    self.assertTrue(u_read_name in node_names)

    # Test querying node attributes.
    u_attr = dump.node_attributes(u_name)
    self.assertEqual(dtypes.float32, u_attr["dtype"].type)
    self.assertEqual(1, len(u_attr["shape"].shape.dim))
    self.assertEqual(2, u_attr["shape"].shape.dim[0].size)

    with self.assertRaisesRegexp(ValueError,
                                 "No node named \"foo\" exists"):
      dump.node_attributes("foo")

    # Test querying the debug watch keys with node names.
    self.assertEqual(["%s:0:DebugIdentity" % u_name],
                     dump.debug_watch_keys(u_name))
    self.assertEqual(["%s:0:DebugIdentity" % v_name],
                     dump.debug_watch_keys(v_name))
    self.assertEqual(["%s:0:DebugIdentity" % w_name],
                     dump.debug_watch_keys(w_name))
    self.assertEqual([], dump.debug_watch_keys("foo"))

    # Test querying debug datum instances from debug watch keys.
    u_data = dump.watch_key_to_data(dump.debug_watch_keys(u_name)[0])
    self.assertEqual(1, len(u_data))
    self.assertEqual(u_name, u_data[0].node_name)
    self.assertEqual(0, u_data[0].output_slot)
    self.assertEqual("DebugIdentity", u_data[0].debug_op)
    self.assertGreaterEqual(u_data[0].timestamp, 0)

    self.assertEqual([], dump.watch_key_to_data("foo"))

    # Test the inputs lookup of the DebugDumpDir object.
    self.assertEqual([], dump.node_inputs(u_name))
    self.assertEqual([u_name], dump.node_inputs(u_read_name))
    self.assertEqual([u_read_name] * 2, dump.node_inputs(v_name))
    self.assertEqual([v_name] * 2, dump.node_inputs(w_name))

    self.assertEqual([], dump.node_inputs(u_name, is_control=True))
    self.assertEqual([], dump.node_inputs(u_read_name, is_control=True))
    self.assertEqual([], dump.node_inputs(v_name, is_control=True))
    self.assertEqual([], dump.node_inputs(w_name, is_control=True))

    # Test the output-recipient lookup of the DebugDumpDir object.
    self.assertTrue(u_read_name in dump.node_recipients(u_name))
    self.assertEqual(2, dump.node_recipients(u_read_name).count(v_name))
    self.assertEqual(2, dump.node_recipients(v_name).count(w_name))

    self.assertEqual([], dump.node_recipients(u_name, is_control=True))
    self.assertEqual([], dump.node_recipients(u_read_name, is_control=True))
    self.assertEqual([], dump.node_recipients(v_name, is_control=True))
    self.assertEqual([], dump.node_recipients(w_name, is_control=True))

    # Test errors raised on invalid node names.
    with self.assertRaisesRegexp(ValueError,
                                 "does not exist in partition graphs"):
      dump.node_inputs(u_name + "foo")

    with self.assertRaisesRegexp(ValueError,
                                 "does not exist in partition graphs"):
      dump.node_recipients(u_name + "foo")

    # Test transitive_inputs().
    self.assertEqual([], dump.transitive_inputs(u_name))
    self.assertEqual([u_name], dump.transitive_inputs(u_read_name))
    self.assertEqual(
        set([u_name, u_read_name]), set(dump.transitive_inputs(v_name)))
    self.assertEqual(
        set([u_name, u_read_name, v_name]),
        set(dump.transitive_inputs(w_name)))

    with self.assertRaisesRegexp(ValueError,
                                 "does not exist in partition graphs"):
      dump.transitive_inputs(u_name + "foo")

    # Test num_devices().
    self.assertEqual(self._expected_num_devices, len(dump.devices()))

    # Test node_device().
    self.assertEqual(self._main_device, dump.node_device(u_name))

    with self.assertRaisesRegexp(ValueError,
                                 "does not exist in partition graphs"):
      dump.node_device(u_name + "foo")

    # Test node_exists().
    self.assertTrue(dump.node_exists(u_name))
    self.assertTrue(dump.node_exists(u_name + "/read"))
    self.assertFalse(dump.node_exists(u_name + "/read" + "/foo"))

    # Test node_op_type().
    self.assertEqual("Variable", dump.node_op_type(u_name))
    self.assertEqual("Identity", dump.node_op_type(u_name + "/read"))
    self.assertEqual("Add", dump.node_op_type(v_name))
    self.assertEqual("Add", dump.node_op_type(w_name))

    with self.assertRaisesRegexp(ValueError,
                                 "does not exist in partition graphs"):
      dump.node_op_type(u_name + "foo")

    # Now load the dump again, without the partition graphs, so we can check
    # the errors raised when no partition graphs are loaded.
    dump = debug_data.DebugDumpDir(self._dump_root, validate=False)
    self.assertFalse(dump.loaded_partition_graphs())

    with self.assertRaisesRegexp(RuntimeError,
                                 "No partition graphs have been loaded"):
      dump.partition_graphs()

    with self.assertRaisesRegexp(
        RuntimeError, "Node inputs are not loaded from partition graphs yet"):
      dump.node_inputs(u_name)

    with self.assertRaisesRegexp(RuntimeError,
                                 "No partition graphs have been loaded"):
      dump.nodes()

    with self.assertRaisesRegexp(
        RuntimeError,
        "Node recipients are not loaded from partition graphs yet"):
      dump.node_recipients(u_name)

    with self.assertRaisesRegexp(
        RuntimeError, "Node inputs are not loaded from partition graphs yet"):
      dump.transitive_inputs(u_name)

    with self.assertRaisesRegexp(
        RuntimeError, "Devices are not loaded from partition graphs yet"):
      dump.devices()

    with self.assertRaisesRegexp(
        RuntimeError,
        "Node devices are not loaded from partition graphs yet"):
      dump.node_device(u_name)

    with self.assertRaisesRegexp(
        RuntimeError,
        "Node op types are not loaded from partition graphs yet"):
      dump.node_op_type(u_name)
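# transitive_inputs() is, in effect, a graph traversal over node_inputs().
# A minimal sketch of the idea, assuming only the node_inputs() API exercised
# above; this is not the actual DebugDumpDir implementation:
def _transitive_inputs_sketch(dump, node_name):
  """Collect all direct and indirect (non-control) inputs of a node."""
  visited = set()
  stack = list(dump.node_inputs(node_name))
  while stack:
    inp = stack.pop()
    if inp in visited:
      continue
    visited.add(inp)
    stack.extend(dump.node_inputs(inp))
  return visited

# For the graph above, _transitive_inputs_sketch(dump, w_name) yields
# {u_name, u_read_name, v_name}, matching set(dump.transitive_inputs(w_name)).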
def testDumpCausalityCheck(self):
  with session.Session() as sess:
    u_name = "testDumpCausalityCheck/u"
    v_name = "testDumpCausalityCheck/v"
    w_name = "testDumpCausalityCheck/w"

    u_init = constant_op.constant([2.0, 4.0])
    u = variables.Variable(u_init, name=u_name)
    v = math_ops.add(u, u, name=v_name)
    w = math_ops.add(v, v, name=w_name)

    u.initializer.run()

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls="file://%s" % self._dump_root)

    run_metadata = config_pb2.RunMetadata()
    sess.run(w, options=run_options, run_metadata=run_metadata)

    self.assertEqual(self._expected_partition_graph_count,
                     len(run_metadata.partition_graphs))

    # First, loading the original dump without supplying the
    # partition_graphs should not cause a RuntimeError: validation occurs
    # only when partition graphs are loaded.
    debug_data.DebugDumpDir(self._dump_root)

    # Now, loading the original dump with partition graphs supplied should
    # succeed. The validation should pass quietly.
    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)

    # Get the dump file names and compute their timestamps.
    self.assertEqual(
        1, len(dump.get_tensor_file_paths(u_name, 0, "DebugIdentity")))
    u_file_path = dump.get_tensor_file_paths(u_name, 0, "DebugIdentity")[0]

    self.assertEqual(
        1, len(dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")))
    v_file_path = dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")[0]

    u_timestamp = int(u_file_path[u_file_path.rindex("_") + 1:])
    v_timestamp = int(v_file_path[v_file_path.rindex("_") + 1:])

    # Swap the timestamps.
    new_u_file_path = u_file_path[:u_file_path.rindex(
        "_")] + "_%d" % v_timestamp
    new_v_file_path = v_file_path[:v_file_path.rindex(
        "_")] + "_%d" % u_timestamp

    os.rename(u_file_path, new_u_file_path)
    os.rename(v_file_path, new_v_file_path)

    # Load the dump directory again. Now a ValueError is expected to be
    # raised due to the timestamp swap.
    with self.assertRaisesRegexp(ValueError, "Causality violated"):
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)

    # Loading the dump directory with kwarg "validate" set explicitly to
    # False should get rid of the error.
    dump = debug_data.DebugDumpDir(
        self._dump_root,
        partition_graphs=run_metadata.partition_graphs,
        validate=False)
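# The dump file paths manipulated above end in "_<timestamp>"; the causality
# check compares these timestamps against graph topology. A minimal sketch of
# the split performed with rindex() above; the helper name is hypothetical:
def _split_timestamp(file_path):
  """Split a dump file path into its base and integer timestamp suffix."""
  base = file_path[:file_path.rindex("_")]
  timestamp = int(file_path[file_path.rindex("_") + 1:])
  return base, timestamp

# Swapping the two timestamps makes the earlier-executing node u appear to
# have been dumped after its downstream node v, which is what triggers the
# "Causality violated" ValueError in the test above.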