def write_debug_trace(x):
  """Square `x`, dump the intermediate value via DebugIdentityV2, return x**2 + 1.

  DebugIdentityV2 is a stateful op, so it ought to be included by auto
  control dependency even though its output is discarded here. The trace is
  written to two debug URLs: a file:// URL under self.dump_root and
  `another_debug_url` from the enclosing scope.
  """
  squared = math_ops.square(x)
  gen_debug_ops.debug_identity_v2(
      squared,
      tfdbg_context_id="deadbeaf",
      tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
      debug_urls=["file://%s" % self.dump_root, another_debug_url])
  return squared + 1.0
def write_debug_trace(x):
  """Square `x` and trace the result through DebugIdentityV2.

  DebugIdentityV2 is a stateful op; it ought to be picked up by auto
  control dependency even though its result is unused. The dump uses an
  explicit op_name/output_slot and a caller-supplied circular buffer size.
  """
  y = math_ops.square(x)
  gen_debug_ops.debug_identity_v2(
      y,
      tfdbg_context_id="deadbeaf",
      op_name="Square",
      output_slot=0,
      tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
      debug_urls=["file://%s" % self.dump_root],
      circular_buffer_size=circular_buffer_size)
  return y
def collatz(x):
  """Iterate the Collatz map from `x` down to 1, tracing each value of x.

  Each iteration dumps the current `x` through DebugIdentityV2 in
  FULL_TENSOR mode before updating it, and increments a step counter.

  NOTE(review): this is written for autograph/tf.function conversion —
  the Python `while`/`if` operate on symbolic tensors (math_ops.greater,
  math_ops.equal), so eager execution outside a traced function is
  presumably not intended; confirm against the caller.

  Args:
    x: The starting value (scalar integer tensor, assumed > 0 — TODO confirm).

  Returns:
    A scalar int32 tensor: the number of iterations taken to reach 1.
  """
  counter = constant_op.constant(0, dtype=dtypes.int32)
  while math_ops.greater(x, 1):
    counter = counter + 1
    # Dump the pre-update value of x for this iteration.
    gen_debug_ops.debug_identity_v2(
        x,
        tfdbg_context_id="deadbeaf",
        op_name="x",
        output_slot=0,
        tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
        debug_urls=["file://%s" % self.dump_root])
    if math_ops.equal(x % 2, 0):
      x = math_ops.div(x, 2)
    else:
      x = x * 3 + 1
  return counter
def write_debug_trace(x):
  """Compute x**2 and sqrt(x), tracing both, and return their sum.

  Each intermediate is dumped through DebugIdentityV2 (FULL_TENSOR mode)
  under its own tfdbg context id; the dump ops' outputs are discarded and
  rely on stateful-op auto control dependencies.
  """
  sq = math_ops.square(x)
  gen_debug_ops.debug_identity_v2(
      sq,
      tfdbg_context_id="deadbeaf",
      op_name="Square",
      output_slot=0,
      tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
      debug_urls=["file://%s" % self.dump_root])
  rt = math_ops.sqrt(x)
  gen_debug_ops.debug_identity_v2(
      rt,
      tfdbg_context_id="beafdead",
      op_name="Sqrt",
      output_slot=0,
      tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
      debug_urls=["file://%s" % self.dump_root])
  return sq + rt
def _instrument_symbolic_tensors(tensors, op_name, tfdbg_context_id):
  """Add debugging instrumentation for symbolic (i.e., non-eager) tensors.

  The detailed fashion in which the tensors are instrumented is determined
  by the tensor_debug_mode configured for the currently enabled dumping
  callback.

  Args:
    tensors: A tuple of Tensors to instrument. It is assumed that their
      ordering corresponds to the ordering of output tensors of an original
      op. Output slot indices (0-based) will be generated based on the
      ordering.
    op_name: Name of the op that emits the Tensors.
    tfdbg_context_id: A unique ID for the context that the op belongs to
      (e.g., a graph).

  Returns:
    Non-eager Tensors that override the `tensors` as the output of the op
    that originally generated `tensors`. In some cases (e.g., non-V1 graph
    mode), this may be `None`, as the instrumentation can simply rely on
    automatic control dependencies (see `auto_control_deps.py`) instead of
    tensor overriding.

  Raises:
    NotImplementedError: If the configured tensor_debug_mode is anything
      other than NO_TENSOR.
  """
  # Only the NO_TENSOR mode is implemented here; other modes fall through
  # to the NotImplementedError below.
  if (_state.config.tensor_debug_mode ==
      debug_event_pb2.TensorDebugMode.NO_TENSOR):
    is_v1_graph_mode = not ops.executing_eagerly_outside_functions()
    # Tensor overriding is only needed in V1 graph mode; otherwise auto
    # control dependencies suffice and we return None.
    instrumented_tensors = [] if is_v1_graph_mode else None
    for slot, tensor in enumerate(tensors):
      # Avoid inheriting colocation constraints from the instrumented op.
      with ops.colocate_with(None, ignore_existing=True):
        # Except in V1 graph mode + control flow, debug_identity_v2 triggers
        # auto control dependency because it's a stateful op.
        debug_tensor = gen_debug_ops.debug_identity_v2(
            # Use an empty (shape=[0]) float32 tensor for the NO_TENSOR mode,
            # since no actual tensor value is traced.
            constant_op.constant([], dtype=dtypes.float32),
            tfdbg_context_id=tfdbg_context_id,
            op_name=op_name,
            output_slot=slot,
            tensor_debug_mode=_state.config.tensor_debug_mode,
            debug_urls=["file://%s" % _state.config.dump_root])
        if is_v1_graph_mode:
          # TODO(cais): Evaluate performance optimization options. For the
          # `NO_TENSOR` debug mode, an alternative is to add `debug_tensor` as
          # a control dependency of `tensor.op` without an additional identity
          # op.
          # Wrap the original tensor in an identity op that carries a control
          # edge to the debug op, so consumers of the identity force the dump
          # to run.
          identity = array_ops.identity(tensor)
          identity.op._add_control_input(  # pylint: disable=protected-access
              debug_tensor.op)
          instrumented_tensors.append(identity)
    return instrumented_tensors
  else:
    raise NotImplementedError(
        "Symbolic tensor instrumentation is not implemented for debug mode %s"
        % _state.config.tensor_debug_mode)
def callback(self, op_type, inputs, attrs, outputs, op_name=None, graph=None):
  """Op callback that inserts DebugIdentityV2 dump ops on matching outputs.

  For any op whose name matches `self._op_regex`, each output tensor that
  passes `self._output_regex` (if set) is routed through a DebugIdentityV2
  op, and the dump op's output replaces the original in the returned list.

  Args:
    op_type: Type name of the op being instrumented.
    inputs: Input tensors of the op (unused here).
    attrs: Attributes of the op (unused here).
    outputs: Output tensors of the op.
    op_name: Name of the op; `None` for eager execution, in which case no
      instrumentation happens.
    graph: The graph the op belongs to, if any; used only to derive
      `tfdbg_context_id`.

  Returns:
    A list of (possibly replaced) output tensors when the op name matches;
    `None` otherwise (meaning: leave outputs unchanged).
  """
  if op_name is not None and self._op_regex.match(op_name):
    graph_name = "missing-graph-name"
    if graph is not None and hasattr(graph, "name"):
      graph_name = graph.name
    logging.info(
        "Adding dump op for '%s' of type '%s' from graph '%s'" %
        (op_name, op_type, graph_name))
    new_outputs = []
    for output_slot, output in enumerate(outputs):
      # Outputs filtered out by the output regex pass through unchanged.
      if self._output_regex is not None and not self._output_regex.match(
          output.name):
        logging.info("Skipped output: " + output.name)
        new_outputs.append(output)
        continue
      debug_identity_op_kwargs = {
          "tfdbg_context_id": graph_name,
          "op_name": op_name,
          "output_slot": output_slot,
          "tensor_debug_mode": self._tensor_debug_mode,
          "debug_urls": ["file://%s" % self._dump_root],
          # Counter gives each dump op a unique name within the graph.
          "name": "dump_%d" % self._dump_op_counter
      }
      # TF 2.2's DebugIdentityV2 op does not accept these attributes;
      # pass them only on other versions.
      if not tf.__version__.startswith("2.2"):
        debug_identity_op_kwargs[
            "circular_buffer_size"] = self._circular_buffer_size
        debug_identity_op_kwargs[
            "tfdbg_run_id"] = self._tfdbg_run_id
      self._dump_op_counter = self._dump_op_counter + 1
      new_outputs.append(
          gen_debug_ops.debug_identity_v2(
              output, **debug_identity_op_kwargs))
    return new_outputs
  else:
    return None
def _instrument_symbolic_tensors(self, tensors, op_type, op_name,
                                 tfdbg_context_id, tensor_ids):
  """Add debugging instrumentation for symbolic (i.e., non-eager) tensors.

  The detailed fashion in which the tensors are instrumented is determined
  by the tensor_debug_mode configured for the currently enabled dumping
  callback.

  Args:
    tensors: A tuple of Tensors to instrument. It is assumed that their
      ordering corresponds to the ordering of output tensors of an original
      op. Output slot indices (0-based) will be generated based on the
      ordering.
    op_type: Type name of the op that emits the Tensors (e.g., "MatMul").
    op_name: Name of the op that emits the Tensors (e.g., "dense_1/MatMul").
    tfdbg_context_id: A unique ID for the context that the op belongs to
      (e.g., a graph).
    tensor_ids: A list of unique ID numbers for the tensors, for tfdbg's
      internal use.

  Returns:
    Non-eager Tensors that override the `tensors` as the output of the op
    that originally generated `tensors`. In some cases (e.g., non-V1 graph
    mode), this may be `None`, as the instrumentation can simply rely on
    automatic control dependencies (see `auto_control_deps.py`) instead of
    tensor overriding.

  Raises:
    NotImplementedError: If tensor_debug_mode is not one of the handled
      modes (NO_TENSOR, CURT_HEALTH, CONCISE_HEALTH, SHAPE, FULL_TENSOR).
  """
  # TODO(b/144441464, b/144440920, b/144440922): Make use of it.
  tensor_debug_mode = self._tensor_debug_mode
  debug_urls = ["file://%s" % self._dump_root]
  is_v1_graph_mode = not ops.executing_eagerly_outside_functions()
  # Tensor overriding is only needed in V1 graph mode; otherwise we return
  # None and rely on automatic control dependencies.
  instrumented_tensors = [] if is_v1_graph_mode else None
  if tensor_debug_mode == debug_event_pb2.TensorDebugMode.NO_TENSOR:
    for output_slot, tensor in enumerate(tensors):
      if (not self._should_dump_tensor(op_type, tensor.dtype) or
          not tensor.dtype.is_numpy_compatible):
        # Instrumenting DT_VARIANT and DT_RESOURCE type tensors under
        # V1 graph mode is known to have issues. TODO(cais): Investigate.
        if is_v1_graph_mode:
          instrumented_tensors.append(tensor)
        continue
      # NOTE(review): this guard appears unreachable — the preceding `if`
      # already `continue`s on any non-numpy-compatible dtype. Left intact;
      # confirm before removing.
      if is_v1_graph_mode and not tensor.dtype.is_numpy_compatible:
        instrumented_tensors.append(tensor)
        continue
      # Except in V1 graph mode + control flow, debug_identity_v2 triggers
      # auto control dependency because it's a stateful op.
      debug_tensor = gen_debug_ops.debug_identity_v2(
          # Use an empty (shape=[0]) float32 tensor for the NO_TENSOR mode
          # as a low-overhead placeholder, since no actual tensor value is
          # traced.
          constant_op.constant([], dtype=dtypes.float32),
          tfdbg_context_id=tfdbg_context_id,
          op_name=op_name,
          output_slot=output_slot,
          tensor_debug_mode=self._tensor_debug_mode,
          debug_urls=debug_urls)
      if is_v1_graph_mode:
        # TODO(cais): Evaluate performance optimization options. For the
        # `NO_TENSOR` debug mode, an alternative is to add `debug_tensor` as
        # a control dependency of `tensor.op` without an additional identity
        # op.
        identity = array_ops.identity(tensor)
        identity.op._add_control_input(  # pylint: disable=protected-access
            debug_tensor.op)
        instrumented_tensors.append(identity)
    return instrumented_tensors
  elif tensor_debug_mode in (debug_event_pb2.TensorDebugMode.CURT_HEALTH,
                             debug_event_pb2.TensorDebugMode.CONCISE_HEALTH,
                             debug_event_pb2.TensorDebugMode.SHAPE):
    for output_slot, tensor in enumerate(tensors):
      dtype = tensor.dtype
      # Health modes summarize only floating dtypes; SHAPE mode also covers
      # integer and bool dtypes.
      dtype_is_dumpable = (
          tensor_debug_mode in (
              debug_event_pb2.TensorDebugMode.CURT_HEALTH,
              debug_event_pb2.TensorDebugMode.CONCISE_HEALTH) and
          dtype.is_floating or
          tensor_debug_mode == debug_event_pb2.TensorDebugMode.SHAPE and
          (dtype.is_floating or dtype.is_integer or dtype.is_bool))
      if (not self._should_dump_tensor(op_type, tensor.dtype) or
          not dtype_is_dumpable):
        if is_v1_graph_mode:
          instrumented_tensors.append(tensor)
        continue
      # Summarize the tensor first (DebugNumericSummaryV2), then route the
      # summary through DebugIdentityV2 for dumping.
      debug_tensor = gen_debug_ops.debug_identity_v2(
          gen_debug_ops.debug_numeric_summary_v2(
              tensor,
              tensor_id=tensor_ids[output_slot],
              tensor_debug_mode=self._tensor_debug_mode,
              output_dtype=dtypes.float64),
          tfdbg_context_id=tfdbg_context_id,
          op_name=op_name,
          output_slot=output_slot,
          tensor_debug_mode=self._tensor_debug_mode,
          debug_urls=debug_urls)
      if is_v1_graph_mode:
        identity = array_ops.identity(tensor)
        identity.op._add_control_input(  # pylint: disable=protected-access
            debug_tensor.op)
        instrumented_tensors.append(identity)
    return instrumented_tensors
  elif tensor_debug_mode == debug_event_pb2.TensorDebugMode.FULL_TENSOR:
    for output_slot, tensor in enumerate(tensors):
      if (not self._should_dump_tensor(op_type, tensor.dtype) or
          not tensor.dtype.is_numpy_compatible):
        # Instrumenting DT_VARIANT and DT_RESOURCE type tensors under
        # V1 graph mode is known to have issues. TODO(cais): Investigate.
        if is_v1_graph_mode:
          instrumented_tensors.append(tensor)
        continue
      debug_tensor = gen_debug_ops.debug_identity_v2(
          tensor,
          tfdbg_context_id=tfdbg_context_id,
          op_name=op_name,
          output_slot=output_slot,
          tensor_debug_mode=self._tensor_debug_mode,
          debug_urls=debug_urls)
      if is_v1_graph_mode:
        # In FULL_TENSOR mode the debug op passes the value through, so it
        # can directly replace the original tensor (no identity wrapper).
        instrumented_tensors.append(debug_tensor)
    return instrumented_tensors
  else:
    raise NotImplementedError(
        "Symbolic tensor instrumentation is not implemented for debug mode "
        "%s" % self._tensor_debug_mode)
def _instrument_symbolic_tensors(self, tensors, op_type, op_name,
                                 tfdbg_context_id, tensor_ids):
  """Add debugging instrumentation for symbolic (i.e., non-eager) tensors.

  The detailed fashion in which the tensors are instrumented is determined
  by the tensor_debug_mode configured for the currently enabled dumping
  callback.

  Args:
    tensors: A tuple of Tensors to instrument. It is assumed that their
      ordering corresponds to the ordering of output tensors of an original
      op. Output slot indices (0-based) will be generated based on the
      ordering.
    op_type: Type name of the op that emits the Tensors (e.g., "MatMul").
    op_name: Name of the op that emits the Tensors (e.g., "dense_1/MatMul").
    tfdbg_context_id: A unique ID for the context that the op belongs to
      (e.g., a graph).
    tensor_ids: A list of unique ID numbers for the tensors, for tfdbg's
      internal use.

  Returns:
    Non-eager Tensors that override the `tensors` as the output of the op
    that originally generated `tensors`. In some cases (e.g., non-V1 graph
    mode), this may be `None`, as the instrumentation can simply rely on
    automatic control dependencies (see `auto_control_deps.py`) instead of
    tensor overriding.

  Raises:
    NotImplementedError: If tensor_debug_mode is not one of the handled
      modes.
  """
  tensor_debug_mode = self._tensor_debug_mode
  debug_urls = ["file://%s" % self._dump_root]
  is_v1_graph_mode = not ops.executing_eagerly_outside_functions()
  # Tensor overriding is only needed in V1 graph mode; otherwise we return
  # None and rely on automatic control dependencies.
  instrumented_tensors = [] if is_v1_graph_mode else None
  for output_slot, tensor in enumerate(tensors):
    # The counter is shared across threads; read it under the lock so each
    # debug op gets a distinct name.
    with self._symbolic_tensor_counter_lock:
      debug_identity_name = ("DebugIdentityV2_%d" %
                             self._symbolic_tensor_counter)
    debug_identity_op_kwargs = {
        "tfdbg_context_id": tfdbg_context_id,
        "op_name": op_name,
        "output_slot": output_slot,
        "tensor_debug_mode": self._tensor_debug_mode,
        "debug_urls": debug_urls,
        "name": debug_identity_name,
    }
    # These attributes only exist on DebugIdentityV2 past the given
    # forward-compatibility horizon dates.
    if tf_compat.forward_compatible(2020, 6, 24):
      debug_identity_op_kwargs[
          "circular_buffer_size"] = self._circular_buffer_size
    if tf_compat.forward_compatible(2020, 7, 1):
      debug_identity_op_kwargs["tfdbg_run_id"] = self._tfdbg_run_id
    if tensor_debug_mode == debug_event_pb2.TensorDebugMode.NO_TENSOR:
      if (not self._should_dump_tensor(op_type, tensor.dtype) or
          not tensor.dtype.is_numpy_compatible):
        if is_v1_graph_mode:
          instrumented_tensors.append(tensor)
        continue
      if is_v1_graph_mode and not tensor.dtype.is_numpy_compatible:
        # Avoid instrumenting Placeholder under is_v1_graph_mode. Doing that
        # would cause runtime complaint about Placeholders not being fed.
        # NOTE(review): this guard appears unreachable — the previous `if`
        # already `continue`s on non-numpy-compatible dtypes; confirm before
        # removing.
        instrumented_tensors.append(tensor)
        continue
      # Except in V1 graph mode + control flow, debug_identity_v2 triggers
      # auto control dependency because it's a stateful op.
      debug_tensor = gen_debug_ops.debug_identity_v2(
          # Use an empty (shape=[0]) float32 tensor for the NO_TENSOR mode
          # as a low-overhead placeholder, since no actual tensor value is
          # traced.
          constant_op.constant([], dtype=dtypes.float32),
          **debug_identity_op_kwargs)
      if is_v1_graph_mode:
        instrumented_tensors.append(
            self._process_v1_graph_mode_tensor(
                op_type, tensor, debug_tensor, tensor_debug_mode))
    elif tensor_debug_mode in (debug_event_pb2.TensorDebugMode.CURT_HEALTH,
                               debug_event_pb2.TensorDebugMode.CONCISE_HEALTH,
                               debug_event_pb2.TensorDebugMode.FULL_HEALTH,
                               debug_event_pb2.TensorDebugMode.SHAPE):
      dtype = tensor.dtype
      # Health modes summarize only floating dtypes; SHAPE mode also covers
      # integer and bool dtypes.
      dtype_is_dumpable = (
          tensor_debug_mode in (
              debug_event_pb2.TensorDebugMode.CURT_HEALTH,
              debug_event_pb2.TensorDebugMode.CONCISE_HEALTH,
              debug_event_pb2.TensorDebugMode.FULL_HEALTH) and
          dtype.is_floating or
          tensor_debug_mode == debug_event_pb2.TensorDebugMode.SHAPE and
          (dtype.is_floating or dtype.is_integer or dtype.is_bool))
      if (not self._should_dump_tensor(op_type, tensor.dtype) or
          not dtype_is_dumpable):
        if is_v1_graph_mode:
          instrumented_tensors.append(tensor)
        continue
      # Summarize the tensor first (DebugNumericSummaryV2), then route the
      # summary through DebugIdentityV2 for dumping.
      debug_tensor = gen_debug_ops.debug_identity_v2(
          gen_debug_ops.debug_numeric_summary_v2(
              tensor,
              tensor_id=tensor_ids[output_slot],
              tensor_debug_mode=self._tensor_debug_mode,
              output_dtype=dtypes.float64),
          **debug_identity_op_kwargs)
      if is_v1_graph_mode:
        instrumented_tensors.append(
            self._process_v1_graph_mode_tensor(
                op_type, tensor, debug_tensor, tensor_debug_mode))
    elif tensor_debug_mode == debug_event_pb2.TensorDebugMode.FULL_TENSOR:
      if (not self._should_dump_tensor(op_type, tensor.dtype) or
          not tensor.dtype.is_numpy_compatible):
        # Instrumenting DT_VARIANT and DT_RESOURCE type tensors under
        # V1 graph mode is known to have issues. TODO(cais): Investigate.
        if is_v1_graph_mode:
          instrumented_tensors.append(tensor)
        continue
      debug_tensor = gen_debug_ops.debug_identity_v2(
          tensor, **debug_identity_op_kwargs)
      if is_v1_graph_mode:
        instrumented_tensors.append(
            self._process_v1_graph_mode_tensor(
                op_type, tensor, debug_tensor, tensor_debug_mode))
    else:
      raise NotImplementedError(
          "Symbolic tensor instrumentation is not implemented for debug mode "
          "%s" % self._tensor_debug_mode)
  return instrumented_tensors