    def testEagerGraphOpConstructionWhileLoopControlFlow(self):
        instrument = _NumpyFunctionCallback()

        with op_callbacks.op_callback(instrument.callback):

            @def_function.function
            def my_function_with_while(counter, lim, accum):
                while math_ops.less(counter, lim):
                    accum.assign_add(accum)
                    counter.assign_add(1.0)

            counter = variables.Variable(0.0)
            lim = constant_op.constant(4.0, dtype=dtypes.float32)
            accum = variables.Variable(1.0)
            my_function_with_while(counter, lim, accum)
        self.assertAllClose(accum.read_value(), 16.0)
        self.assertIn(_WHILE_OP, instrument.graph_op_types)
        self.assertIn(_LESS_OP, instrument.graph_op_types)
        self.assertIn(_ASSIGN_ADD_VARIABLE_OP, instrument.graph_op_types)
        self.assertEqual(len(instrument.graph_op_names),
                         len(instrument.graph_op_types))

        # Check the graph internal ndarrays recorded at runtime.
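        # The accumulator doubles on each of the four iterations
        # (1.0 -> 2.0 -> 4.0 -> 8.0 -> 16.0), so the recorded ReadVariableOp
        # outputs below are its values before each doubling.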
        read_variable_op_outputs = instrument.graph_internal_ndarrays[
            _READ_VARIABLE_OP]
        self.assertAllClose(read_variable_op_outputs, [1.0, 2.0, 4.0, 8.0])
        less_op_outputs = instrument.graph_internal_ndarrays[_LESS_OP]
        self.assertAllClose(less_op_outputs, [True, True, True, True, False])

    def testSparseTensorFuncGraph(self):
        instrument = _NumpyFunctionCallback()

        with op_callbacks.op_callback(instrument.callback):

            @def_function.function
            def dense_matmul(sp, w):
                return sparse_ops.sparse_tensor_dense_matmul(sp, w)

            indices = [[1, 2], [2, 0], [3, 4]]
            values = [0.0, 8.0, -2.0]
            shape = [4, 5]
            sp = sparse_tensor.SparseTensorValue(indices, values, shape)
            w = ops.convert_to_tensor(np.ones([5, 1], np.float32))
            y = dense_matmul(sp, w)
            self.assertAllClose(y, [[0.0], [0.0], [8.0], [-2.0]])
            self.assertIn(_SPARSE_TENSOR_DENSE_MATMUL_OP,
                          instrument.graph_op_types)
            self.assertIn(
                dense_matmul.get_concrete_function(sp, w).name,
                instrument.eager_op_types)

        # Check the graph internal ndarrays recorded at runtime.
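        # The outputs are keyed by the op's full name inside the FuncGraph;
        # sparse_tensor_dense_matmul wraps the raw op in a name scope of the
        # same name, hence the repeated "SparseTensorDenseMatMul/..." path.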
        sparse_matmul_outputs = instrument.graph_internal_ndarrays[
            _SPARSE_TENSOR_DENSE_MATMUL_OP + b"/" +
            _SPARSE_TENSOR_DENSE_MATMUL_OP]
        self.assertEqual(len(sparse_matmul_outputs), 1)
        self.assertAllClose(sparse_matmul_outputs[0],
                            [[0.0], [0.0], [8.0], [-2.0]])

  def testMultiThreadedEagerOpExecution(self):
        # Instrument for the main thread.
        instrument_0 = _NumpyFunctionCallback()

        # Instrument for the to-be-created thread.
        instrument_1 = _NumpyFunctionCallback()

        def thread_1_job():
            with op_callbacks.op_callback(instrument_1.callback):
                x = constant_op.constant(6.0)
                y = math_ops.square(math_ops.log(x))
                return y

        thread_1 = threading.Thread(target=thread_1_job)
        thread_1.start()

        # While thread_1 is ongoing, do something on the main thread.
        with op_callbacks.op_callback(instrument_0.callback):
            x = constant_op.constant(2.0)
            y = math_ops.cos(x)
            self.assertAllClose(y, np.cos(2.0))

        thread_1.join()

        self.assertEqual(instrument_0.eager_op_types, [_COS_OP])
        self.assertEqual(instrument_0.eager_op_names, [None])
        self.assertEqual(instrument_1.eager_op_types, [_LOG_OP, _SQUARE_OP])
        self.assertEqual(instrument_1.eager_op_names, [None, None])

    def testGraphOpAttributesAreCaptured(self):
        instrument = _NumpyFunctionCallback()
        with op_callbacks.op_callback(instrument.callback):

            @def_function.function
            def my_matmul(m, x):
                return math_ops.matmul(m,
                                       x,
                                       transpose_a=True,
                                       transpose_b=False)

            m = constant_op.constant([[1.0, -1.0], [0.0, 1.0]])
            x = constant_op.constant([[-2.0], [3.0]])
            y = my_matmul(m, x)
            self.assertAllClose(y, [[-2.0], [5.0]])
        index = instrument.graph_op_types.index(_MATMUL_OP)
        self.assertIsInstance(instrument.graph_attrs[index], tuple)
        self.assertEqual(
            instrument.graph_attrs[index]
            [instrument.graph_attrs[index].index("transpose_a") + 1].b, True)
        self.assertEqual(
            instrument.graph_attrs[index]
            [instrument.graph_attrs[index].index("transpose_b") + 1].b, False)
        self.assertEqual(len(instrument.eager_attrs), 1)
        self.assertIsInstance(instrument.eager_attrs[0], tuple)
  def testKerasModelFit(self):
    # TODO(cais): The purely PyFunc (numpy_function) based instrumentation
    # doesn't work for the entire Keras model and its fit() call, due to some
    # shape inference limitations. Use tfdbg's gen_debug_ops for testing
    # instead (b/139668469).
    instrument = _NumpyFunctionCallback(instrument_graph_ops=False)

    with op_callbacks.op_callback(instrument.callback):
      model = keras.Sequential()
      model.add(keras.layers.Dense(10, input_shape=(8,), activation="relu"))
      model.add(keras.layers.BatchNormalization())
      model.add(keras.layers.Dense(1, activation="linear"))
      model.compile(loss="mse", optimizer="adam")

      batch_size = 4
      xs = random_ops.random_normal([batch_size, 8])
      ys = random_ops.random_normal([batch_size, 1])
      history = model.fit(xs, ys, epochs=2, verbose=0)

      # Simply assert that the training proceeded as expected and that
      # op callbacks are invoked. We prefer not to assert on the details of the
      # graph construction and the execution, in order to avoid future
      # maintenance cost.
      self.assertEqual(len(history.history["loss"]), 2)
      self.assertTrue(instrument.graph_op_types)
      self.assertEqual(len(instrument.graph_op_types),
                       len(instrument.graph_op_names))
      self.assertTrue(instrument.eager_op_types)
  def testOpCallbackWorksWithGradientTape(self):
    instrument = _NumpyFunctionCallback()

    with op_callbacks.op_callback(instrument.callback):
      v = variables.Variable(3.0, dtype=dtypes.float32)
      @def_function.function
      def get_gradients():
        with backprop.GradientTape() as tape:
          loss = math_ops.sin(math_ops.square(v))
          gradients = tape.gradient(loss, v)
        return gradients

      gradients = get_gradients()
      # Applying the chain rule.
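      # d/dv[sin(v^2)] = cos(v^2) * 2v; with v = 3.0 this equals
      # cos(9.0) * 6.0, which matches the expected value below.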
      self.assertAllClose(gradients, np.cos(3.0 * 3.0) * 3.0 * 2.0)
      self.assertIn(_SQUARE_OP, instrument.graph_op_types)
      self.assertIn(_SIN_OP, instrument.graph_op_types)
      # The mul and cos ops are created for backprop.
      self.assertIn(_MUL_OP, instrument.graph_op_types)
      self.assertIn(_COS_OP, instrument.graph_op_types)

      # Check the ndarrays from runtime.
      cos_op_outputs = instrument.graph_internal_ndarrays[_COS_OP]
      self.assertEqual(len(cos_op_outputs), 1)
      self.assertAllClose(cos_op_outputs[0], np.cos(3.0 * 3.0))

    def testSimpleGraphConstructionScopeOutsideFunction(self):
        instrument = _NumpyFunctionCallback()

        with op_callbacks.op_callback(instrument.callback):

            @def_function.function
            def log_2plus_unique_x(x):
                unique_values, unique_pos = array_ops.unique(x)
                return math_ops.log(2.0 + unique_values), unique_pos

            x = constant_op.constant([-1.0, -1.0, 0.0], dtype=dtypes.float32)
            y1, y2 = log_2plus_unique_x(x)
            self.assertAllClose(y1, [0.0, np.log(2.0)])
            self.assertAllClose(y2, [0, 0, 1])
        self.assertIn(_UNIQUE_OP, instrument.graph_op_types)
        self.assertIn(_ADD_OP, instrument.graph_op_types)
        self.assertIn(_LOG_OP, instrument.graph_op_types)
        self.assertEqual(len(instrument.graph_op_names),
                         len(instrument.graph_op_types))

        # Check the graph internal ndarrays recorded at runtime.
        unique_op_outputs = instrument.graph_internal_ndarrays[_UNIQUE_OP]
        self.assertEqual(len(unique_op_outputs), 2)
        self.assertAllClose(unique_op_outputs[0], [-1.0, 0.0])
        self.assertAllClose(unique_op_outputs[1], [0, 0, 1])
        add_op_outputs = instrument.graph_internal_ndarrays[b"add"]
        self.assertEqual(len(add_op_outputs), 1)
        self.assertAllClose(add_op_outputs[0], [1.0, 2.0])
        log_op_outputs = instrument.graph_internal_ndarrays[_LOG_OP]
        self.assertEqual(len(log_op_outputs), 1)
        self.assertAllClose(log_op_outputs[0], [0.0, np.log(2.0)])

    def testEagerGraphOpConstructionIfControlFlow(self):
        instrument = _NumpyFunctionCallback()

        with op_callbacks.op_callback(instrument.callback):

            @def_function.function
            def my_function_with_cond(x):
                if math_ops.greater(x, 0.0):
                    return x**2.0
                else:
                    return x**3.0

            x = constant_op.constant(-4.0)
            self.assertAllClose(my_function_with_cond(x), -64.0)

        self.assertIn(_IF_OP, instrument.graph_op_types)
        self.assertIn(_GREATER_OP, instrument.graph_op_types)
        self.assertIn(_POW_OP, instrument.graph_op_types)
        self.assertEqual(len(instrument.graph_op_names),
                         len(instrument.graph_op_types))

        # Check the graph internal ndarrays recorded at runtime.
        greater_op_outputs = instrument.graph_internal_ndarrays[_GREATER_OP]
        self.assertEqual(len(greater_op_outputs), 1)
        self.assertAllClose(greater_op_outputs[0], False)
        pow_op_outputs = instrument.graph_internal_ndarrays[b"pow"]
        self.assertEqual(len(pow_op_outputs), 1)
        self.assertAllClose(pow_op_outputs[0], -64.0)

    def testNoOutputOpUnderEagerExecution(self):
        instrument = _NumpyFunctionCallback()
        with op_callbacks.op_callback(instrument.callback):
            x = constant_op.constant(10.0)
            y = constant_op.constant(20.0)
            z = x + y
            w = control_flow_ops.group([z])
            self.assertIsNone(w)
        self.assertEqual(instrument.eager_op_types, [_ADD_OP])
    def testSingleThreadedStack(self):
        instrument_0 = _NumpyFunctionCallback()
        instrument_1 = _NumpyFunctionCallback()

        with op_callbacks.op_callback(instrument_0.callback):
            self.assertEqual(1, len(op_callbacks._state.callback_stack))
            self.assertEqual(instrument_0.callback,
                             op_callbacks._state.callback_stack[0])

            with op_callbacks.op_callback(instrument_1.callback):
                self.assertEqual(2, len(op_callbacks._state.callback_stack))
                self.assertEqual(instrument_0.callback,
                                 op_callbacks._state.callback_stack[0])
                self.assertEqual(instrument_1.callback,
                                 op_callbacks._state.callback_stack[1])

            self.assertEqual(1, len(op_callbacks._state.callback_stack))
            self.assertEqual(instrument_0.callback,
                             op_callbacks._state.callback_stack[0])

        self.assertEqual(0, len(op_callbacks._state.callback_stack))
  def testSparseTensorEagerExecution(self):
    instrument = _NumpyFunctionCallback()

    with op_callbacks.op_callback(instrument.callback):
      indices = [[1, 2], [2, 0], [3, 4]]
      values = [0.0, 8.0, -2.0]
      shape = [4, 5]
      sp = sparse_tensor.SparseTensorValue(indices, values, shape)
      w = ops.convert_to_tensor(np.ones([5, 1], np.float32))

      y = sparse_ops.sparse_tensor_dense_matmul(sp, w)
      self.assertAllClose(y, [[0.0], [0.0], [8.0], [-2.0]])
      self.assertIn(_SPARSE_TENSOR_DENSE_MATMUL_OP, instrument.eager_op_types)
      self.assertFalse(instrument.graph_op_types)

    def testGraphConstructionInputsAndGraphAreCapturedCorrectly(self):
        instrument = _NumpyFunctionCallback(instrument_graph_ops=False)

        with op_callbacks.op_callback(instrument.callback):

            @def_function.function
            def log_2plus_unique_x(x):
                unique_values, unique_pos = array_ops.unique(x)
                return math_ops.log(2.0 + unique_values), unique_pos

            x = constant_op.constant([-1.0, -1.0, 0.0], dtype=dtypes.float32)
            y1, y2 = log_2plus_unique_x(x)
            self.assertAllClose(y1, [0.0, np.log(2.0)])
            self.assertAllClose(y2, [0, 0, 1])

        # Check the recorded input tensors.
        self.assertEqual(len(instrument.graph_inputs),
                         len(instrument.graph_op_types))
        unique_inputs = instrument.graph_inputs[
            instrument.graph_op_types.index(_UNIQUE_OP)]
        self.assertIsInstance(unique_inputs, tuple)
        self.assertEqual(len(unique_inputs), 1)
        self.assertEqual(compat.as_bytes(unique_inputs[0].op.op_def.name),
                         _PLACEHOLDER_OP)

        add_inputs = instrument.graph_inputs[instrument.graph_op_types.index(
            _ADD_OP)]
        self.assertIsInstance(add_inputs, tuple)
        self.assertEqual(len(add_inputs), 2)
        self.assertEqual(compat.as_bytes(add_inputs[0].op.op_def.name),
                         _CONSTANT_OP)
        self.assertEqual(compat.as_bytes(add_inputs[1].op.op_def.name),
                         _UNIQUE_OP)

        log_inputs = instrument.graph_inputs[instrument.graph_op_types.index(
            _LOG_OP)]
        self.assertIsInstance(log_inputs, tuple)
        self.assertEqual(len(log_inputs), 1)
        self.assertEqual(compat.as_bytes(log_inputs[0].op.op_def.name),
                         _ADD_OP)

        # Check the recorded graphs.
        self.assertEqual(len(instrument.graph_graphs),
                         len(instrument.graph_op_types))
        self.assertGreater(len(instrument.graph_graph_versions), 1)
        for i in range(len(instrument.graph_graph_versions) - 1):
            self.assertGreater(instrument.graph_graph_versions[i + 1],
                               instrument.graph_graph_versions[i])
  def testOverrideDTypeInFuncGraph(self):

    def to_float64(op_type, inputs, attrs, outputs, op_name=None, graph=None):
      del op_type, inputs, attrs, op_name, graph  # Unused.
      return [math_ops.cast(output, dtypes.float64) for output in outputs]

    with op_callbacks.op_callback(to_float64):

      @def_function.function
      def add_1_times_2(x):
        return (x + 1.0) * 2.0

      x = constant_op.constant(3.0, dtype=dtypes.float32)
      y = add_1_times_2(x)
      self.assertEqual(y.dtype, dtypes.float64)
      self.assertAllClose(y, 8.0)

    def testEagerOpAttributesAreCaptured(self):
        instrument = _NumpyFunctionCallback()
        with op_callbacks.op_callback(instrument.callback):
            m = constant_op.constant([[1.0, -1.0], [0.0, 1.0]])
            x = constant_op.constant([[-2.0], [3.0]])
            y = math_ops.matmul(m, x, transpose_a=True, transpose_b=False)
            self.assertAllClose(y, [[-2.0], [5.0]])
        self.assertEqual(len(instrument.eager_attrs), 1)
        self.assertIsInstance(instrument.eager_attrs[0], tuple)
        self.assertEqual(
            instrument.eager_attrs[0][
                instrument.eager_attrs[0].index("transpose_a") + 1], True)
        self.assertEqual(
            instrument.eager_attrs[0][
                instrument.eager_attrs[0].index("transpose_b") + 1], False)
        self.assertEqual(len(instrument.graph_attrs), 0)

    def testMultiThreadedStacks(self):
        # Instrument for the main thread.
        instrument_0 = _NumpyFunctionCallback()

        # Instrument for the to-be-created thread.
        instrument_1 = _NumpyFunctionCallback()

        def thread1_job():
            with op_callbacks.op_callback(instrument_1.callback):

                @def_function.function
                def func1(x):
                    return math_ops.sqrt(math_ops.log(x))

                x = constant_op.constant(4.0)
                self.assertAllClose(func1(x), np.sqrt(np.log(4.0)))

        thread1 = threading.Thread(target=thread1_job)

        # Start job on separate thread.
        thread1.start()

        # Run something on the main thread.
        with op_callbacks.op_callback(instrument_0.callback):

            @def_function.function
            def func0(x):
                return math_ops.square(math_ops.sin(x))

            x = constant_op.constant(4.0)
            self.assertAllClose(func0(x), np.square(np.sin(4.0)))

        thread1.join()

        # Assert that there is no cross-talk between the main thread
        # and the created thread.
        self.assertIn(_LOG_OP, instrument_1.graph_op_types)
        self.assertIn(_SQRT_OP, instrument_1.graph_op_types)
        self.assertNotIn(_SIN_OP, instrument_1.graph_op_types)
        self.assertNotIn(_SQUARE_OP, instrument_1.graph_op_types)

        self.assertNotIn(_LOG_OP, instrument_0.graph_op_types)
        self.assertNotIn(_SQRT_OP, instrument_0.graph_op_types)
        self.assertIn(_SIN_OP, instrument_0.graph_op_types)
        self.assertIn(_SQUARE_OP, instrument_0.graph_op_types)

    def testMultiThreadedEagerFunctionExecution(self):
        # Instrument for the main thread.
        instrument_0 = _NumpyFunctionCallback()

        # Instrument for the to-be-created thread.
        instrument_1 = _NumpyFunctionCallback()

        @def_function.function
        def square_log(x):
            return math_ops.square(math_ops.log(x))

        # Call the function once, so that the graph construction won't show up
        # in the callback.
        x_float32 = constant_op.constant(6.0, dtype=dtypes.float32)
        x_float64 = constant_op.constant(6.0, dtype=dtypes.float64)
        square_log(x_float32)
        square_log(x_float64)

        def thread_1_job():
            with op_callbacks.op_callback(instrument_1.callback):
                square_log(x_float32)

        thread_1 = threading.Thread(target=thread_1_job)
        thread_1.start()

        # In the meantime, run some computation on the main thread.
        with op_callbacks.op_callback(instrument_0.callback):
            square_log(x_float64)

        thread_1.join()

        # Each of the two dtypes should be associated with its own FuncGraph.
        self.assertIn(
            square_log.get_concrete_function(x_float64).name,
            instrument_0.eager_op_types)
        self.assertEqual(instrument_0.eager_op_names, [None])
        self.assertFalse(instrument_0.graph_op_types)
        self.assertIn(
            square_log.get_concrete_function(x_float32).name,
            instrument_1.eager_op_types)
        self.assertEqual(instrument_1.eager_op_names, [None])
        self.assertFalse(instrument_1.graph_op_types)

    def testOverridingWithWrongNumberOfTensorOutputsErrors(self):
        def wrong_outputs_callback(op_type,
                                   inputs,
                                   attrs,
                                   outputs,
                                   op_name=None,
                                   graph=None):
            del op_type, inputs, attrs, op_name, graph  # Unused.
            return outputs[0], math_ops.negative(outputs[0])

        with op_callbacks.op_callback(wrong_outputs_callback):

            @def_function.function
            def log1p(x):
                return math_ops.log(1.0 + x)

            x = constant_op.constant(3.0)
            with self.assertRaisesRegex(
                    ValueError,
                    r"returned 2 tensors, .* does not match .* \(1\)"):
                log1p(x)
  def testSimpleGraphConstructionWithCallbackReturningNone(self):
    """Test that callbacks that return None work."""
    op_types = []
    def no_return_callback(op_type,
                           inputs,
                           attrs,
                           outputs,
                           op_name=None,
                           graph=None):
      del inputs, attrs, outputs, op_name, graph  # Unused.
      op_types.append(compat.as_bytes(op_type))

    with op_callbacks.op_callback(no_return_callback):
      @def_function.function
      def log1p(x):
        return math_ops.log(1.0 + x)
      x = constant_op.constant(3.0)
      y = log1p(x)
      self.assertAllClose(y, np.log(4.0))
    self.assertIn(_ADD_OP, op_types)
    self.assertIn(_LOG_OP, op_types)

    def testDatasetMapTest(self):
        instrument = _NumpyFunctionCallback()

        with op_callbacks.op_callback(instrument.callback):
            tensor = constant_op.constant(
                [0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0])

            def map_fn(x):
                return math_ops.log(math_ops.square(x) + 1)

            dataset = dataset_ops.Dataset.from_tensor_slices(tensor).batch(
                2).map(map_fn)
            iterator = dataset_ops.make_one_shot_iterator(dataset)

            self.assertAllClose(iterator.next(), np.log([1.25, 2]))
            self.assertAllClose(iterator.next(), np.log([3.25, 5]))

            self.assertIn(_SQUARE_OP, instrument.graph_op_types)
            self.assertIn(_ADD_OP, instrument.graph_op_types)
            self.assertIn(_LOG_OP, instrument.graph_op_types)
            self.assertEqual(len(instrument.eager_op_types),
                             len(instrument.eager_op_names))

    def testEagerFunctionExecution(self):
        instrument = _NumpyFunctionCallback()

        @def_function.function
        def square_log(x):
            return math_ops.square(math_ops.log(x))

        # Call the function once, so that the graph construction won't show up
        # in the callback.
        x_float32 = constant_op.constant(6.0, dtype=dtypes.float32)
        x_float64 = constant_op.constant(6.0, dtype=dtypes.float64)
        square_log(x_float32)
        square_log(x_float64)

        with op_callbacks.op_callback(instrument.callback):
            y = square_log(x_float32)
            self.assertAllClose(y, np.square(np.log(6.0)))
            y = square_log(x_float64)
            self.assertAllClose(y, np.square(np.log(6.0)))

        # Each of the two dtypes should be associated with its own FuncGraph.
        self.assertIn(
            square_log.get_concrete_function(x_float32).name,
            instrument.eager_op_types)
        self.assertIn(
            square_log.get_concrete_function(x_float64).name,
            instrument.eager_op_types)

        self.assertEqual(len(instrument.eager_inputs), 2)
        self.assertIsInstance(instrument.eager_inputs[0], tuple)
        self.assertEqual(instrument.eager_inputs[0][0], x_float32)
        self.assertIsInstance(instrument.eager_inputs[1], tuple)
        self.assertEqual(instrument.eager_inputs[1][0], x_float64)

        self.assertEqual(instrument.eager_op_names, [None, None])
        self.assertFalse(instrument.graph_op_types)
        self.assertFalse(instrument.graph_op_names)
        self.assertFalse(instrument.graph_inputs)

def check_numerics(stack_height_limit=30, path_length_limit=50):
    r"""Creates a context manager that checks numerics of tensors in ops.

    This context manager works for eagerly-executed ops and ops executed in
    `tf.function`s (graphs) in a unified way.

    When an op's float-type output tensor contains any Infinity or NaN, a
    `tf.errors.InvalidArgumentError` will be thrown, with an error message
    that reveals the following information:
      - The type of the op that generated the tensor with bad numerics.
      - Data type (dtype) of the tensor.
      - Shape of the tensor (to the extent known at the time of eager execution
        or graph construction).
      - (Graph mode only): Name of the containing graph.
      - (Graph mode only): The stack trace of the intra-graph op's creation,
        with a stack-height limit and a path-length limit for visual clarity.
        The stack frames that belong to the user's code (as opposed to
        tensorflow's internal code) are highlighted with a text arrow ("->").
      - (Eager mode only): How many of the offending tensor's elements are
        `Infinity` and `NaN`, respectively.

    Args:
      stack_height_limit: Limit to the height of the printed stack trace.
        Applicable only to ops in `tf.function`s (graphs).
      path_length_limit: Limit to the file path included in the printed stack
        trace. Applicable only to ops in `tf.function`s (graphs).

    Returns:
      A thread-local context manager.
    """
    # TODO(cais): Once this is exposed as a public API add code example in the
    # doc string above.

    return op_callbacks.op_callback(
        functools.partial(_check_numerics_callback, stack_height_limit,
                          path_length_limit))
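
# A minimal usage sketch for the check_numerics() context manager above. The
# wrapper name _demo_check_numerics and the `tf` import are illustrative
# assumptions, not part of the original module; the error behavior described
# in the comment follows the docstring above.
def _demo_check_numerics():
    import tensorflow as tf

    with check_numerics(stack_height_limit=30, path_length_limit=50):
        x = tf.constant([1.0, -1.0])
        # log(-1.0) produces NaN, so this op raises
        # tf.errors.InvalidArgumentError while the callback is active.
        return tf.math.log(x)
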
    def testEagerOpExecution(self):
        instrument = _NumpyFunctionCallback()

        with op_callbacks.op_callback(instrument.callback):
            x = constant_op.constant(6.0)
            y = math_ops.square(math_ops.log(x))
            self.assertAllClose(y, np.square(np.log(6.0)))

        self.assertEqual(instrument.eager_op_types, [_LOG_OP, _SQUARE_OP])
        # Op names are unavailable under eager mode.
        self.assertEqual(instrument.eager_op_names, [None, None])
        self.assertEqual(instrument.eager_graphs, [None, None])
        self.assertEqual(len(instrument.eager_inputs), 2)
        self.assertEqual(len(instrument.eager_inputs[0]), 1)
        self.assertIsInstance(instrument.eager_inputs[0], tuple)
        self.assertEqual(instrument.eager_inputs[0][0], x)
        self.assertEqual(len(instrument.eager_inputs[1]), 1)
        self.assertIsInstance(instrument.eager_inputs[1], tuple)
        self.assertAllClose(instrument.eager_inputs[1][0], np.log(6.0))
        self.assertFalse(instrument.graph_op_types)
        self.assertFalse(instrument.graph_op_names)
        self.assertFalse(instrument.graph_attrs)
        self.assertFalse(instrument.graph_graphs)
        self.assertFalse(instrument.graph_inputs)

    def testNonCallableObjectArgErrors(self):
        with self.assertRaisesRegex(ValueError, r"is expected to be callable"):
            with op_callbacks.op_callback(1337):
                pass
    def log_2plus_unique_x(x):
        with op_callbacks.op_callback(instrument.callback):
            unique_values, _ = array_ops.unique(x)
            y = math_ops.log(2.0 + unique_values)
        return math_ops.sin(y)