Example #1
 def testExecuteListTypeListShapeAttr(self):
   execute.execute(
       'Barrier',
       num_outputs=1,
       inputs=[],
       attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
              [[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
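Note that execute.execute receives attrs as a flat tuple of alternating attribute names and values, not as a dict. The helper below is a minimal sketch (hypothetical, not part of TensorFlow) showing how such a tuple could be built from a dict:

def flatten_attrs(attr_dict):
  # Flatten a dict of op attributes into the alternating
  # (name, value, name, value, ...) tuple that execute.execute expects.
  flat = []
  for attr_name, attr_value in attr_dict.items():
    flat.extend((attr_name, attr_value))
  return tuple(flat)

attrs = flatten_attrs({'component_types': [dtypes.float64.as_datatype_enum],
                       'shapes': [[1, 2]], 'capacity': -1,
                       'container': '', 'shared_name': ''})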
Example #2
 def testExecuteUnknownAttr(self):
   with self.assertRaises(errors.InvalidArgumentError):
     execute.execute(
         'Identity',
         num_outputs=1,
         inputs=[tensor.Tensor(3)],
         attrs=('T', dtypes.int32.as_datatype_enum, 'unknown_attr', 'blah'))
Example #3
 def testExecuteListFloatAttrBadValue(self):
   with self.assertRaises(errors.InvalidArgumentError):
     execute.execute(
         'Bucketize',
         num_outputs=1,
         inputs=[tensor.Tensor([3.0, 5.0, 7.0])],
         attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', 4.0))
Example #4
 def testExecuteShapeAttr(self):
   execute.execute(
       'VarHandleOp',
       num_outputs=1,
       inputs=[],
       attrs=('shape', [1, 2], 'dtype', dtypes.int32.as_datatype_enum,
              'container', '', 'shared_name', ''))
Example #5
 def testExecuteListIntAttrBadValue(self):
   with self.assertRaises(errors.InvalidArgumentError):
     execute.execute(
         'Squeeze',
         num_outputs=1,
         inputs=[tensor.Tensor([[[3.0]]])],
         attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', 0))
Example #6
 def testExecuteListShapeAttrBadListValue(self):
   with self.assertRaises(errors.InvalidArgumentError):
     execute.execute(
         'Barrier',
         num_outputs=1,
         inputs=[],
         attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
                [1], 'capacity', -1, 'container', '', 'shared_name', ''))
Example #7
 def testExecuteListTypeAttrBadListValue(self):
   with self.assertRaises(errors.InvalidArgumentError):
     execute.execute(
         b'Barrier',
         num_outputs=1,
         inputs=[],
         attrs=('component_types', '1', 'shapes', [[1, 2]], 'capacity', -1,
                'container', '', 'shared_name', ''))
Example #8
 def testExecuteListIntAttrBadListValue(self):
   with self.assertRaises(errors.InvalidArgumentError):
     execute.execute(
         b'Squeeze',
         num_outputs=1,
         inputs=[constant_op.constant([[[3.0]]])],
         attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims',
                ['0', '2']))
Example #9
 def testExecuteListFloatAttrBadListValue(self):
   with self.assertRaises(errors.InvalidArgumentError):
     execute.execute(
         b'Bucketize',
         num_outputs=1,
         inputs=[constant_op.constant([3.0, 5.0, 7.0])],
         attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries',
                ['4.0', '6.0']))
Example #10
 def testExecuteListStringAttrBadListValue(self):
   with self.assertRaises(errors.InvalidArgumentError):
     execute.execute(
         'TensorSummary',
         num_outputs=1,
         inputs=[tensor.Tensor(3.0)],
         attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
                'labels', [3], 'display_name', 'test'))
Example #11
 def testExecuteListStringAttr(self):
   execute.execute(
       'TensorSummary',
       num_outputs=1,
       inputs=[tensor.Tensor(3.0)],
        attrs=('T', dtypes.float32.as_datatype_enum, 'description',
               'tensor_summary', 'labels', ['3', 'summary'],
               'display_name', 'test'))
Example #12
 def testExecuteShapeAttrBadValue(self):
   with self.assertRaises(errors.InvalidArgumentError):
     execute.execute(
         'VarHandleOp',
         num_outputs=1,
         inputs=[],
         attrs=('shape', 1, 'dtype', dtypes.int32.as_datatype_enum,
                'container', '', 'shared_name', ''))
Example #13
  def testOperationWithNoInputsRunsOnDevice(self):
    if not context.context().num_gpus():
      self.skipTest('No GPUs found')
    shape = tensor.Tensor([], dtype=dtypes.int32)

    # x: Run the "TruncatedNormal" op CPU and copy result to GPU.
    x = truncated_normal(shape).as_gpu_tensor()
    # y: Explicitly run the "TruncatedNormal" op on GPU.
    with context.device('gpu:0'):
      y = truncated_normal(shape)
    # Add would fail if x and y were not on the same device.
    execute.execute(
        'Add', 1, inputs=[x, y], attrs=('T', x.dtype.as_datatype_enum))
Example #14
  def __call__(self, *args):
    """Executes the passed function in eager mode."""
    tensor_inputs = [
        x for x in nest.flatten(args)
        if isinstance(x, ops.Tensor)
    ]
    if tape.should_record(tensor_inputs) or tape.should_record(
        self._extra_inputs):
      if not self._has_backprop:
        self._compute_backprop()
      return self._backprop_call(tensor_inputs)

    if context.in_graph_mode():
      g = ops.get_default_graph()
      if self._fdef.name not in g._functions:  # pylint: disable=protected-access
        g._add_function(self._fdef)  # pylint: disable=protected-access
      signature = self._fdef.definition.signature
      args = list(tensor_inputs) + self._extra_inputs
      op = g.create_op(
          signature.name, [ops.convert_to_tensor(x) for x in args],
          [dtypes.DType(x.type) for x in signature.output_arg],
          op_def=signature,
          name="FunctionCall",
          compute_shapes=False)
      result = op.outputs
      for i, s in enumerate(self._output_shapes):
        result[i].set_shape(s)
    else:
      result = execute.execute(
          str(self._func_name),
          num_outputs=self._num_outputs,
          inputs=tensor_inputs + self._extra_inputs)

    return self._build_call_outputs(self._returns, result)
Example #15
 def testExecuteIntAttr(self):
   total = execute.execute(
       b'AddN',
       num_outputs=1,
       inputs=[constant_op.constant(3), constant_op.constant(4)],
       attrs=('T', dtypes.int32.as_datatype_enum, 'N', 2))[0]
   self.assertEqual(7, total.numpy())
Example #16
def xla_launch_eager_fallback(constants, args, resources, Tresults, function, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function xla_launch
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(resources, (list, tuple)):
    raise TypeError(
        "Expected list for 'resources' argument to "
        "'xla_launch' Op, not %r." % resources)
  _attr_Nresources = len(resources)
  if not isinstance(Tresults, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tresults' argument to "
        "'xla_launch' Op, not %r." % Tresults)
  Tresults = [_execute.make_type(_t, "Tresults") for _t in Tresults]
  _attr_Tconstants, constants = _execute.convert_to_mixed_eager_tensors(constants, _ctx)
  _attr_Targs, args = _execute.convert_to_mixed_eager_tensors(args, _ctx)
  resources = _ops.convert_n_to_tensor(resources, _dtypes.resource)
  _inputs_flat = list(constants) + list(args) + list(resources)
  _attrs = ("Tconstants", _attr_Tconstants, "Targs", _attr_Targs,
  "Nresources", _attr_Nresources, "Tresults", Tresults, "function", function)
  _result = _execute.execute(b"XlaLaunch", len(Tresults), inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "XlaLaunch", _inputs_flat, _attrs, _result, name)
  return _result
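Most of the remaining examples are generated *_eager_fallback wrappers, and they all share the same shape: normalize attribute values, convert inputs to eager tensors, flatten inputs and attrs, and dispatch through _execute.execute. As a minimal sketch (not generated TensorFlow source; it assumes the _execute alias used in these modules), a fallback for the elementwise Add op could look like:

def add_eager_fallback(x, y, name, ctx):
  # Infer the shared dtype attr T from both inputs.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx)
  (x, y) = _inputs_T
  _inputs_flat = [x, y]
  _attrs = ("T", _attr_T)
  # Add produces exactly one output tensor.
  _result = _execute.execute(b"Add", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("Add", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result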
Example #17
 def _capture_by_value(
     self,
     op_type,
     inputs,
     dtypes,  # pylint: disable=redefined-outer-name
     input_types=None,
     name=None,
     attrs=None,
     op_def=None,
     compute_shapes=True,
     compute_device=True):
   # When capturing by value, do the read outside
   reverse_captures = dict((v, k) for k, v in self.captures.items())
   uncaptured_inputs = [reverse_captures.get(t, t) for t in inputs]
   with ops.init_scope():
     if context.executing_eagerly():
       attr_list = ("dtype", int(attrs["dtype"].type))
       value, = execute.execute(
           compat.as_bytes(op_type), 1, uncaptured_inputs, attr_list,
           context.context())
     else:
       op = ops.get_default_graph().create_op(
           op_type, uncaptured_inputs, dtypes, input_types, name, attrs,
           op_def, compute_shapes, compute_device)
       value = op.outputs[0]
   captured_value = self.capture(value)
   return captured_value.op
Example #18
def build_sparse_inequality_splits_eager_fallback(num_minibatches, partition_ids, bucket_ids, gradients, hessians, bucket_boundaries, class_id, feature_column_group_id, bias_feature_id, l1_regularization, l2_regularization, tree_complexity_regularization, min_node_weight, multiclass_strategy, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function build_sparse_inequality_splits
  """
  _ctx = ctx if ctx else _context.context()
  num_minibatches = _ops.convert_to_tensor(num_minibatches, _dtypes.int64)
  partition_ids = _ops.convert_to_tensor(partition_ids, _dtypes.int32)
  bucket_ids = _ops.convert_to_tensor(bucket_ids, _dtypes.int64)
  gradients = _ops.convert_to_tensor(gradients, _dtypes.float32)
  hessians = _ops.convert_to_tensor(hessians, _dtypes.float32)
  bucket_boundaries = _ops.convert_to_tensor(bucket_boundaries, _dtypes.float32)
  class_id = _ops.convert_to_tensor(class_id, _dtypes.int32)
  feature_column_group_id = _ops.convert_to_tensor(feature_column_group_id, _dtypes.int32)
  bias_feature_id = _ops.convert_to_tensor(bias_feature_id, _dtypes.int64)
  l1_regularization = _ops.convert_to_tensor(l1_regularization, _dtypes.float32)
  l2_regularization = _ops.convert_to_tensor(l2_regularization, _dtypes.float32)
  tree_complexity_regularization = _ops.convert_to_tensor(tree_complexity_regularization, _dtypes.float32)
  min_node_weight = _ops.convert_to_tensor(min_node_weight, _dtypes.float32)
  multiclass_strategy = _ops.convert_to_tensor(multiclass_strategy, _dtypes.int32)
  _inputs_flat = [num_minibatches, partition_ids, bucket_ids, gradients, hessians, bucket_boundaries, class_id, feature_column_group_id, bias_feature_id, l1_regularization, l2_regularization, tree_complexity_regularization, min_node_weight, multiclass_strategy]
  _attrs = None
  _result = _execute.execute(b"BuildSparseInequalitySplits", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "BuildSparseInequalitySplits", _inputs_flat, _attrs, _result, name)
  _result = _BuildSparseInequalitySplitsOutput._make(_result)
  return _result
Example #19
 def testExecuteStringAttrBadValue(self):
   with self.assertRaises(errors.InvalidArgumentError):
     _ = execute.execute(
         'CheckNumerics',
         num_outputs=1,
         inputs=[tensor.Tensor(3.)],
         attrs=('message', 1, 'T', dtypes.float32.as_datatype_enum))
Example #20
 def testExecuteFloatAttr(self):
   almost_equal = execute.execute(
       'ApproximateEqual',
       num_outputs=1,
       inputs=[tensor.Tensor(3.0), tensor.Tensor(2.9)],
       attrs=('tolerance', 0.3, 'T', dtypes.float32.as_datatype_enum))[0]
   self.assertTrue(almost_equal.numpy())
Example #21
 def testExecuteFloatAttrBadValue(self):
   with self.assertRaises(errors.InvalidArgumentError):
     _ = execute.execute(
         'ApproximateEqual',
         num_outputs=1,
         inputs=[tensor.Tensor(3.0), tensor.Tensor(2.9)],
         attrs=('tolerance', '0.3', 'T', dtypes.float32.as_datatype_enum))
Example #22
 def testExecuteIntAttr(self):
   total = execute.execute(
       'AddN',
       num_outputs=1,
       inputs=[tensor.Tensor(3), tensor.Tensor(4)],
       attrs=('T', dtypes.int32.as_datatype_enum, 'N', 2))[0]
   self.assertEqual(7, total.numpy())
Example #23
def truncated_normal(shape):
  return execute.execute(
      'TruncatedNormal',
      1,
      inputs=[shape],
      attrs=('dtype', dtypes.float32.as_datatype_enum, 'T',
             shape.dtype.as_datatype_enum, 'seed', 0, 'seed2', 0))[0]
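This is the helper invoked by the device-placement test in Example #13. A minimal usage sketch, assuming the same eager test setup and tensor module used throughout these tests:

shape = tensor.Tensor([2, 2], dtype=dtypes.int32)
samples = truncated_normal(shape)  # float32 tensor of shape [2, 2]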
Example #24
 def testExecuteIntAttrBadValue(self):
   with self.assertRaises(errors.InvalidArgumentError):
     _ = execute.execute(
         'AddN',
         num_outputs=1,
         inputs=[tensor.Tensor(3), tensor.Tensor(4)],
         attrs=('T', dtypes.int32.as_datatype_enum, 'N', '2'))
Example #25
 def testExecuteListIntAttr(self):
   b = execute.execute(
       'Squeeze',
       num_outputs=1,
       inputs=[tensor.Tensor([[[3.0]]])],
       attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', [0, 2]))[0]
   self.assertAllEqual([3], b.numpy())
Example #26
 def testExecuteStringAttr(self):
   checked_three = execute.execute(
       'CheckNumerics',
       num_outputs=1,
       inputs=[tensor.Tensor(3.)],
       attrs=('message', 'just checking', 'T',
              dtypes.float32.as_datatype_enum))[0]
   self.assertEqual([[3]], checked_three.numpy())
Example #27
def _eager_fill(dims, value):
  """Eager-only version of Fill op; requires value is an eager Tensor."""
  attr_t = value.dtype.as_datatype_enum
  dims = convert_to_eager_tensor(dims, dtypes.int32)
  inputs_flat = [dims, value]
  attrs = ("T", attr_t)
  result, = execute.execute(b"Fill", 1, inputs=inputs_flat, attrs=attrs)
  return result
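A minimal usage sketch, assuming eager mode and the constant_op module from the other examples; the value is built with constant_op.constant first because the docstring requires it to already be an eager Tensor:

value = constant_op.constant(7.0)
filled = _eager_fill([2, 3], value)  # 2x3 float32 tensor, every entry 7.0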
Example #28
 def testExecuteListFloatAttr(self):
   b = execute.execute(
       'Bucketize',
       num_outputs=1,
       inputs=[tensor.Tensor([3.0, 5.0, 7.0])],
        attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries',
               [4.0, 6.0]))[0]
   self.assertAllEqual([0, 1, 2], b.numpy())
Example #29
 def testExecuteBoolAttr(self):
   product = execute.execute(
       'MatMul',
       num_outputs=1,
       inputs=[tensor.Tensor([[3]]),
               tensor.Tensor([[5]])],
       attrs=('transpose_a', True, 'transpose_b', False, 'T',
              dtypes.int32.as_datatype_enum))[0]
   self.assertEqual([[15]], product.numpy())
Example #30
 def testExecuteTooManyNumOutputs(self):
   # num_outputs provided is 50, but only one output is produced.
   # That should be okay.
   product = execute.execute(
       'Mul',
       num_outputs=50,
       inputs=[tensor.Tensor(3), tensor.Tensor(5)],
       attrs=('T', dtypes.int32.as_datatype_enum))[0]
   self.assertEqual(15, product.numpy())
Example #31
def decision_tree_ensemble_resource_handle_op(container="", shared_name="", name=None):
  r"""Creates a handle to a DecisionTreeEnsembleResource

  Args:
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `resource`.
  """
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _ctx = _context.context()
  if _ctx.in_graph_mode():
    _, _, _op = _op_def_lib._apply_op_helper(
        "DecisionTreeEnsembleResourceHandleOp", container=container,
        shared_name=shared_name, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
  else:
    _inputs_flat = []
    _attrs = ("container", container, "shared_name", shared_name)
    _result = _execute.execute(b"DecisionTreeEnsembleResourceHandleOp", 1,
                               inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                               name=name)
  _execute.record_gradient(
      "DecisionTreeEnsembleResourceHandleOp", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
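A minimal usage sketch, assuming eager mode with these boosted-trees ops loaded; the shared_name value is hypothetical:

handle = decision_tree_ensemble_resource_handle_op(shared_name='my_ensemble')
# handle is a scalar tensor of type resource, as described in Returns above.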
Example #32
def try_rpc_eager_fallback(address,
                           method,
                           request,
                           protocol="",
                           fail_fast=True,
                           timeout_in_ms=0,
                           name=None,
                           ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function try_rpc
  """
    _ctx = ctx if ctx else _context.context()
    if protocol is None:
        protocol = ""
    protocol = _execute.make_str(protocol, "protocol")
    if fail_fast is None:
        fail_fast = True
    fail_fast = _execute.make_bool(fail_fast, "fail_fast")
    if timeout_in_ms is None:
        timeout_in_ms = 0
    timeout_in_ms = _execute.make_int(timeout_in_ms, "timeout_in_ms")
    address = _ops.convert_to_tensor(address, _dtypes.string)
    method = _ops.convert_to_tensor(method, _dtypes.string)
    request = _ops.convert_to_tensor(request, _dtypes.string)
    _inputs_flat = [address, method, request]
    _attrs = ("protocol", protocol, "fail_fast", fail_fast, "timeout_in_ms",
              timeout_in_ms)
    _result = _execute.execute(b"TryRpc",
                               3,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("TryRpc", _inputs_flat, _attrs, _result, name)
    _result = _TryRpcOutput._make(_result)
    return _result
Example #33
def population_count_eager_fallback(x, name, ctx):
    _attr_T, (x, ) = _execute.args_to_matching_eager([x], ctx, [
        _dtypes.int8,
        _dtypes.int16,
        _dtypes.int32,
        _dtypes.int64,
        _dtypes.uint8,
        _dtypes.uint16,
        _dtypes.uint32,
        _dtypes.uint64,
    ])
    _inputs_flat = [x]
    _attrs = ("T", _attr_T)
    _result = _execute.execute(b"PopulationCount",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=ctx,
                               name=name)
    if _execute.must_record_gradient():
        _execute.record_gradient("PopulationCount", _inputs_flat, _attrs,
                                 _result)
    _result, = _result
    return _result
Example #34
def image_projective_transform_eager_fallback(images,
                                              transforms,
                                              interpolation,
                                              name=None,
                                              ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function image_projective_transform
  """
    _ctx = ctx if ctx else _context.context()
    interpolation = _execute.make_str(interpolation, "interpolation")
    _attr_dtype, (images, ) = _execute.args_to_matching_eager([images], _ctx)
    transforms = _ops.convert_to_tensor(transforms, _dtypes.float32)
    _inputs_flat = [images, transforms]
    _attrs = ("dtype", _attr_dtype, "interpolation", interpolation)
    _result = _execute.execute(b"ImageProjectiveTransform",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("ImageProjectiveTransform", _inputs_flat, _attrs,
                             _result, name)
    _result, = _result
    return _result
Example #35
def ignite_dataset_eager_fallback(cache_name,
                                  host,
                                  port,
                                  local,
                                  part,
                                  page_size,
                                  schema,
                                  permutation,
                                  name=None,
                                  ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function ignite_dataset
  """
    _ctx = ctx if ctx else _context.context()
    cache_name = _ops.convert_to_tensor(cache_name, _dtypes.string)
    host = _ops.convert_to_tensor(host, _dtypes.string)
    port = _ops.convert_to_tensor(port, _dtypes.int32)
    local = _ops.convert_to_tensor(local, _dtypes.bool)
    part = _ops.convert_to_tensor(part, _dtypes.int32)
    page_size = _ops.convert_to_tensor(page_size, _dtypes.int32)
    schema = _ops.convert_to_tensor(schema, _dtypes.int32)
    permutation = _ops.convert_to_tensor(permutation, _dtypes.int32)
    _inputs_flat = [
        cache_name, host, port, local, part, page_size, schema, permutation
    ]
    _attrs = None
    _result = _execute.execute(b"IgniteDataset",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("IgniteDataset", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result
Example #36
def ragged_count_sparse_output_eager_fallback(splits, values, weights,
                                              binary_output, minlength,
                                              maxlength, name, ctx):
    binary_output = _execute.make_bool(binary_output, "binary_output")
    if minlength is None:
        minlength = -1
    minlength = _execute.make_int(minlength, "minlength")
    if maxlength is None:
        maxlength = -1
    maxlength = _execute.make_int(maxlength, "maxlength")
    _attr_T, (values, ) = _execute.args_to_matching_eager([values], ctx, [
        _dtypes.int32,
        _dtypes.int64,
    ])
    _attr_output_type, (weights, ) = _execute.args_to_matching_eager(
        [weights], ctx, [
            _dtypes.int32,
            _dtypes.int64,
            _dtypes.float32,
            _dtypes.float64,
        ])
    splits = _ops.convert_to_tensor(splits, _dtypes.int64)
    _inputs_flat = [splits, values, weights]
    _attrs = ("T", _attr_T, "minlength", minlength, "maxlength", maxlength,
              "binary_output", binary_output, "output_type", _attr_output_type)
    _result = _execute.execute(b"RaggedCountSparseOutput",
                               3,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=ctx,
                               name=name)
    if _execute.must_record_gradient():
        _execute.record_gradient("RaggedCountSparseOutput", _inputs_flat,
                                 _attrs, _result)
    _result = _RaggedCountSparseOutputOutput._make(_result)
    return _result
Example #37
def ctc_beam_search_decoder_eager_fallback(inputs,
                                           sequence_length,
                                           beam_width,
                                           top_paths,
                                           merge_repeated=True,
                                           name=None,
                                           ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function ctc_beam_search_decoder
  """
    _ctx = ctx if ctx else _context.context()
    beam_width = _execute.make_int(beam_width, "beam_width")
    top_paths = _execute.make_int(top_paths, "top_paths")
    if merge_repeated is None:
        merge_repeated = True
    merge_repeated = _execute.make_bool(merge_repeated, "merge_repeated")
    inputs = _ops.convert_to_tensor(inputs, _dtypes.float32)
    sequence_length = _ops.convert_to_tensor(sequence_length, _dtypes.int32)
    _inputs_flat = [inputs, sequence_length]
    _attrs = ("beam_width", beam_width, "top_paths", top_paths,
              "merge_repeated", merge_repeated)
    _result = _execute.execute(b"CTCBeamSearchDecoder",
                               top_paths + top_paths + top_paths + 1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("CTCBeamSearchDecoder", _inputs_flat, _attrs,
                             _result, name)
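    # The flat result holds top_paths decoded-indices tensors, top_paths
    # decoded-values tensors and top_paths decoded-shape tensors, followed by
    # a single log-probability tensor; the slicing below regroups the flat
    # list into those three lists plus the final tensor.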
    _result = [_result[:top_paths]] + _result[top_paths:]
    _result = _result[:1] + [_result[1:1 + top_paths]
                             ] + _result[1 + top_paths:]
    _result = _result[:2] + [_result[2:2 + top_paths]
                             ] + _result[2 + top_paths:]
    _result = _CTCBeamSearchDecoderOutput._make(_result)
    return _result
Example #38
def dense_table_push_pull_eager_fallback(vars, grads, table_handle, name, ctx):
  if not isinstance(vars, (list, tuple)):
    raise TypeError(
        "Expected list for 'vars' argument to "
        "'dense_table_push_pull' Op, not %r." % vars)
  _attr_N = len(vars)
  if not isinstance(grads, (list, tuple)):
    raise TypeError(
        "Expected list for 'grads' argument to "
        "'dense_table_push_pull' Op, not %r." % grads)
  if len(grads) != _attr_N:
    raise ValueError(
        "List argument 'grads' to 'dense_table_push_pull' Op with length %d "
        "must match length %d of argument 'vars'." %
        (len(grads), _attr_N))
  table_handle = _execute.make_int(table_handle, "table_handle")
  vars = _ops.convert_n_to_tensor(vars, _dtypes.resource)
  grads = _ops.convert_n_to_tensor(grads, _dtypes.float32)
  _inputs_flat = list(vars) + list(grads)
  _attrs = ("table_handle", table_handle, "N", _attr_N)
  _result = _execute.execute(b"DenseTablePushPull", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  _result = None
  return _result
Example #39
def unbatch_eager_fallback(batched_tensor, batch_index, id, timeout_micros, container="", shared_name="", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function unbatch
  """
  _ctx = ctx if ctx else _context.context()
  timeout_micros = _execute.make_int(timeout_micros, "timeout_micros")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _attr_T, (batched_tensor,) = _execute.args_to_matching_eager([batched_tensor], _ctx)
  batch_index = _ops.convert_to_tensor(batch_index, _dtypes.int64)
  id = _ops.convert_to_tensor(id, _dtypes.int64)
  _inputs_flat = [batched_tensor, batch_index, id]
  _attrs = ("timeout_micros", timeout_micros, "container", container,
  "shared_name", shared_name, "T", _attr_T)
  _result = _execute.execute(b"Unbatch", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Unbatch", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
Example #40
def generate_vocab_remapping_eager_fallback(new_vocab_file, old_vocab_file,
                                            new_vocab_offset, num_new_vocab,
                                            old_vocab_size, name, ctx):
    new_vocab_offset = _execute.make_int(new_vocab_offset, "new_vocab_offset")
    num_new_vocab = _execute.make_int(num_new_vocab, "num_new_vocab")
    if old_vocab_size is None:
        old_vocab_size = -1
    old_vocab_size = _execute.make_int(old_vocab_size, "old_vocab_size")
    new_vocab_file = _ops.convert_to_tensor(new_vocab_file, _dtypes.string)
    old_vocab_file = _ops.convert_to_tensor(old_vocab_file, _dtypes.string)
    _inputs_flat = [new_vocab_file, old_vocab_file]
    _attrs = ("new_vocab_offset", new_vocab_offset, "num_new_vocab",
              num_new_vocab, "old_vocab_size", old_vocab_size)
    _result = _execute.execute(b"GenerateVocabRemapping",
                               2,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=ctx,
                               name=name)
    if _execute.must_record_gradient():
        _execute.record_gradient("GenerateVocabRemapping", _inputs_flat,
                                 _attrs, _result)
    _result = _GenerateVocabRemappingOutput._make(_result)
    return _result
Example #41
def bigtable_range_key_dataset_eager_fallback(table,
                                              start_key,
                                              end_key,
                                              name=None,
                                              ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function bigtable_range_key_dataset
  """
    _ctx = ctx if ctx else _context.context()
    table = _ops.convert_to_tensor(table, _dtypes.resource)
    start_key = _ops.convert_to_tensor(start_key, _dtypes.string)
    end_key = _ops.convert_to_tensor(end_key, _dtypes.string)
    _inputs_flat = [table, start_key, end_key]
    _attrs = None
    _result = _execute.execute(b"BigtableRangeKeyDataset",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("BigtableRangeKeyDataset", _inputs_flat, _attrs,
                             _result, name)
    _result, = _result
    return _result
Example #42
def tree_ensemble_deserialize_eager_fallback(tree_ensemble_handle,
                                             stamp_token,
                                             tree_ensemble_config,
                                             name=None,
                                             ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function tree_ensemble_deserialize
  """
    _ctx = ctx if ctx else _context.context()
    tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle,
                                                  _dtypes.resource)
    stamp_token = _ops.convert_to_tensor(stamp_token, _dtypes.int64)
    tree_ensemble_config = _ops.convert_to_tensor(tree_ensemble_config,
                                                  _dtypes.string)
    _inputs_flat = [tree_ensemble_handle, stamp_token, tree_ensemble_config]
    _attrs = None
    _result = _execute.execute(b"TreeEnsembleDeserialize",
                               0,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _result = None
    return _result
Example #43
def stateful_random_binomial_eager_fallback(resource, algorithm, shape, counts,
                                            probs, dtype, name, ctx):
    if dtype is None:
        dtype = _dtypes.int64
    dtype = _execute.make_type(dtype, "dtype")
    _attr_S, (shape, ) = _execute.args_to_matching_eager([shape], ctx)
    _attr_T, _inputs_T = _execute.args_to_matching_eager([counts, probs], ctx,
                                                         _dtypes.float64)
    (counts, probs) = _inputs_T
    resource = _ops.convert_to_tensor(resource, _dtypes.resource)
    algorithm = _ops.convert_to_tensor(algorithm, _dtypes.int64)
    _inputs_flat = [resource, algorithm, shape, counts, probs]
    _attrs = ("S", _attr_S, "T", _attr_T, "dtype", dtype)
    _result = _execute.execute(b"StatefulRandomBinomial",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=ctx,
                               name=name)
    if _execute.must_record_gradient():
        _execute.record_gradient("StatefulRandomBinomial", _inputs_flat,
                                 _attrs, _result)
    _result, = _result
    return _result
Example #44
def assign_sub_variable_op(resource, value, name=None):
    r"""Subtracts a value from the current value of a variable.

  Any ReadVariableOp with a control dependency on this op is guaranteed to
  see the decremented value or a subsequent newer one.

  Args:
    resource: A `Tensor` of type `resource`.
      handle to the resource in which to store the variable.
    value: A `Tensor`. The value by which the variable will be decremented.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper("AssignSubVariableOp",
                                                 resource=resource,
                                                 value=value,
                                                 name=name)
        return _op
    else:
        _attr_dtype, (value, ) = _execute.args_to_matching_eager([value], _ctx)
        resource = _ops.convert_to_tensor(resource, _dtypes.resource)
        _inputs_flat = [resource, value]
        _attrs = ("dtype", _attr_dtype)
        _result = _execute.execute(b"AssignSubVariableOp",
                                   0,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
        _result = None
    return _result
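A minimal usage sketch (hypothetical values; assumes eager mode and that handle is the resource handle of an initialized float32 variable, e.g. one created with VarHandleOp as in Example #4):

assign_sub_variable_op(handle, 1.0)  # variable := variable - 1.0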
Example #45
def tree_ensemble_used_handlers_eager_fallback(tree_ensemble_handle,
                                               stamp_token,
                                               num_all_handlers,
                                               name=None):
    r"""This is the slowpath function for Eager mode.
  This is for function tree_ensemble_used_handlers
  """
    _ctx = _context.context()
    num_all_handlers = _execute.make_int(num_all_handlers, "num_all_handlers")
    tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle,
                                                  _dtypes.resource)
    stamp_token = _ops.convert_to_tensor(stamp_token, _dtypes.int64)
    _inputs_flat = [tree_ensemble_handle, stamp_token]
    _attrs = ("num_all_handlers", num_all_handlers)
    _result = _execute.execute(b"TreeEnsembleUsedHandlers",
                               2,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("TreeEnsembleUsedHandlers", _inputs_flat, _attrs,
                             _result, name)
    _result = _TreeEnsembleUsedHandlersOutput._make(_result)
    return _result
Example #46
def right_shift_eager_fallback(x, y, name, ctx):
    _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [
        _dtypes.int8,
        _dtypes.int16,
        _dtypes.int32,
        _dtypes.int64,
        _dtypes.uint8,
        _dtypes.uint16,
        _dtypes.uint32,
        _dtypes.uint64,
    ])
    (x, y) = _inputs_T
    _inputs_flat = [x, y]
    _attrs = ("T", _attr_T)
    _result = _execute.execute(b"RightShift",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=ctx,
                               name=name)
    if _execute.must_record_gradient():
        _execute.record_gradient("RightShift", _inputs_flat, _attrs, _result)
    _result, = _result
    return _result
Example #47
    def _backprop_call(self, args):
        """Calls the wrapped function and records the result on a tape."""
        all_args = args + self._extra_inputs
        signature = self._forward_fdef.definition.signature
        if context.in_graph_mode():
            g = ops.get_default_graph()
            g._add_function(self._forward_fdef)  # pylint: disable=protected-access

            def make_tensor(x):
                if isinstance(x, ops.Tensor):
                    return x
                return ops.convert_to_tensor(x)

            op = g.create_op(
                signature.name, [make_tensor(x) for x in all_args],
                [dtypes.DType(x.type) for x in signature.output_arg],
                op_def=signature,
                name="FunctionCall",
                compute_shapes=False)
            outputs = op.outputs
            outputs = [outputs] if isinstance(outputs,
                                              (ops.Tensor,
                                               type(None))) else list(outputs)
            for i, s in enumerate(self._output_shapes):
                outputs[i].set_shape(s)
        else:
            outputs = execute.execute(str(signature.name),
                                      num_outputs=len(signature.output_arg),
                                      inputs=all_args)
        real_outputs = outputs[:len(self._returns)]
        side_outputs = outputs[len(self._returns):]

        tape.record_operation(real_outputs, (args + self._extra_inputs),
                              side_outputs, self._backward_function)

        return self._build_call_outputs(self._returns, real_outputs)
Example #48
def loop_cond(input, name=None):
    r"""Forwards the input to the output.

  This operator represents the loop termination condition used by the
  "pivot" switches of a loop.

  Args:
    input: A `Tensor` of type `bool`.
      A boolean scalar, representing the branch predicate of the Switch op.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper("LoopCond",
                                                 input=input,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = None
    else:
        input = _ops.convert_to_tensor(input, _dtypes.bool)
        _inputs_flat = [input]
        _attrs = None
        _result = _execute.execute(b"LoopCond",
                                   1,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("LoopCond", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
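A minimal usage sketch, assuming eager mode so the else-branch above runs:

pred = loop_cond(True)  # returns a boolean scalar tensor holding True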
Example #49
def stateless_random_binomial_eager_fallback(shape, seed, counts, probs, dtype,
                                             name, ctx):
    if dtype is None:
        dtype = _dtypes.int64
    dtype = _execute.make_type(dtype, "dtype")
    _attr_S, (shape, ) = _execute.args_to_matching_eager([shape], ctx)
    _attr_Tseed, (seed, ) = _execute.args_to_matching_eager([seed], ctx,
                                                            _dtypes.int64)
    _attr_T, _inputs_T = _execute.args_to_matching_eager([counts, probs], ctx,
                                                         _dtypes.float64)
    (counts, probs) = _inputs_T
    _inputs_flat = [shape, seed, counts, probs]
    _attrs = ("S", _attr_S, "Tseed", _attr_Tseed, "T", _attr_T, "dtype", dtype)
    _result = _execute.execute(b"StatelessRandomBinomial",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=ctx,
                               name=name)
    if _execute.must_record_gradient():
        _execute.record_gradient("StatelessRandomBinomial", _inputs_flat,
                                 _attrs, _result)
    _result, = _result
    return _result
Example #50
def tensor_summary_v2_eager_fallback(tag,
                                     tensor,
                                     serialized_summary_metadata,
                                     name=None):
    r"""This is the slowpath function for Eager mode.
  This is for function tensor_summary_v2
  """
    _ctx = _context.context()
    _attr_T, (tensor, ) = _execute.args_to_matching_eager([tensor], _ctx)
    tag = _ops.convert_to_tensor(tag, _dtypes.string)
    serialized_summary_metadata = _ops.convert_to_tensor(
        serialized_summary_metadata, _dtypes.string)
    _inputs_flat = [tag, tensor, serialized_summary_metadata]
    _attrs = ("T", _attr_T)
    _result = _execute.execute(b"TensorSummaryV2",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("TensorSummaryV2", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result
Example #51
def write_image_summary_eager_fallback(writer, step, tag, tensor, bad_color,
                                       max_images, name, ctx):
    if max_images is None:
        max_images = 3
    max_images = _execute.make_int(max_images, "max_images")
    _attr_T, (tensor, ) = _execute.args_to_matching_eager([tensor], ctx, [
        _dtypes.uint8,
        _dtypes.float32,
        _dtypes.half,
    ], _dtypes.float32)
    writer = _ops.convert_to_tensor(writer, _dtypes.resource)
    step = _ops.convert_to_tensor(step, _dtypes.int64)
    tag = _ops.convert_to_tensor(tag, _dtypes.string)
    bad_color = _ops.convert_to_tensor(bad_color, _dtypes.uint8)
    _inputs_flat = [writer, step, tag, tensor, bad_color]
    _attrs = ("max_images", max_images, "T", _attr_T)
    _result = _execute.execute(b"WriteImageSummary",
                               0,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=ctx,
                               name=name)
    _result = None
    return _result
Example #52
def grow_tree_v4_eager_fallback(tree_handle,
                                stats_handle,
                                finished_nodes,
                                params,
                                name=None,
                                ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function grow_tree_v4
  """
    _ctx = ctx if ctx else _context.context()
    params = _execute.make_str(params, "params")
    tree_handle = _ops.convert_to_tensor(tree_handle, _dtypes.resource)
    stats_handle = _ops.convert_to_tensor(stats_handle, _dtypes.resource)
    finished_nodes = _ops.convert_to_tensor(finished_nodes, _dtypes.int32)
    _inputs_flat = [tree_handle, stats_handle, finished_nodes]
    _attrs = ("params", params)
    _result = _execute.execute(b"GrowTreeV4",
                               0,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _result = None
    return _result
Example #53
def no_op(name=None):
    r"""Does nothing. Only useful as a placeholder for control edges.

  Args:
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper("NoOp", name=name)
        return _op
    else:
        _inputs_flat = []
        _attrs = None
        _result = _execute.execute(b"NoOp",
                                   0,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
        _result = None
    return _result
Example #54
def image_summary_eager_fallback(
        tag,
        tensor,
        max_images=3,
        bad_color=_execute.make_tensor(
            """dtype: DT_UINT8 tensor_shape { dim { size: 4 } } int_val: 255 int_val: 0 int_val: 0 int_val: 255""",
            "bad_color"),
        name=None):
    r"""This is the slowpath function for Eager mode.
  This is for function image_summary
  """
    _ctx = _context.context()
    if max_images is None:
        max_images = 3
    max_images = _execute.make_int(max_images, "max_images")
    if bad_color is None:
        bad_color = _execute.make_tensor(
            """dtype: DT_UINT8 tensor_shape { dim { size: 4 } } int_val: 255 int_val: 0 int_val: 0 int_val: 255""",
            "bad_color")
    bad_color = _execute.make_tensor(bad_color, "bad_color")
    _attr_T, (tensor, ) = _execute.args_to_matching_eager([tensor], _ctx,
                                                          _dtypes.float32)
    tag = _ops.convert_to_tensor(tag, _dtypes.string)
    _inputs_flat = [tag, tensor]
    _attrs = ("max_images", max_images, "T", _attr_T, "bad_color", bad_color)
    _result = _execute.execute(b"ImageSummary",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("ImageSummary", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result
Example #55
def mfcc_eager_fallback(spectrogram, sample_rate, upper_frequency_limit,
                        lower_frequency_limit, filterbank_channel_count,
                        dct_coefficient_count, name, ctx):
    if upper_frequency_limit is None:
        upper_frequency_limit = 4000
    upper_frequency_limit = _execute.make_float(upper_frequency_limit,
                                                "upper_frequency_limit")
    if lower_frequency_limit is None:
        lower_frequency_limit = 20
    lower_frequency_limit = _execute.make_float(lower_frequency_limit,
                                                "lower_frequency_limit")
    if filterbank_channel_count is None:
        filterbank_channel_count = 40
    filterbank_channel_count = _execute.make_int(filterbank_channel_count,
                                                 "filterbank_channel_count")
    if dct_coefficient_count is None:
        dct_coefficient_count = 13
    dct_coefficient_count = _execute.make_int(dct_coefficient_count,
                                              "dct_coefficient_count")
    spectrogram = _ops.convert_to_tensor(spectrogram, _dtypes.float32)
    sample_rate = _ops.convert_to_tensor(sample_rate, _dtypes.int32)
    _inputs_flat = [spectrogram, sample_rate]
    _attrs = ("upper_frequency_limit", upper_frequency_limit,
              "lower_frequency_limit", lower_frequency_limit,
              "filterbank_channel_count", filterbank_channel_count,
              "dct_coefficient_count", dct_coefficient_count)
    _result = _execute.execute(b"Mfcc",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=ctx,
                               name=name)
    if _execute.must_record_gradient():
        _execute.record_gradient("Mfcc", _inputs_flat, _attrs, _result)
    _result, = _result
    return _result
Example #56
def tensor_forest_tree_predict_eager_fallback(tree_handle,
                                              dense_features,
                                              logits_dimension,
                                              name=None,
                                              ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function tensor_forest_tree_predict
  """
    _ctx = ctx if ctx else _context.context()
    logits_dimension = _execute.make_int(logits_dimension, "logits_dimension")
    tree_handle = _ops.convert_to_tensor(tree_handle, _dtypes.resource)
    dense_features = _ops.convert_to_tensor(dense_features, _dtypes.float32)
    _inputs_flat = [tree_handle, dense_features]
    _attrs = ("logits_dimension", logits_dimension)
    _result = _execute.execute(b"TensorForestTreePredict",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("TensorForestTreePredict", _inputs_flat, _attrs,
                             _result, name)
    _result, = _result
    return _result
Example #57
def enter_eager_fallback(data, frame_name, is_constant, parallel_iterations,
                         name, ctx):
    frame_name = _execute.make_str(frame_name, "frame_name")
    if is_constant is None:
        is_constant = False
    is_constant = _execute.make_bool(is_constant, "is_constant")
    if parallel_iterations is None:
        parallel_iterations = 10
    parallel_iterations = _execute.make_int(parallel_iterations,
                                            "parallel_iterations")
    _attr_T, (data, ) = _execute.args_to_matching_eager([data], ctx)
    _inputs_flat = [data]
    _attrs = ("T", _attr_T, "frame_name", frame_name, "is_constant",
              is_constant, "parallel_iterations", parallel_iterations)
    _result = _execute.execute(b"Enter",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=ctx,
                               name=name)
    if _execute.must_record_gradient():
        _execute.record_gradient("Enter", _inputs_flat, _attrs, _result)
    _result, = _result
    return _result
Example #58
def tensor_forest_tree_resource_handle_op_eager_fallback(
        container="", shared_name="", name=None, ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function tensor_forest_tree_resource_handle_op
  """
    _ctx = ctx if ctx else _context.context()
    if container is None:
        container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
        shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _inputs_flat = []
    _attrs = ("container", container, "shared_name", shared_name)
    _result = _execute.execute(b"TensorForestTreeResourceHandleOp",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("TensorForestTreeResourceHandleOp", _inputs_flat,
                             _attrs, _result, name)
    _result, = _result
    return _result
Example #59
def ignore_errors_dataset_eager_fallback(input_dataset, output_types, output_shapes, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function ignore_errors_dataset
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'ignore_errors_dataset' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'ignore_errors_dataset' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant)
  _inputs_flat = [input_dataset]
  _attrs = ("output_types", output_types, "output_shapes", output_shapes)
  _result = _execute.execute(b"IgnoreErrorsDataset", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "IgnoreErrorsDataset", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
Example #60
def resampler_grad_eager_fallback(data,
                                  warp,
                                  grad_output,
                                  name=None,
                                  ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function resampler_grad
  """
    _ctx = ctx if ctx else _context.context()
    _attr_T, _inputs_T = _execute.args_to_matching_eager(
        [data, warp, grad_output], _ctx)
    (data, warp, grad_output) = _inputs_T
    _inputs_flat = [data, warp, grad_output]
    _attrs = ("T", _attr_T)
    _result = _execute.execute(b"ResamplerGrad",
                               2,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("ResamplerGrad", _inputs_flat, _attrs, _result,
                             name)
    _result = _ResamplerGradOutput._make(_result)
    return _result