def testArgsToMatchingEagerDefault(self):
  # Uses default
  t, r = execute.args_to_matching_eager([[3, 4]], dtypes.int32)
  self.assertEqual(t, dtypes.int32)
  self.assertEqual(r[0].dtype, dtypes.int32)
  t, r = execute.args_to_matching_eager([[3, 4]], dtypes.int64)
  self.assertEqual(t, dtypes.int64)
  self.assertEqual(r[0].dtype, dtypes.int64)
  # Doesn't use default
  t, r = execute.args_to_matching_eager([['string', 'arg']], dtypes.int32)
  self.assertEqual(t, dtypes.string)
  self.assertEqual(r[0].dtype, dtypes.string)
def _eager_reshape(tensor, shape):
  """Eager-only version of Reshape op; requires tensor is an eager Tensor."""
  attr_t = tensor.dtype.as_datatype_enum
  attr_tshape, (shape,) = execute.args_to_matching_eager([shape], dtypes.int32)
  attr_tshape = attr_tshape.as_datatype_enum
  inputs_flat = [tensor, shape]
  attrs = ("T", attr_t, "Tshape", attr_tshape)
  result, = execute.execute(b"Reshape", 1, inputs=inputs_flat, attrs=attrs)
  return result
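# A minimal usage sketch of the helper above, assuming eager execution is
# enabled; `constant_op` is an assumed import (it is not part of the snippet
# above) from tensorflow.python.framework.
from tensorflow.python.framework import constant_op

t = constant_op.constant([1, 2, 3, 4])  # an eager Tensor of dtype int32
r = _eager_reshape(t, [2, 2])           # the shape list is matched to int32
assert r.shape == (2, 2)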
def single_image_random_dot_stereograms_eager_fallback(
    depth_values, hidden_surface_removal=True, convergence_dots_size=8,
    dots_per_inch=72, eye_separation=2.5, mu=0.3333, normalize=True,
    normalize_max=-100, normalize_min=100, border_level=0, number_colors=256,
    output_image_shape=[1024, 768, 1], output_data_window=[1022, 757],
    name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function single_image_random_dot_stereograms
  """
  _ctx = ctx if ctx else _context.context()
  if hidden_surface_removal is None:
    hidden_surface_removal = True
  hidden_surface_removal = _execute.make_bool(hidden_surface_removal,
                                              "hidden_surface_removal")
  if convergence_dots_size is None:
    convergence_dots_size = 8
  convergence_dots_size = _execute.make_int(convergence_dots_size,
                                            "convergence_dots_size")
  if dots_per_inch is None:
    dots_per_inch = 72
  dots_per_inch = _execute.make_int(dots_per_inch, "dots_per_inch")
  if eye_separation is None:
    eye_separation = 2.5
  eye_separation = _execute.make_float(eye_separation, "eye_separation")
  if mu is None:
    mu = 0.3333
  mu = _execute.make_float(mu, "mu")
  if normalize is None:
    normalize = True
  normalize = _execute.make_bool(normalize, "normalize")
  if normalize_max is None:
    normalize_max = -100
  normalize_max = _execute.make_float(normalize_max, "normalize_max")
  if normalize_min is None:
    normalize_min = 100
  normalize_min = _execute.make_float(normalize_min, "normalize_min")
  if border_level is None:
    border_level = 0
  border_level = _execute.make_float(border_level, "border_level")
  if number_colors is None:
    number_colors = 256
  number_colors = _execute.make_int(number_colors, "number_colors")
  if output_image_shape is None:
    output_image_shape = [1024, 768, 1]
  output_image_shape = _execute.make_shape(output_image_shape,
                                           "output_image_shape")
  if output_data_window is None:
    output_data_window = [1022, 757]
  output_data_window = _execute.make_shape(output_data_window,
                                           "output_data_window")
  _attr_T, (depth_values,) = _execute.args_to_matching_eager([depth_values],
                                                             _ctx)
  _inputs_flat = [depth_values]
  _attrs = ("T", _attr_T, "hidden_surface_removal", hidden_surface_removal,
            "convergence_dots_size", convergence_dots_size, "dots_per_inch",
            dots_per_inch, "eye_separation", eye_separation, "mu", mu,
            "normalize", normalize, "normalize_max", normalize_max,
            "normalize_min", normalize_min, "border_level", border_level,
            "number_colors", number_colors, "output_image_shape",
            output_image_shape, "output_data_window", output_data_window)
  _result = _execute.execute(b"SingleImageRandomDotStereograms", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "SingleImageRandomDotStereograms", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def _eager_reshape(tensor, shape, ctx):
  """Eager-only version of Reshape op; requires tensor is an eager Tensor."""
  attr_t = tensor._datatype_enum()  # pylint: disable=protected-access
  attr_tshape, (shape,) = execute.args_to_matching_eager(
      [shape], ctx, dtypes.int32)
  inputs_flat = [tensor, shape]
  attrs = ("T", attr_t, "Tshape", attr_tshape)
  result, = execute.execute(
      b"Reshape", 1, inputs=inputs_flat, attrs=attrs, ctx=ctx)
  return result
def xla_cluster_output_eager_fallback(input, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function xla_cluster_output
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"XlaClusterOutput", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "XlaClusterOutput", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def image_connected_components_eager_fallback(image, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function image_connected_components
  """
  _ctx = ctx if ctx else _context.context()
  _attr_dtype, (image,) = _execute.args_to_matching_eager([image], _ctx)
  _inputs_flat = [image]
  _attrs = ("dtype", _attr_dtype)
  _result = _execute.execute(b"ImageConnectedComponents", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "ImageConnectedComponents", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def image_projective_transform_eager_fallback(images, transforms,
                                              interpolation, name=None,
                                              ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function image_projective_transform
  """
  _ctx = ctx if ctx else _context.context()
  interpolation = _execute.make_str(interpolation, "interpolation")
  _attr_dtype, (images,) = _execute.args_to_matching_eager([images], _ctx)
  transforms = _ops.convert_to_tensor(transforms, _dtypes.float32)
  _inputs_flat = [images, transforms]
  _attrs = ("dtype", _attr_dtype, "interpolation", interpolation)
  _result = _execute.execute(b"ImageProjectiveTransform", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "ImageProjectiveTransform", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def adjust_hsv_in_yiq_eager_fallback(images, delta_h, scale_s, scale_v,
                                     name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function adjust_hsv_in_yiq
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (images,) = _execute.args_to_matching_eager([images], _ctx)
  delta_h = _ops.convert_to_tensor(delta_h, _dtypes.float32)
  scale_s = _ops.convert_to_tensor(scale_s, _dtypes.float32)
  scale_v = _ops.convert_to_tensor(scale_v, _dtypes.float32)
  _inputs_flat = [images, delta_h, scale_s, scale_v]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"AdjustHsvInYiq", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "AdjustHsvInYiq", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def substr_eager_fallback(input, pos, len, name=None):
  r"""This is the slowpath function for Eager mode.
  This is for function substr
  """
  _ctx = _context.context()
  _attr_T, _inputs_T = _execute.args_to_matching_eager([pos, len], _ctx)
  (pos, len) = _inputs_T
  input = _ops.convert_to_tensor(input, _dtypes.string)
  _inputs_flat = [input, pos, len]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Substr", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient("Substr", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def tensor_summary_v2_eager_fallback(tag, tensor, serialized_summary_metadata,
                                     name, ctx):
  _attr_T, (tensor,) = _execute.args_to_matching_eager([tensor], ctx)
  tag = _ops.convert_to_tensor(tag, _dtypes.string)
  serialized_summary_metadata = _ops.convert_to_tensor(
      serialized_summary_metadata, _dtypes.string)
  _inputs_flat = [tag, tensor, serialized_summary_metadata]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"TensorSummaryV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("TensorSummaryV2", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
def periodic_resample_eager_fallback(values, shape, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function periodic_resample
  """
  _ctx = ctx if ctx else _context.context()
  shape = _execute.make_shape(shape, "shape")
  _attr_T, (values,) = _execute.args_to_matching_eager([values], _ctx)
  _inputs_flat = [values]
  _attrs = ("T", _attr_T, "shape", shape)
  _result = _execute.execute(b"PeriodicResample", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "PeriodicResample", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def block_lstm_eager_fallback(seq_len_max, x, cs_prev, h_prev, w, wci, wcf,
                              wco, b, forget_bias=1, cell_clip=3,
                              use_peephole=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function block_lstm
  """
  _ctx = ctx if ctx else _context.context()
  if forget_bias is None:
    forget_bias = 1
  forget_bias = _execute.make_float(forget_bias, "forget_bias")
  if cell_clip is None:
    cell_clip = 3
  cell_clip = _execute.make_float(cell_clip, "cell_clip")
  if use_peephole is None:
    use_peephole = False
  use_peephole = _execute.make_bool(use_peephole, "use_peephole")
  _attr_T, _inputs_T = _execute.args_to_matching_eager(
      [x, cs_prev, h_prev, w, wci, wcf, wco, b], _ctx)
  (x, cs_prev, h_prev, w, wci, wcf, wco, b) = _inputs_T
  seq_len_max = _ops.convert_to_tensor(seq_len_max, _dtypes.int64)
  _inputs_flat = [seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b]
  _attrs = ("forget_bias", forget_bias, "cell_clip", cell_clip,
            "use_peephole", use_peephole, "T", _attr_T)
  _result = _execute.execute(b"BlockLSTM", 7, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient("BlockLSTM", _inputs_flat, _attrs, _result, name)
  _result = _BlockLSTMOutput._make(_result)
  return _result
def collective_gather_eager_fallback(input, group_size, group_key,
                                     instance_key, shape, name=None,
                                     ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function collective_gather
  """
  _ctx = ctx if ctx else _context.context()
  group_size = _execute.make_int(group_size, "group_size")
  group_key = _execute.make_int(group_key, "group_key")
  instance_key = _execute.make_int(instance_key, "instance_key")
  shape = _execute.make_shape(shape, "shape")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T, "group_size", group_size, "group_key", group_key,
            "instance_key", instance_key, "shape", shape)
  _result = _execute.execute(b"CollectiveGather", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "CollectiveGather", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def write_scalar_summary_eager_fallback(writer, step, tag, value, name=None):
  r"""This is the slowpath function for Eager mode.
  This is for function write_scalar_summary
  """
  _ctx = _context.context()
  _attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)
  writer = _ops.convert_to_tensor(writer, _dtypes.resource)
  step = _ops.convert_to_tensor(step, _dtypes.int64)
  tag = _ops.convert_to_tensor(tag, _dtypes.string)
  _inputs_flat = [writer, step, tag, value]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"WriteScalarSummary", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _result = None
  return _result
def nccl_broadcast_eager_fallback(input, shape, name=None):
  r"""This is the slowpath function for Eager mode.
  This is for function nccl_broadcast
  """
  _ctx = _context.context()
  shape = _execute.make_shape(shape, "shape")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T, "shape", shape)
  _result = _execute.execute(b"NcclBroadcast", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "NcclBroadcast", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def set_size_eager_fallback(set_indices, set_values, set_shape,
                            validate_indices=True, name=None):
  r"""This is the slowpath function for Eager mode.
  This is for function set_size
  """
  _ctx = _context.context()
  if validate_indices is None:
    validate_indices = True
  validate_indices = _execute.make_bool(validate_indices, "validate_indices")
  _attr_T, (set_values,) = _execute.args_to_matching_eager([set_values], _ctx)
  set_indices = _ops.convert_to_tensor(set_indices, _dtypes.int64)
  set_shape = _ops.convert_to_tensor(set_shape, _dtypes.int64)
  _inputs_flat = [set_indices, set_values, set_shape]
  _attrs = ("validate_indices", validate_indices, "T", _attr_T)
  _result = _execute.execute(b"SetSize", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "SetSize", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def ragged_range_eager_fallback(starts, limits, deltas, Tsplits, name, ctx):
  if Tsplits is None:
    Tsplits = _dtypes.int64
  Tsplits = _execute.make_type(Tsplits, "Tsplits")
  _attr_T, _inputs_T = _execute.args_to_matching_eager(
      [starts, limits, deltas], ctx, _dtypes.int32)
  (starts, limits, deltas) = _inputs_T
  _inputs_flat = [starts, limits, deltas]
  _attrs = ("T", _attr_T, "Tsplits", Tsplits)
  _result = _execute.execute(b"RaggedRange", 2, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("RaggedRange", _inputs_flat, _attrs, _result)
  _result = _RaggedRangeOutput._make(_result)
  return _result
def non_deterministic_ints_eager_fallback(shape, dtype, name, ctx):
  if dtype is None:
    dtype = _dtypes.int64
  dtype = _execute.make_type(dtype, "dtype")
  _attr_shape_dtype, (shape,) = _execute.args_to_matching_eager(
      [shape], ctx, [], _dtypes.int64)
  _inputs_flat = [shape]
  _attrs = ("dtype", dtype, "shape_dtype", _attr_shape_dtype)
  _result = _execute.execute(b"NonDeterministicInts", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("NonDeterministicInts", _inputs_flat, _attrs,
                             _result)
  _result, = _result
  return _result
def _switch(data, pred, name=None):
  r"""Forwards `data` to the output port determined by `pred`.

  If `pred` is true, the `data` input is forwarded to `output_true`.
  Otherwise, the data goes to `output_false`.

  See also `RefSwitch` and `Merge`.

  Args:
    data: A `Tensor`. The tensor to be forwarded to the appropriate output.
    pred: A `Tensor` of type `bool`.
      A scalar that specifies which output port will receive data.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output_false, output_true).

    output_false: A `Tensor`. Has the same type as `data`.
    output_true: A `Tensor`. Has the same type as `data`.
  """
  _ctx = _context.context()
  if _ctx.in_graph_mode():
    _, _, _op = _op_def_lib._apply_op_helper("Switch", data=data, pred=pred,
                                             name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
  else:
    _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
    pred = _ops.convert_to_tensor(pred, _dtypes.bool)
    _inputs_flat = [data, pred]
    _attrs = ("T", _attr_T)
    _result = _execute.execute(b"Switch", 2, inputs=_inputs_flat,
                               attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient("Switch", _inputs_flat, _attrs, _result, name)
  _result = _SwitchOutput._make(_result)
  return _result
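# Illustrative sketch: Switch is the low-level primitive behind tf.cond, so
# user code normally reaches it through the public API as shown here (the
# public `tensorflow` import is an assumption, not part of the module above).
import tensorflow as tf

x = tf.constant(4.0)
y = tf.cond(x > 0, lambda: tf.sqrt(x), lambda: x)  # sqrt branch taken: 2.0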
def stateless_random_normal(shape, seed, dtype=_dtypes.float32, name=None):
  r"""Outputs deterministic pseudorandom values from a normal distribution.

  The generated values will have mean 0 and standard deviation 1. The outputs
  are a deterministic function of `shape` and `seed`.

  Args:
    shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The shape of the output tensor.
    seed: A `Tensor` of type `int64`. 2 seeds (shape [2]).
    dtype: An optional `tf.DType` from: `tf.half, tf.float32, tf.float64`.
      Defaults to `tf.float32`. The type of the output.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`. Random values with specified shape.
  """
  if dtype is None:
    dtype = _dtypes.float32
  dtype = _execute.make_type(dtype, "dtype")
  _ctx = _context.context()
  if _ctx.in_graph_mode():
    _, _, _op = _op_def_lib._apply_op_helper(
        "StatelessRandomNormal", shape=shape, seed=seed, dtype=dtype,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("dtype", _op.get_attr("dtype"), "T", _op.get_attr("T"))
  else:
    _attr_T, (shape,) = _execute.args_to_matching_eager([shape], _ctx,
                                                        _dtypes.int32)
    _attr_T = _attr_T.as_datatype_enum
    seed = _ops.convert_to_tensor(seed, _dtypes.int64)
    _inputs_flat = [shape, seed]
    _attrs = ("dtype", dtype, "T", _attr_T)
    _result = _execute.execute(b"StatelessRandomNormal", 1,
                               inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                               name=name)
  _execute.record_gradient(
      "StatelessRandomNormal", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
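# Determinism sketch via the public endpoint for this op
# (tf.random.stateless_normal in TF 2.x; assumed equivalent here): the same
# shape and seed reproduce the same values on every call.
import tensorflow as tf

a = tf.random.stateless_normal(shape=[2, 3], seed=[7, 42])
b = tf.random.stateless_normal(shape=[2, 3], seed=[7, 42])
assert bool(tf.reduce_all(tf.equal(a, b)))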
def resampler_grad(data, warp, grad_output, name=None):
  r"""Resampler Grad op.

  Args:
    data: A `Tensor`. Must be one of the following types: `half`, `float32`,
      `float64`.
    warp: A `Tensor`. Must have the same type as `data`.
    grad_output: A `Tensor`. Must have the same type as `data`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (grad_data, grad_warp).

    grad_data: A `Tensor`. Has the same type as `data`.
    grad_warp: A `Tensor`. Has the same type as `data`.
  """
  _ctx = _context.context()
  if _ctx.in_graph_mode():
    _, _, _op = _op_def_lib._apply_op_helper("ResamplerGrad", data=data,
                                             warp=warp,
                                             grad_output=grad_output,
                                             name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
  else:
    _attr_T, _inputs_T = _execute.args_to_matching_eager(
        [data, warp, grad_output], _ctx)
    (data, warp, grad_output) = _inputs_T
    _attr_T = _attr_T.as_datatype_enum
    _inputs_flat = [data, warp, grad_output]
    _attrs = ("T", _attr_T)
    _result = _execute.execute(b"ResamplerGrad", 2, inputs=_inputs_flat,
                               attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "ResamplerGrad", _inputs_flat, _attrs, _result, name)
  _result = _ResamplerGradOutput._make(_result)
  return _result
def nccl_broadcast_send(input, num_devices, shared_name, name=None):
  r"""Sends `input` to the NcclBroadcastRecv ops registered with the same
  `shared_name`.

  The graph should be constructed so that one device runs `NcclBroadcastSend`
  and `num_devices-1` devices run NcclBroadcastRecv ops with the same
  `shared_name` value. Failure to do so will cause the graph execution to
  fail to complete.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`,
      `float64`, `int32`, `int64`. The input to the broadcast.
    num_devices: An `int`.
      The number of devices participating in this reduction.
    shared_name: A `string`.
      Identifier that is shared between ops of the same broadcast.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  num_devices = _execute.make_int(num_devices, "num_devices")
  shared_name = _execute.make_str(shared_name, "shared_name")
  _ctx = _context.context()
  if _ctx.in_graph_mode():
    _, _, _op = _op_def_lib._apply_op_helper("NcclBroadcastSend", input=input,
                                             num_devices=num_devices,
                                             shared_name=shared_name,
                                             name=name)
    return _op
  else:
    _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
    _attr_T = _attr_T.as_datatype_enum
    _inputs_flat = [input]
    _attrs = ("T", _attr_T, "num_devices", num_devices, "shared_name",
              shared_name)
    _result = _execute.execute(b"NcclBroadcastSend", 0, inputs=_inputs_flat,
                               attrs=_attrs, ctx=_ctx, name=name)
    return _result
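# Hypothetical wiring sketch for the shared_name pairing described above
# (TF 1.x graph mode with two GPUs assumed; the `nccl_broadcast_recv` name
# and its signature are assumptions, not taken from this module).
import tensorflow as tf

t = tf.ones([4])
with tf.device('/gpu:0'):
  send = nccl_broadcast_send(t, num_devices=2, shared_name='bcast0')
with tf.device('/gpu:1'):
  # The receiver is keyed to the sender by the identical shared_name.
  out = nccl_broadcast_recv(shape=tf.shape(t), T=t.dtype, num_devices=2,
                            shared_name='bcast0')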
def collective_reduce_eager_fallback(input, group_size, group_key,
                                     instance_key, merge_op, final_op,
                                     subdiv_offsets, wait_for,
                                     communication_hint, name, ctx):
  group_size = _execute.make_int(group_size, "group_size")
  group_key = _execute.make_int(group_key, "group_key")
  instance_key = _execute.make_int(instance_key, "instance_key")
  merge_op = _execute.make_str(merge_op, "merge_op")
  final_op = _execute.make_str(final_op, "final_op")
  if not isinstance(subdiv_offsets, (list, tuple)):
    raise TypeError("Expected list for 'subdiv_offsets' argument to "
                    "'collective_reduce' Op, not %r." % subdiv_offsets)
  subdiv_offsets = [_execute.make_int(_i, "subdiv_offsets")
                    for _i in subdiv_offsets]
  if wait_for is None:
    wait_for = []
  if not isinstance(wait_for, (list, tuple)):
    raise TypeError("Expected list for 'wait_for' argument to "
                    "'collective_reduce' Op, not %r." % wait_for)
  wait_for = [_execute.make_int(_i, "wait_for") for _i in wait_for]
  if communication_hint is None:
    communication_hint = "auto"
  communication_hint = _execute.make_str(communication_hint,
                                         "communication_hint")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T, "group_size", group_size, "group_key", group_key,
            "instance_key", instance_key, "merge_op", merge_op, "final_op",
            final_op, "subdiv_offsets", subdiv_offsets, "wait_for", wait_for,
            "communication_hint", communication_hint)
  _result = _execute.execute(b"CollectiveReduce", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("CollectiveReduce", _inputs_flat, _attrs,
                             _result)
  _result, = _result
  return _result
def skip_gram_generate_candidates_eager_fallback(input_tensor, min_skips,
                                                 max_skips, start, limit,
                                                 emit_self_as_target, seed=0,
                                                 seed2=0, name=None,
                                                 ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function skip_gram_generate_candidates
  """
  _ctx = ctx if ctx else _context.context()
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  _attr_T, (input_tensor,) = _execute.args_to_matching_eager([input_tensor],
                                                             _ctx)
  min_skips = _ops.convert_to_tensor(min_skips, _dtypes.int32)
  max_skips = _ops.convert_to_tensor(max_skips, _dtypes.int32)
  start = _ops.convert_to_tensor(start, _dtypes.int32)
  limit = _ops.convert_to_tensor(limit, _dtypes.int32)
  emit_self_as_target = _ops.convert_to_tensor(emit_self_as_target,
                                               _dtypes.bool)
  _inputs_flat = [input_tensor, min_skips, max_skips, start, limit,
                  emit_self_as_target]
  _attrs = ("T", _attr_T, "seed", seed, "seed2", seed2)
  _result = _execute.execute(b"SkipGramGenerateCandidates", 2,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "SkipGramGenerateCandidates", _inputs_flat, _attrs, _result, name)
  _result = _SkipGramGenerateCandidatesOutput._make(_result)
  return _result
def tensor_list_push_back(input_handle, tensor, name=None):
  r"""Returns a list which has the passed-in `Tensor` as last element
  and the other elements of the given list in `input_handle`.

  tensor: The tensor to put on the list.
  input_handle: The old list.
  output_handle: A list with the elements of the old list followed by tensor.
  element_dtype: the type of elements in the list.
  element_shape: a shape compatible with that of elements in the list.

  Args:
    input_handle: A `Tensor` of type `variant`.
    tensor: A `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `variant`.
  """
  _ctx = _context.context()
  if _ctx.in_graph_mode():
    _, _, _op = _op_def_lib._apply_op_helper("TensorListPushBack",
                                             input_handle=input_handle,
                                             tensor=tensor, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("element_dtype", _op.get_attr("element_dtype"))
  else:
    _attr_element_dtype, (tensor,) = _execute.args_to_matching_eager(
        [tensor], _ctx)
    input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant)
    _inputs_flat = [input_handle, tensor]
    _attrs = ("element_dtype", _attr_element_dtype)
    _result = _execute.execute(b"TensorListPushBack", 1, inputs=_inputs_flat,
                               attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "TensorListPushBack", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
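# Sketch via the public raw-op endpoints (TF 2.x assumed): build an empty
# TensorList and push one element; the list handle is a variant tensor.
import tensorflow as tf

handle = tf.raw_ops.EmptyTensorList(
    element_shape=tf.constant([2], dtype=tf.int32),
    max_num_elements=-1,
    element_dtype=tf.float32)
handle = tf.raw_ops.TensorListPushBack(
    input_handle=handle, tensor=tf.constant([1.0, 2.0]))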
def assign_sub_variable_op(resource, value, name=None):
  r"""Subtracts a value from the current value of a variable.

  Any ReadVariableOp which depends directly or indirectly on this assign is
  guaranteed to see the decremented value or a subsequent newer one.

  Outputs the decremented value, which can be used to totally order the
  updates to this variable.

  Args:
    resource: A `Tensor` of type `resource`.
      handle to the resource in which to store the variable.
    value: A `Tensor`. the value by which the variable will be decremented.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context.context()
  if _ctx.in_graph_mode():
    _, _, _op = _op_def_lib._apply_op_helper("AssignSubVariableOp",
                                             resource=resource, value=value,
                                             name=name)
    return _op
  else:
    _attr_dtype, (value,) = _execute.args_to_matching_eager([value], _ctx)
    resource = _ops.convert_to_tensor(resource, _dtypes.resource)
    _inputs_flat = [resource, value]
    _attrs = ("dtype", _attr_dtype)
    _result = _execute.execute(b"AssignSubVariableOp", 0,
                               inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                               name=name)
    _result = None
    return _result
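# For reference, the public TF 2.x path that lowers to AssignSubVariableOp
# (a sketch, not part of the generated module above).
import tensorflow as tf

v = tf.Variable(10.0)
v.assign_sub(3.0)       # subtracts in place via AssignSubVariableOp
assert v.numpy() == 7.0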
def debug_numeric_summary_eager_fallback(input, device_name="",
                                         tensor_name="", debug_urls=[],
                                         lower_bound=float('-inf'),
                                         upper_bound=float('inf'),
                                         mute_if_healthy=False,
                                         gated_grpc=False, name=None,
                                         ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function debug_numeric_summary
  """
  _ctx = ctx if ctx else _context.context()
  if device_name is None:
    device_name = ""
  device_name = _execute.make_str(device_name, "device_name")
  if tensor_name is None:
    tensor_name = ""
  tensor_name = _execute.make_str(tensor_name, "tensor_name")
  if debug_urls is None:
    debug_urls = []
  if not isinstance(debug_urls, (list, tuple)):
    raise TypeError("Expected list for 'debug_urls' argument to "
                    "'debug_numeric_summary' Op, not %r." % debug_urls)
  debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
  if lower_bound is None:
    lower_bound = float('-inf')
  lower_bound = _execute.make_float(lower_bound, "lower_bound")
  if upper_bound is None:
    upper_bound = float('inf')
  upper_bound = _execute.make_float(upper_bound, "upper_bound")
  if mute_if_healthy is None:
    mute_if_healthy = False
  mute_if_healthy = _execute.make_bool(mute_if_healthy, "mute_if_healthy")
  if gated_grpc is None:
    gated_grpc = False
  gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T, "device_name", device_name, "tensor_name",
            tensor_name, "debug_urls", debug_urls, "lower_bound", lower_bound,
            "upper_bound", upper_bound, "mute_if_healthy", mute_if_healthy,
            "gated_grpc", gated_grpc)
  _result = _execute.execute(b"DebugNumericSummary", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "DebugNumericSummary", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def ragged_range_eager_fallback(starts, limits, deltas, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function ragged_range
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, _inputs_T = _execute.args_to_matching_eager(
      [starts, limits, deltas], _ctx, _dtypes.int32)
  (starts, limits, deltas) = _inputs_T
  _inputs_flat = [starts, limits, deltas]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"RaggedRange", 2, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "RaggedRange", _inputs_flat, _attrs, _result, name)
  _result = _RaggedRangeOutput._make(_result)
  return _result
def gru_block_cell_eager_fallback(x, h_prev, w_ru, w_c, b_ru, b_c, name=None):
  r"""This is the slowpath function for Eager mode.
  This is for function gru_block_cell
  """
  _ctx = _context.context()
  _attr_T, _inputs_T = _execute.args_to_matching_eager(
      [x, h_prev, w_ru, w_c, b_ru, b_c], _ctx)
  (x, h_prev, w_ru, w_c, b_ru, b_c) = _inputs_T
  _inputs_flat = [x, h_prev, w_ru, w_c, b_ru, b_c]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"GRUBlockCell", 4, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "GRUBlockCell", _inputs_flat, _attrs, _result, name)
  _result = _GRUBlockCellOutput._make(_result)
  return _result
def nccl_all_reduce_eager_fallback(input, reduction, num_devices, shared_name,
                                   name, ctx):
  reduction = _execute.make_str(reduction, "reduction")
  num_devices = _execute.make_int(num_devices, "num_devices")
  shared_name = _execute.make_str(shared_name, "shared_name")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx)
  _inputs_flat = [input]
  _attrs = ("reduction", reduction, "T", _attr_T, "num_devices", num_devices,
            "shared_name", shared_name)
  _result = _execute.execute(b"NcclAllReduce", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("NcclAllReduce", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
def nccl_reduce_eager_fallback(input, reduction, name, ctx):
  if not isinstance(input, (list, tuple)):
    raise TypeError("Expected list for 'input' argument to "
                    "'nccl_reduce' Op, not %r." % input)
  _attr_num_devices = len(input)
  reduction = _execute.make_str(reduction, "reduction")
  _attr_T, input = _execute.args_to_matching_eager(list(input), ctx)
  _inputs_flat = list(input)
  _attrs = ("reduction", reduction, "T", _attr_T, "num_devices",
            _attr_num_devices)
  _result = _execute.execute(b"NcclReduce", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("NcclReduce", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
def block_lstmv2_eager_fallback(seq_len_max, x, cs_prev, h_prev, w, wci, wcf,
                                wco, b, cell_clip, use_peephole, name, ctx):
  if cell_clip is None:
    cell_clip = 0
  cell_clip = _execute.make_float(cell_clip, "cell_clip")
  if use_peephole is None:
    use_peephole = False
  use_peephole = _execute.make_bool(use_peephole, "use_peephole")
  _attr_T, _inputs_T = _execute.args_to_matching_eager(
      [x, cs_prev, h_prev, w, wci, wcf, wco, b], ctx)
  (x, cs_prev, h_prev, w, wci, wcf, wco, b) = _inputs_T
  seq_len_max = _ops.convert_to_tensor(seq_len_max, _dtypes.int64)
  _inputs_flat = [seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b]
  _attrs = ("cell_clip", cell_clip, "use_peephole", use_peephole, "T",
            _attr_T)
  _result = _execute.execute(b"BlockLSTMV2", 7, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("BlockLSTMV2", _inputs_flat, _attrs, _result)
  _result = _BlockLSTMV2Output._make(_result)
  return _result
def stateful_uniform_full_int_eager_fallback(resource, algorithm, shape,
                                             dtype=_dtypes.uint64, name=None,
                                             ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function stateful_uniform_full_int
  """
  _ctx = ctx if ctx else _context.context()
  if dtype is None:
    dtype = _dtypes.uint64
  dtype = _execute.make_type(dtype, "dtype")
  _attr_shape_dtype, (shape,) = _execute.args_to_matching_eager(
      [shape], _ctx, _dtypes.int64)
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  algorithm = _ops.convert_to_tensor(algorithm, _dtypes.int64)
  _inputs_flat = [resource, algorithm, shape]
  _attrs = ("dtype", dtype, "shape_dtype", _attr_shape_dtype)
  _result = _execute.execute(b"StatefulUniformFullInt", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "StatefulUniformFullInt", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def resampler_grad_eager_fallback(data, warp, grad_output, name=None):
  r"""This is the slowpath function for Eager mode.
  This is for function resampler_grad
  """
  _ctx = _context.context()
  _attr_T, _inputs_T = _execute.args_to_matching_eager(
      [data, warp, grad_output], _ctx)
  (data, warp, grad_output) = _inputs_T
  _inputs_flat = [data, warp, grad_output]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"ResamplerGrad", 2, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "ResamplerGrad", _inputs_flat, _attrs, _result, name)
  _result = _ResamplerGradOutput._make(_result)
  return _result
def stateful_standard_normal_eager_fallback(resource, shape, dtype, name,
                                            ctx):
  if dtype is None:
    dtype = _dtypes.float32
  dtype = _execute.make_type(dtype, "dtype")
  _attr_shape_dtype, (shape,) = _execute.args_to_matching_eager(
      [shape], ctx, _dtypes.int64)
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  _inputs_flat = [resource, shape]
  _attrs = ("dtype", dtype, "shape_dtype", _attr_shape_dtype)
  _result = _execute.execute(b"StatefulStandardNormal", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("StatefulStandardNormal", _inputs_flat, _attrs,
                             _result)
  _result, = _result
  return _result
def stateless_if_eager_fallback(cond, input, Tout, then_branch, else_branch,
                                name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function stateless_if
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(Tout, (list, tuple)):
    raise TypeError("Expected list for 'Tout' argument to "
                    "'stateless_if' Op, not %r." % Tout)
  Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
  _attr_Tcond, (cond,) = _execute.args_to_matching_eager([cond], _ctx)
  _attr_Tin, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)
  _inputs_flat = [cond] + list(input)
  _attrs = ("Tcond", _attr_Tcond, "Tin", _attr_Tin, "Tout", Tout,
            "then_branch", then_branch, "else_branch", else_branch)
  _result = _execute.execute(b"StatelessIf", len(Tout), inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "StatelessIf", _inputs_flat, _attrs, _result, name)
  return _result
def unbatch_grad_eager_fallback(original_input, batch_index, grad, id,
                                container, shared_name, name, ctx):
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _attr_T, _inputs_T = _execute.args_to_matching_eager(
      [original_input, grad], ctx)
  (original_input, grad) = _inputs_T
  batch_index = _ops.convert_to_tensor(batch_index, _dtypes.int64)
  id = _ops.convert_to_tensor(id, _dtypes.int64)
  _inputs_flat = [original_input, batch_index, grad, id]
  _attrs = ("container", container, "shared_name", shared_name, "T", _attr_T)
  _result = _execute.execute(b"UnbatchGrad", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("UnbatchGrad", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
def _histogram_summary_eager_fallback(tag, values, name=None):
  r"""This is the slowpath function for Eager mode.
  This is for function _histogram_summary
  """
  _ctx = _context.context()
  _attr_T, (values,) = _execute.args_to_matching_eager([values], _ctx,
                                                       _dtypes.float32)
  tag = _ops.convert_to_tensor(tag, _dtypes.string)
  _inputs_flat = [tag, values]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"HistogramSummary", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "HistogramSummary", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result