def periodic_resample_op_grad_eager_fallback(grad,
                                             original_shape,
                                             desired_shape,
                                             name=None,
                                             ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function periodic_resample_op_grad
  """
    _ctx = ctx if ctx else _context.context()
    original_shape = _execute.make_shape(original_shape, "original_shape")
    desired_shape = _execute.make_shape(desired_shape, "desired_shape")
    _attr_T, (grad, ) = _execute.args_to_matching_eager([grad], _ctx)
    _inputs_flat = [grad]
    _attrs = ("T", _attr_T, "original_shape", original_shape, "desired_shape",
              desired_shape)
    _result = _execute.execute(b"PeriodicResampleOpGrad",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("PeriodicResampleOpGrad", _inputs_flat, _attrs,
                             _result, name)
    _result, = _result
    return _result
def single_image_random_dot_stereograms_eager_fallback(depth_values, hidden_surface_removal=True, convergence_dots_size=8, dots_per_inch=72, eye_separation=2.5, mu=0.3333, normalize=True, normalize_max=-100, normalize_min=100, border_level=0, number_colors=256, output_image_shape=[1024, 768, 1], output_data_window=[1022, 757], name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function single_image_random_dot_stereograms
  """
  _ctx = ctx if ctx else _context.context()
  if hidden_surface_removal is None:
    hidden_surface_removal = True
  hidden_surface_removal = _execute.make_bool(hidden_surface_removal, "hidden_surface_removal")
  if convergence_dots_size is None:
    convergence_dots_size = 8
  convergence_dots_size = _execute.make_int(convergence_dots_size, "convergence_dots_size")
  if dots_per_inch is None:
    dots_per_inch = 72
  dots_per_inch = _execute.make_int(dots_per_inch, "dots_per_inch")
  if eye_separation is None:
    eye_separation = 2.5
  eye_separation = _execute.make_float(eye_separation, "eye_separation")
  if mu is None:
    mu = 0.3333
  mu = _execute.make_float(mu, "mu")
  if normalize is None:
    normalize = True
  normalize = _execute.make_bool(normalize, "normalize")
  if normalize_max is None:
    normalize_max = -100
  normalize_max = _execute.make_float(normalize_max, "normalize_max")
  if normalize_min is None:
    normalize_min = 100
  normalize_min = _execute.make_float(normalize_min, "normalize_min")
  if border_level is None:
    border_level = 0
  border_level = _execute.make_float(border_level, "border_level")
  if number_colors is None:
    number_colors = 256
  number_colors = _execute.make_int(number_colors, "number_colors")
  if output_image_shape is None:
    output_image_shape = [1024, 768, 1]
  output_image_shape = _execute.make_shape(output_image_shape, "output_image_shape")
  if output_data_window is None:
    output_data_window = [1022, 757]
  output_data_window = _execute.make_shape(output_data_window, "output_data_window")
  _attr_T, (depth_values,) = _execute.args_to_matching_eager([depth_values], _ctx)
  _inputs_flat = [depth_values]
  _attrs = ("T", _attr_T, "hidden_surface_removal", hidden_surface_removal,
  "convergence_dots_size", convergence_dots_size, "dots_per_inch",
  dots_per_inch, "eye_separation", eye_separation, "mu", mu, "normalize",
  normalize, "normalize_max", normalize_max, "normalize_min", normalize_min,
  "border_level", border_level, "number_colors", number_colors,
  "output_image_shape", output_image_shape, "output_data_window",
  output_data_window)
  _result = _execute.execute(b"SingleImageRandomDotStereograms", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "SingleImageRandomDotStereograms", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
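# A minimal usage sketch for the fallback above, not part of the generated
# module: it assumes eager execution is active and that the module helpers
# (_context, _execute) are bound as in TensorFlow's generated op files. The
# depth map values below are illustrative.
depth_map = [[0.0, 0.25, 0.5],
             [0.25, 0.5, 0.75],
             [0.5, 0.75, 1.0]]
stereogram = single_image_random_dot_stereograms_eager_fallback(
    depth_map, convergence_dots_size=8, dots_per_inch=72,
    output_image_shape=[1024, 768, 1], output_data_window=[1022, 757])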
Example 4
def periodic_resample_op_grad(grad, original_shape, desired_shape, name=None):
    r"""TODO: add doc.

  Args:
    grad: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
    original_shape: A `tf.TensorShape` or list of `ints`.
    desired_shape: A `tf.TensorShape` or list of `ints`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `grad`.
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        original_shape = _execute.make_shape(original_shape, "original_shape")
        desired_shape = _execute.make_shape(desired_shape, "desired_shape")
        _, _, _op = _op_def_lib._apply_op_helper("PeriodicResampleOpGrad",
                                                 grad=grad,
                                                 original_shape=original_shape,
                                                 desired_shape=desired_shape,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("T", _op.get_attr("T"), "original_shape",
                  _op.get_attr("original_shape"), "desired_shape",
                  _op.get_attr("desired_shape"))
        _execute.record_gradient("PeriodicResampleOpGrad", _inputs_flat,
                                 _attrs, _result, name)
        _result, = _result
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "PeriodicResampleOpGrad", name, _ctx._post_execution_callbacks,
                grad, "original_shape", original_shape, "desired_shape",
                desired_shape)
            return _result
        except _core._FallbackException:
            return periodic_resample_op_grad_eager_fallback(
                grad,
                original_shape=original_shape,
                desired_shape=desired_shape,
                name=name,
                ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
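# A minimal usage sketch, not part of the generated module: the gradient
# values and the two shape attributes are illustrative, and the call assumes
# the surrounding module (op registration, _op_def_lib, _execute) is loaded
# as in TensorFlow's generated op files.
grad_in = [[1.0, 2.0, 3.0, 4.0],
           [5.0, 6.0, 7.0, 8.0]]
resample_grad = periodic_resample_op_grad(grad_in,
                                          original_shape=[4, 2],
                                          desired_shape=[2, 4])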
def collective_bcast_recv_eager_fallback(T,
                                         group_size,
                                         group_key,
                                         instance_key,
                                         shape,
                                         name=None,
                                         ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function collective_bcast_recv
  """
    _ctx = ctx if ctx else _context.context()
    T = _execute.make_type(T, "T")
    group_size = _execute.make_int(group_size, "group_size")
    group_key = _execute.make_int(group_key, "group_key")
    instance_key = _execute.make_int(instance_key, "instance_key")
    shape = _execute.make_shape(shape, "shape")
    _inputs_flat = []
    _attrs = ("T", T, "group_size", group_size, "group_key", group_key,
              "instance_key", instance_key, "shape", shape)
    _result = _execute.execute(b"CollectiveBcastRecv",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("CollectiveBcastRecv", _inputs_flat, _attrs,
                             _result, name)
    _result, = _result
    return _result
Example 6
def csv_dataset_eager_fallback(filenames, buffer_size, header, field_delim, use_quote_delim, na_value, select_cols, record_defaults, output_shapes, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function csv_dataset
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'csv_dataset' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  _attr_output_types, record_defaults = _execute.convert_to_mixed_eager_tensors(record_defaults, _ctx)
  filenames = _ops.convert_to_tensor(filenames, _dtypes.string)
  buffer_size = _ops.convert_to_tensor(buffer_size, _dtypes.int64)
  header = _ops.convert_to_tensor(header, _dtypes.bool)
  field_delim = _ops.convert_to_tensor(field_delim, _dtypes.string)
  use_quote_delim = _ops.convert_to_tensor(use_quote_delim, _dtypes.bool)
  na_value = _ops.convert_to_tensor(na_value, _dtypes.string)
  select_cols = _ops.convert_to_tensor(select_cols, _dtypes.int64)
  _inputs_flat = [filenames, buffer_size, header, field_delim, use_quote_delim, na_value, select_cols] + list(record_defaults)
  _attrs = ("output_types", _attr_output_types, "output_shapes",
  output_shapes)
  _result = _execute.execute(b"CSVDataset", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "CSVDataset", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
Example 7
def stateless_if_eager_fallback(cond, input, Tout, then_branch, else_branch, output_shapes, name, ctx):
  if not isinstance(Tout, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tout' argument to "
        "'stateless_if' Op, not %r." % Tout)
  Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
  if output_shapes is None:
    output_shapes = []
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'stateless_if' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  _attr_Tcond, (cond,) = _execute.args_to_matching_eager([cond], ctx)
  _attr_Tin, input = _execute.convert_to_mixed_eager_tensors(input, ctx)
  _inputs_flat = [cond] + list(input)
  _attrs = ("Tcond", _attr_Tcond, "Tin", _attr_Tin, "Tout", Tout,
  "then_branch", then_branch, "else_branch", else_branch, "output_shapes",
  output_shapes)
  _result = _execute.execute(b"StatelessIf", len(Tout), inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "StatelessIf", _inputs_flat, _attrs, _result)
  return _result
def collective_gather_eager_fallback(input,
                                     group_size,
                                     group_key,
                                     instance_key,
                                     shape,
                                     name=None,
                                     ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function collective_gather
  """
    _ctx = ctx if ctx else _context.context()
    group_size = _execute.make_int(group_size, "group_size")
    group_key = _execute.make_int(group_key, "group_key")
    instance_key = _execute.make_int(instance_key, "instance_key")
    shape = _execute.make_shape(shape, "shape")
    _attr_T, (input, ) = _execute.args_to_matching_eager([input], _ctx)
    _inputs_flat = [input]
    _attrs = ("T", _attr_T, "group_size", group_size, "group_key", group_key,
              "instance_key", instance_key, "shape", shape)
    _result = _execute.execute(b"CollectiveGather",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("CollectiveGather", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result
def collective_gather_eager_fallback(input, group_size, group_key,
                                     instance_key, shape, communication_hint,
                                     timeout_seconds, name, ctx):
    group_size = _execute.make_int(group_size, "group_size")
    group_key = _execute.make_int(group_key, "group_key")
    instance_key = _execute.make_int(instance_key, "instance_key")
    shape = _execute.make_shape(shape, "shape")
    if communication_hint is None:
        communication_hint = "auto"
    communication_hint = _execute.make_str(communication_hint,
                                           "communication_hint")
    if timeout_seconds is None:
        timeout_seconds = 0
    timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
    _attr_T, (input, ) = _execute.args_to_matching_eager([input], ctx)
    _inputs_flat = [input]
    _attrs = ("T", _attr_T, "group_size", group_size, "group_key", group_key,
              "instance_key", instance_key, "shape", shape,
              "communication_hint", communication_hint, "timeout_seconds",
              timeout_seconds)
    _result = _execute.execute(b"CollectiveGather",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=ctx,
                               name=name)
    if _execute.must_record_gradient():
        _execute.record_gradient("CollectiveGather", _inputs_flat, _attrs,
                                 _result)
    _result, = _result
    return _result
Example 10
def collective_bcast_recv_eager_fallback(T, group_size, group_key,
                                         instance_key, shape,
                                         communication_hint, timeout_seconds,
                                         name, ctx):
    T = _execute.make_type(T, "T")
    group_size = _execute.make_int(group_size, "group_size")
    group_key = _execute.make_int(group_key, "group_key")
    instance_key = _execute.make_int(instance_key, "instance_key")
    shape = _execute.make_shape(shape, "shape")
    if communication_hint is None:
        communication_hint = "auto"
    communication_hint = _execute.make_str(communication_hint,
                                           "communication_hint")
    if timeout_seconds is None:
        timeout_seconds = 0
    timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
    _inputs_flat = []
    _attrs = ("T", T, "group_size", group_size, "group_key", group_key,
              "instance_key", instance_key, "shape", shape,
              "communication_hint", communication_hint, "timeout_seconds",
              timeout_seconds)
    _result = _execute.execute(b"CollectiveBcastRecv",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=ctx,
                               name=name)
    if _execute.must_record_gradient():
        _execute.record_gradient("CollectiveBcastRecv", _inputs_flat, _attrs,
                                 _result)
    _result, = _result
    return _result
Example 11
def directed_interleave_dataset_eager_fallback(selector_input_dataset, data_input_datasets, output_types, output_shapes, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function directed_interleave_dataset
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(data_input_datasets, (list, tuple)):
    raise TypeError(
        "Expected list for 'data_input_datasets' argument to "
        "'directed_interleave_dataset' Op, not %r." % data_input_datasets)
  _attr_N = len(data_input_datasets)
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'directed_interleave_dataset' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'directed_interleave_dataset' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  selector_input_dataset = _ops.convert_to_tensor(selector_input_dataset, _dtypes.variant)
  data_input_datasets = _ops.convert_n_to_tensor(data_input_datasets, _dtypes.variant)
  _inputs_flat = [selector_input_dataset] + list(data_input_datasets)
  _attrs = ("output_types", output_types, "output_shapes", output_shapes, "N",
  _attr_N)
  _result = _execute.execute(b"DirectedInterleaveDataset", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "DirectedInterleaveDataset", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
Example 12
def _if_eager_fallback(cond, input, Tout, then_branch, else_branch, output_shapes=[], name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function _if
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(Tout, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tout' argument to "
        "'if' Op, not %r." % Tout)
  Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
  if output_shapes is None:
    output_shapes = []
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'if' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  _attr_Tcond, (cond,) = _execute.args_to_matching_eager([cond], _ctx)
  _attr_Tin, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)
  _inputs_flat = [cond] + list(input)
  _attrs = ("Tcond", _attr_Tcond, "Tin", _attr_Tin, "Tout", Tout,
  "then_branch", then_branch, "else_branch", else_branch, "output_shapes",
  output_shapes)
  _result = _execute.execute(b"If", len(Tout), inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "If", _inputs_flat, _attrs, _result, name)
  return _result
def var_handle_op_eager_fallback(dtype,
                                 shape,
                                 container="",
                                 shared_name="",
                                 name=None):
    r"""This is the slowpath function for Eager mode.
  This is for function var_handle_op
  """
    _ctx = _context.context()
    dtype = _execute.make_type(dtype, "dtype")
    shape = _execute.make_shape(shape, "shape")
    if container is None:
        container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
        shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _inputs_flat = []
    _attrs = ("container", container, "shared_name", shared_name, "dtype",
              dtype, "shape", shape)
    _result = _execute.execute(b"VarHandleOp",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("VarHandleOp", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result
Example 14
def case_eager_fallback(branch_index, input, Tout, branches, output_shapes, name, ctx):
  if not isinstance(Tout, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tout' argument to "
        "'case' Op, not %r." % Tout)
  Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
  if not isinstance(branches, (list, tuple)):
    raise TypeError(
        "Expected list for 'branches' argument to "
        "'case' Op, not %r." % branches)
  if output_shapes is None:
    output_shapes = []
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'case' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  _attr_Tin, input = _execute.convert_to_mixed_eager_tensors(input, ctx)
  branch_index = _ops.convert_to_tensor(branch_index, _dtypes.int32)
  _inputs_flat = [branch_index] + list(input)
  _attrs = ("Tin", _attr_Tin, "Tout", Tout, "branches", branches,
  "output_shapes", output_shapes)
  _result = _execute.execute(b"Case", len(Tout), inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "Case", _inputs_flat, _attrs, _result)
  return _result
Example 15
def balance_dataset_eager_fallback(input_dataset, output_types, output_shapes,
                                   name, ctx):
    if not isinstance(output_types, (list, tuple)):
        raise TypeError("Expected list for 'output_types' argument to "
                        "'balance_dataset' Op, not %r." % output_types)
    output_types = [
        _execute.make_type(_t, "output_types") for _t in output_types
    ]
    if not isinstance(output_shapes, (list, tuple)):
        raise TypeError("Expected list for 'output_shapes' argument to "
                        "'balance_dataset' Op, not %r." % output_shapes)
    output_shapes = [
        _execute.make_shape(_s, "output_shapes") for _s in output_shapes
    ]
    input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant)
    _inputs_flat = [input_dataset]
    _attrs = ("output_types", output_types, "output_shapes", output_shapes)
    _result = _execute.execute(b"BalanceDataset",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=ctx,
                               name=name)
    if _execute.must_record_gradient():
        _execute.record_gradient("BalanceDataset", _inputs_flat, _attrs,
                                 _result)
    _result, = _result
    return _result
Example 16
def thread_pool_dataset_eager_fallback(input_dataset, thread_pool, output_types, output_shapes, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function thread_pool_dataset
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'thread_pool_dataset' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'thread_pool_dataset' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant)
  thread_pool = _ops.convert_to_tensor(thread_pool, _dtypes.resource)
  _inputs_flat = [input_dataset, thread_pool]
  _attrs = ("output_types", output_types, "output_shapes", output_shapes)
  _result = _execute.execute(b"ThreadPoolDataset", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "ThreadPoolDataset", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
Example 17
def outfeed_dequeue_tuple(dtypes, shapes, device_ordinal=-1, name=None):
    r"""Retrieve multiple values that will be emitted by the computation as an XLA

  tuple.  This operations will block indefinitely until data is available.
  Output `i` corresponds to XLA tuple element `i`.

  Args:
    dtypes: A list of `tf.DTypes` that has length `>= 1`.
      The element types of each element in `outputs`.
    shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`).
      The shapes of each tensor in `outputs`.
    device_ordinal: An optional `int`. Defaults to `-1`.
      The TPU device to use. This should be -1 when the Op
      is running on a TPU device, and >= 0 when the Op is running on the CPU
      device.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `dtypes`.
    A list of tensors that will be read from the outfeed.
  """
    if not isinstance(dtypes, (list, tuple)):
        raise TypeError("Expected list for 'dtypes' argument to "
                        "'outfeed_dequeue_tuple' Op, not %r." % dtypes)
    dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
    if not isinstance(shapes, (list, tuple)):
        raise TypeError("Expected list for 'shapes' argument to "
                        "'outfeed_dequeue_tuple' Op, not %r." % shapes)
    shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
    if device_ordinal is None:
        device_ordinal = -1
    device_ordinal = _execute.make_int(device_ordinal, "device_ordinal")
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper("OutfeedDequeueTuple",
                                                 dtypes=dtypes,
                                                 shapes=shapes,
                                                 device_ordinal=device_ordinal,
                                                 name=name)
        _result = _op.outputs[:]
        if not _result:
            return _op
        _inputs_flat = _op.inputs
        _attrs = ("dtypes", _op.get_attr("dtypes"), "shapes",
                  _op.get_attr("shapes"), "device_ordinal",
                  _op.get_attr("device_ordinal"))
    else:
        _inputs_flat = []
        _attrs = ("dtypes", dtypes, "shapes", shapes, "device_ordinal",
                  device_ordinal)
        _result = _execute.execute(b"OutfeedDequeueTuple",
                                   len(dtypes),
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("OutfeedDequeueTuple", _inputs_flat, _attrs,
                             _result, name)
    return _result
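# A minimal usage sketch, not part of the generated module: it assumes a TPU
# computation has enqueued a (float32 [128, 10], int32 [128]) tuple on the
# outfeed; the dtypes, shapes, and device ordinal are illustrative.
logits, labels = outfeed_dequeue_tuple(
    dtypes=[_dtypes.float32, _dtypes.int32],
    shapes=[[128, 10], [128]],
    device_ordinal=0)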
Example 18
def csv_dataset(filenames, buffer_size, header, field_delim, use_quote_delim, na_value, select_cols, record_defaults, output_shapes, name=None):
  r"""TODO: add doc.

  Args:
    filenames: A `Tensor` of type `string`.
    buffer_size: A `Tensor` of type `int64`.
    header: A `Tensor` of type `bool`.
    field_delim: A `Tensor` of type `string`.
    use_quote_delim: A `Tensor` of type `bool`.
    na_value: A `Tensor` of type `string`.
    select_cols: A `Tensor` of type `int64`.
    record_defaults: A list of `Tensor` objects with types from: `float32`, `float64`, `int32`, `int64`, `string`.
    output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `variant`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(output_shapes, (list, tuple)):
      raise TypeError(
          "Expected list for 'output_shapes' argument to "
          "'csv_dataset' Op, not %r." % output_shapes)
    output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
    _, _, _op = _op_def_lib._apply_op_helper(
        "CSVDataset", filenames=filenames, buffer_size=buffer_size,
        header=header, field_delim=field_delim,
        use_quote_delim=use_quote_delim, na_value=na_value,
        select_cols=select_cols, record_defaults=record_defaults,
        output_shapes=output_shapes, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes",
              _op.get_attr("output_shapes"))
    _execute.record_gradient(
      "CSVDataset", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "CSVDataset",
        name, _ctx._post_execution_callbacks, filenames, buffer_size, header,
        field_delim, use_quote_delim, na_value, select_cols, record_defaults,
        "output_shapes", output_shapes)
      return _result
    except _core._FallbackException:
      return csv_dataset_eager_fallback(
          filenames, buffer_size, header, field_delim, use_quote_delim,
          na_value, select_cols, record_defaults, output_shapes=output_shapes,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
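# A minimal usage sketch, not part of the generated module: "data.csv" is a
# hypothetical file, and the record defaults select one float column and one
# string column. The result is a variant tensor representing the dataset.
csv_variant = csv_dataset(
    filenames=["data.csv"],
    buffer_size=4 * 1024 * 1024,
    header=True,
    field_delim=",",
    use_quote_delim=True,
    na_value="",
    select_cols=[0, 1],
    record_defaults=[_ops.convert_to_tensor(0.0, _dtypes.float32),
                     _ops.convert_to_tensor("", _dtypes.string)],
    output_shapes=[[], []])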
Example 19
def var_handle_op(dtype, shape, container="", shared_name="", name=None):
  r"""Creates a handle to a Variable resource.

  Args:
    dtype: A `tf.DType`.
      the type of this variable. Must agree with the dtypes
      of all ops using this variable.
    shape: A `tf.TensorShape` or list of `ints`.
      The (possibly partially specified) shape of this variable.
    container: An optional `string`. Defaults to `""`.
      the container this variable is placed in.
    shared_name: An optional `string`. Defaults to `""`.
      the name by which this variable is referred to.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `resource`.
  """
  _ctx = _context.context()
  if not _ctx.executing_eagerly():
    dtype = _execute.make_type(dtype, "dtype")
    shape = _execute.make_shape(shape, "shape")
    if container is None:
      container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
      shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "VarHandleOp", dtype=dtype, shape=shape, container=container,
        shared_name=shared_name, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"), "dtype", _op.get_attr("dtype"),
              "shape", _op.get_attr("shape"))
    _execute.record_gradient(
      "VarHandleOp", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._handle, _ctx.device_name, "VarHandleOp", name,
        _ctx._post_execution_callbacks, "container", container, "shared_name",
        shared_name, "dtype", dtype, "shape", shape)
      return _result
    except _core._FallbackException:
      return var_handle_op_eager_fallback(
          container=container, shared_name=shared_name, dtype=dtype,
          shape=shape, name=name)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
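# A minimal usage sketch, not part of the generated module: it creates a
# resource handle for a float32 [2, 3] variable; the shared name is
# illustrative.
handle = var_handle_op(dtype=_dtypes.float32, shape=[2, 3],
                       container="", shared_name="example_var")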
def unique_dataset(input_dataset, output_types, output_shapes, name=None):
    r"""Creates a dataset that contains the unique elements of `input_dataset`.

  Args:
    input_dataset: A `Tensor` of type `variant`.
    output_types: A list of `tf.DTypes` that has length `>= 1`.
    output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `variant`.
  """
    _ctx = _context.context()
    if not _ctx.executing_eagerly():
        if not isinstance(output_types, (list, tuple)):
            raise TypeError("Expected list for 'output_types' argument to "
                            "'unique_dataset' Op, not %r." % output_types)
        output_types = [
            _execute.make_type(_t, "output_types") for _t in output_types
        ]
        if not isinstance(output_shapes, (list, tuple)):
            raise TypeError("Expected list for 'output_shapes' argument to "
                            "'unique_dataset' Op, not %r." % output_shapes)
        output_shapes = [
            _execute.make_shape(_s, "output_shapes") for _s in output_shapes
        ]
        _, _, _op = _op_def_lib._apply_op_helper("UniqueDataset",
                                                 input_dataset=input_dataset,
                                                 output_types=output_types,
                                                 output_shapes=output_shapes,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("output_types", _op.get_attr("output_types"),
                  "output_shapes", _op.get_attr("output_shapes"))
        _execute.record_gradient("UniqueDataset", _inputs_flat, _attrs,
                                 _result, name)
        _result, = _result
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._handle, _ctx.device_name, "UniqueDataset", name,
                _ctx._post_execution_callbacks, input_dataset, "output_types",
                output_types, "output_shapes", output_shapes)
            return _result
        except _core._FallbackException:
            return unique_dataset_eager_fallback(input_dataset,
                                                 output_types=output_types,
                                                 output_shapes=output_shapes,
                                                 name=name)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
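# A minimal usage sketch, not part of the generated module: it assumes
# `upstream_variant` already holds the variant tensor of an existing dataset
# whose elements are scalar int64 values; only the wrapping in UniqueDataset
# is shown here.
unique_variant = unique_dataset(upstream_variant,
                                output_types=[_dtypes.int64],
                                output_shapes=[[]])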
Example 21
def thread_pool_dataset(input_dataset, thread_pool, output_types, output_shapes, name=None):
  r"""Creates a dataset that uses a custom thread pool to compute `input_dataset`.

  Args:
    input_dataset: A `Tensor` of type `variant`.
    thread_pool: A `Tensor` of type `resource`.
    output_types: A list of `tf.DTypes` that has length `>= 1`.
    output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `variant`.
    A resource produced by the ThreadPoolHandle op.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(output_types, (list, tuple)):
      raise TypeError(
          "Expected list for 'output_types' argument to "
          "'thread_pool_dataset' Op, not %r." % output_types)
    output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
    if not isinstance(output_shapes, (list, tuple)):
      raise TypeError(
          "Expected list for 'output_shapes' argument to "
          "'thread_pool_dataset' Op, not %r." % output_shapes)
    output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
    _, _, _op = _op_def_lib._apply_op_helper(
        "ThreadPoolDataset", input_dataset=input_dataset,
        thread_pool=thread_pool, output_types=output_types,
        output_shapes=output_shapes, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes",
              _op.get_attr("output_shapes"))
    _execute.record_gradient(
      "ThreadPoolDataset", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ThreadPoolDataset", name, _ctx._post_execution_callbacks,
        input_dataset, thread_pool, "output_types", output_types,
        "output_shapes", output_shapes)
      return _result
    except _core._FallbackException:
      return thread_pool_dataset_eager_fallback(
          input_dataset, thread_pool, output_types=output_types,
          output_shapes=output_shapes, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
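# A minimal usage sketch, not part of the generated module: it assumes
# `upstream_variant` is the variant tensor of an existing dataset and
# `pool_handle` is a resource produced by a ThreadPoolHandle op; the types and
# shapes describe scalar string elements and are illustrative.
pooled_variant = thread_pool_dataset(upstream_variant, pool_handle,
                                     output_types=[_dtypes.string],
                                     output_shapes=[[]])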
Example 22
def fake_param(dtype, shape, name=None):
    r"""  This op is used as a placeholder in If branch functions. It doesn't provide a

  valid output when run, so must either be removed (e.g. replaced with a

  function input) or guaranteed not to be used (e.g. if mirroring an

  intermediate output needed for the gradient computation of the other branch).


  Args:
    dtype: A `tf.DType`. The type of the output.
    shape: A `tf.TensorShape` or list of `ints`.
          The purported shape of the output. This is only used for shape inference;
          the output will not necessarily have this shape. Can be a partial shape.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        dtype = _execute.make_type(dtype, "dtype")
        shape = _execute.make_shape(shape, "shape")
        _, _, _op = _op_def_lib._apply_op_helper("FakeParam",
                                                 dtype=dtype,
                                                 shape=shape,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("dtype", _op.get_attr("dtype"), "shape",
                  _op.get_attr("shape"))
        _execute.record_gradient("FakeParam", _inputs_flat, _attrs, _result,
                                 name)
        _result, = _result
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "FakeParam", name, _ctx._post_execution_callbacks, "dtype",
                dtype, "shape", shape)
            return _result
        except _core._FallbackException:
            return fake_param_eager_fallback(dtype=dtype,
                                             shape=shape,
                                             name=name,
                                             ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
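# A minimal usage sketch, not part of the generated module: it produces a
# tensor that stands in for a float32 [8, 16] value inside an If branch
# function; the dtype and shape are illustrative.
branch_placeholder = fake_param(dtype=_dtypes.float32, shape=[8, 16])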
Example 23
def collective_bcast_recv(T, group_size, group_key, instance_key, shape, name=None):
  r"""Receives a tensor value broadcast from another device.

  Args:
    T: A `tf.DType` from: `tf.float32, tf.half, tf.float64, tf.int32, tf.int64`.
    group_size: An `int`.
    group_key: An `int`.
    instance_key: An `int`.
    shape: A `tf.TensorShape` or list of `ints`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `T`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "CollectiveBcastRecv", name, _ctx._post_execution_callbacks, "T", T,
        "group_size", group_size, "group_key", group_key, "instance_key",
        instance_key, "shape", shape)
      return _result
    except _core._FallbackException:
      try:
        return collective_bcast_recv_eager_fallback(
            T=T, group_size=group_size, group_key=group_key,
            instance_key=instance_key, shape=shape, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  T = _execute.make_type(T, "T")
  group_size = _execute.make_int(group_size, "group_size")
  group_key = _execute.make_int(group_key, "group_key")
  instance_key = _execute.make_int(instance_key, "instance_key")
  shape = _execute.make_shape(shape, "shape")
  _, _, _op = _op_def_lib._apply_op_helper(
        "CollectiveBcastRecv", T=T, group_size=group_size,
                               group_key=group_key, instance_key=instance_key,
                               shape=shape, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "group_size", _op.get_attr("group_size"),
            "group_key", _op.get_attr("group_key"), "instance_key",
            _op.get_attr("instance_key"), "shape", _op.get_attr("shape"))
  _execute.record_gradient(
      "CollectiveBcastRecv", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
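# A minimal usage sketch, not part of the generated module: this receive must
# be paired with a matching CollectiveBcastSend on another device in the same
# group; the group size, keys, and shape are illustrative.
received = collective_bcast_recv(T=_dtypes.float32, group_size=2,
                                 group_key=1, instance_key=1, shape=[4])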
Example 24
def nccl_broadcast(input, shape, name=None):
    r"""Sends `input` to all devices that are connected to the output.

  Sends `input` to all devices that are connected to the output.

  The graph should be constructed so that all ops connected to the output have a
  valid device assignment, and the op itself is assigned one of these devices.

  input: The input to the broadcast.
  output: The same as input.
  shape: The shape of the input tensor.

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`, `int32`, `int64`.
    shape: A `tf.TensorShape` or list of `ints`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
    _ctx = _context._context or _context.context()
    if _ctx is not None and _ctx._thread_local_data.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._thread_local_data.device_name,
                "NcclBroadcast", name, _ctx._post_execution_callbacks, input,
                "shape", shape)
            return _result
        except _core._FallbackException:
            try:
                return nccl_broadcast_eager_fallback(input,
                                                     shape=shape,
                                                     name=name,
                                                     ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    shape = _execute.make_shape(shape, "shape")
    _, _, _op = _op_def_lib._apply_op_helper("NcclBroadcast",
                                             input=input,
                                             shape=shape,
                                             name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op._get_attr_type("T"), "shape", _op.get_attr("shape"))
    _execute.record_gradient("NcclBroadcast", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result
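# A minimal usage sketch, not part of the generated module: NCCL ops require
# GPU devices with valid device assignments for the consumers of the output;
# the input values and shape are illustrative.
broadcasted = nccl_broadcast([1.0, 2.0, 3.0], shape=[3])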
Example 25
def collective_gather(input, group_size, group_key, instance_key, shape, name=None):
  r"""Mutually accumulates multiple tensors of identical type and shape.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `half`, `float64`, `int32`, `int64`.
    group_size: An `int`.
    group_key: An `int`.
    instance_key: An `int`.
    shape: A `tf.TensorShape` or list of `ints`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "CollectiveGather", name, _ctx._post_execution_callbacks, input,
        "group_size", group_size, "group_key", group_key, "instance_key",
        instance_key, "shape", shape)
      return _result
    except _core._FallbackException:
      try:
        return collective_gather_eager_fallback(
            input, group_size=group_size, group_key=group_key,
            instance_key=instance_key, shape=shape, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  group_size = _execute.make_int(group_size, "group_size")
  group_key = _execute.make_int(group_key, "group_key")
  instance_key = _execute.make_int(instance_key, "instance_key")
  shape = _execute.make_shape(shape, "shape")
  _, _, _op = _op_def_lib._apply_op_helper(
        "CollectiveGather", input=input, group_size=group_size,
                            group_key=group_key, instance_key=instance_key,
                            shape=shape, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "group_size", _op.get_attr("group_size"),
            "group_key", _op.get_attr("group_key"), "instance_key",
            _op.get_attr("instance_key"), "shape", _op.get_attr("shape"))
  _execute.record_gradient(
      "CollectiveGather", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
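# A minimal usage sketch, not part of the generated module: each member of a
# two-worker group contributes a [2]-shaped float32 slice and receives the
# gathered result; the keys and shapes are illustrative.
gathered = collective_gather([1.0, 2.0], group_size=2, group_key=1,
                             instance_key=2, shape=[4])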
def var_handle_op(dtype, shape, container="", shared_name="", name=None):
    r"""Creates a handle to a Variable resource.

  Args:
    dtype: A `tf.DType`.
      the type of this variable. Must agree with the dtypes
      of all ops using this variable.
    shape: A `tf.TensorShape` or list of `ints`.
      The (possibly partially specified) shape of this variable.
    container: An optional `string`. Defaults to `""`.
      the container this variable is placed in.
    shared_name: An optional `string`. Defaults to `""`.
      the name by which this variable is referred to.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `resource`.
  """
    dtype = _execute.make_type(dtype, "dtype")
    shape = _execute.make_shape(shape, "shape")
    if container is None:
        container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
        shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper("VarHandleOp",
                                                 dtype=dtype,
                                                 shape=shape,
                                                 container=container,
                                                 shared_name=shared_name,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("container", _op.get_attr("container"), "shared_name",
                  _op.get_attr("shared_name"), "dtype", _op.get_attr("dtype"),
                  "shape", _op.get_attr("shape"))
    else:
        _inputs_flat = []
        _attrs = ("container", container, "shared_name", shared_name, "dtype",
                  dtype, "shape", shape)
        _result = _execute.execute(b"VarHandleOp",
                                   1,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("VarHandleOp", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result
Example 27
def nccl_broadcast_eager_fallback(input, shape, name, ctx):
  shape = _execute.make_shape(shape, "shape")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.int64, ])
  _inputs_flat = [input]
  _attrs = ("T", _attr_T, "shape", shape)
  _result = _execute.execute(b"NcclBroadcast", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "NcclBroadcast", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
Example 28
def zero_var_initializer(var, dtype, shape, name=None):
    r"""Initialize 'var' with all zeros. This op requires that the resource var is not

  initialized. The var will first be allocated memory, then be filled with all
  zeros. This op is intended to save memory during initialization,
  if you use this op, you should not run initializer of the var.

  Args:
    var: A `Tensor` of type `resource`. Should be a ResourceVariable.
    dtype: A `tf.DType`.
    shape: A `tf.TensorShape` or list of `ints`.
    name: A name for the operation (optional).

  Returns:
    Same as "var".
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        dtype = _execute.make_type(dtype, "dtype")
        shape = _execute.make_shape(shape, "shape")
        _, _, _op = _op_def_lib._apply_op_helper("ZeroVarInitializer",
                                                 var=var,
                                                 dtype=dtype,
                                                 shape=shape,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("dtype", _op.get_attr("dtype"), "shape",
                  _op.get_attr("shape"))
        _execute.record_gradient("ZeroVarInitializer", _inputs_flat, _attrs,
                                 _result, name)
        _result, = _result
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "ZeroVarInitializer", name, _ctx._post_execution_callbacks,
                var, "dtype", dtype, "shape", shape)
            return _result
        except _core._FallbackException:
            return zero_var_initializer_eager_fallback(var,
                                                       dtype=dtype,
                                                       shape=shape,
                                                       name=name,
                                                       ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
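# A minimal usage sketch, not part of the generated module: it reuses the
# var_handle_op wrapper from this listing to obtain an uninitialized resource
# handle and then zero-fills it; the shape and shared name are illustrative.
zvar_handle = var_handle_op(dtype=_dtypes.float32, shape=[2, 3],
                            shared_name="zero_init_var")
zvar_handle = zero_var_initializer(zvar_handle, dtype=_dtypes.float32,
                                   shape=[2, 3])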
Example 29
def fake_param_eager_fallback(dtype, shape, name, ctx):
  dtype = _execute.make_type(dtype, "dtype")
  shape = _execute.make_shape(shape, "shape")
  _inputs_flat = []
  _attrs = ("dtype", dtype, "shape", shape)
  _result = _execute.execute(b"FakeParam", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "FakeParam", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
Example 30
def nccl_broadcast(input, shape, name=None):
    r"""Sends `input` to all devices that are connected to the output.

  Sends `input` to all devices that are connected to the output.

  The graph should be constructed so that all ops connected to the output have a
  valid device assignment, and the op itself is assigned one of these devices.

  input: The input to the broadcast.
  output: The same as input.
  shape: The shape of the input tensor.

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`, `int32`, `int64`.
    shape: A `tf.TensorShape` or list of `ints`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
    _ctx = _context._context or _context.context()
    tld = _ctx._thread_local_data
    if tld.is_eager:
        try:
            _result = pywrap_tfe.TFE_Py_FastPathExecute(
                _ctx, "NcclBroadcast", name, input, "shape", shape)
            return _result
        except _core._NotOkStatusException as e:
            _ops.raise_from_not_ok_status(e, name)
        except _core._FallbackException:
            pass
        try:
            return nccl_broadcast_eager_fallback(input,
                                                 shape=shape,
                                                 name=name,
                                                 ctx=_ctx)
        except _core._SymbolicException:
            pass  # Add nodes to the TensorFlow graph.
    # Add nodes to the TensorFlow graph.
    shape = _execute.make_shape(shape, "shape")
    _, _, _op, _outputs = _op_def_library._apply_op_helper("NcclBroadcast",
                                                           input=input,
                                                           shape=shape,
                                                           name=name)
    _result = _outputs[:]
    if _execute.must_record_gradient():
        _attrs = ("T", _op._get_attr_type("T"), "shape", _op.get_attr("shape"))
        _inputs_flat = _op.inputs
        _execute.record_gradient("NcclBroadcast", _inputs_flat, _attrs,
                                 _result)
    _result, = _result
    return _result
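
A rough usage sketch for the wrapper above, assuming at least two visible GPUs and that the op is reachable as `tf.raw_ops.NcclBroadcast`; as the docstring notes, the broadcast op itself must sit on one of the devices that consume its output. Device strings here are assumptions to adjust for your setup.

import tensorflow as tf

# Hypothetical usage: broadcast a tensor from GPU:0 to consumers on other GPUs.
with tf.device('/gpu:0'):
    value = tf.constant([1.0, 2.0, 3.0])
    broadcast = tf.raw_ops.NcclBroadcast(input=value, shape=value.shape)

with tf.device('/gpu:1'):
    # Every consumer of `broadcast` needs a valid device assignment.
    doubled = broadcast * 2.0
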
Example 31
def outfeed_dequeue(dtype, shape, device_ordinal=-1, name=None):
    r"""Retrieves a single tensor from the computation outfeed.  This operation will

  block indefinitely until data is available.

  Args:
    dtype: A `tf.DType`. The type of elements in the tensor.
    shape: A `tf.TensorShape` or list of `ints`. The shape of the tensor.
    device_ordinal: An optional `int`. Defaults to `-1`.
      The TPU device to use. This should be -1 when the Op
      is running on a TPU device, and >= 0 when the Op is running on the CPU
      device.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
    A tensor that will be read from the device outfeed.
  """
    dtype = _execute.make_type(dtype, "dtype")
    shape = _execute.make_shape(shape, "shape")
    if device_ordinal is None:
        device_ordinal = -1
    device_ordinal = _execute.make_int(device_ordinal, "device_ordinal")
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper("OutfeedDequeue",
                                                 dtype=dtype,
                                                 shape=shape,
                                                 device_ordinal=device_ordinal,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("dtype", _op.get_attr("dtype"), "shape",
                  _op.get_attr("shape"), "device_ordinal",
                  _op.get_attr("device_ordinal"))
    else:
        _inputs_flat = []
        _attrs = ("dtype", dtype, "shape", shape, "device_ordinal",
                  device_ordinal)
        _result = _execute.execute(b"OutfeedDequeue",
                                   1,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("OutfeedDequeue", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result
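
A minimal usage sketch, assuming a separate TPU computation has already enqueued a matching tensor onto the outfeed of device 0 and that the wrapper above is in scope as `outfeed_dequeue`; the call blocks until that tensor arrives.

import tensorflow as tf

# Hypothetical usage: read one [128, 10] float32 tensor from TPU device 0's
# outfeed while this code runs on the CPU host (hence device_ordinal >= 0).
logits = outfeed_dequeue(dtype=tf.float32, shape=[128, 10], device_ordinal=0)
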
Example 32
def zero_var_initializer_eager_fallback(var, dtype, shape, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function zero_var_initializer
  """
  _ctx = ctx if ctx else _context.context()
  dtype = _execute.make_type(dtype, "dtype")
  shape = _execute.make_shape(shape, "shape")
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  _inputs_flat = [var]
  _attrs = ("dtype", dtype, "shape", shape)
  _result = _execute.execute(b"ZeroVarInitializer", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "ZeroVarInitializer", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
Example 33
def zero_var_initializer(var, dtype, shape, name=None):
  r"""Initialize 'var' with all zeros. This op requires that the resource var is not

  initialized. The var will first be allocated memory, then be filled with all
  zeros. This op is intended to save memory during initialization,
  if you use this op, you should not run initializer of the var.

  Args:
    var: A `Tensor` of type `resource`. Should be a ResourceVariable.
    dtype: A `tf.DType`.
    shape: A `tf.TensorShape` or list of `ints`.
    name: A name for the operation (optional).

  Returns:
    Same as "var".
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    dtype = _execute.make_type(dtype, "dtype")
    shape = _execute.make_shape(shape, "shape")
    _, _, _op = _op_def_lib._apply_op_helper(
        "ZeroVarInitializer", var=var, dtype=dtype, shape=shape, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("dtype", _op.get_attr("dtype"), "shape", _op.get_attr("shape"))
    _execute.record_gradient(
      "ZeroVarInitializer", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ZeroVarInitializer", name, _ctx._post_execution_callbacks, var,
        "dtype", dtype, "shape", shape)
      return _result
    except _core._FallbackException:
      return zero_var_initializer_eager_fallback(
          var, dtype=dtype, shape=shape, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
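
A short usage sketch under the assumption that this generated wrapper (and the contrib `ZeroVarInitializer` kernel behind it) is available in your build: create a resource-variable handle that has not been initialized, then zero-fill it with the op instead of running the variable's normal initializer. `tf.raw_ops.VarHandleOp` and `tf.raw_ops.ReadVariableOp` are standard raw ops; the `shared_name` is an arbitrary placeholder.

import tensorflow as tf

# Hypothetical usage: allocate an *uninitialized* resource-variable handle,
# then fill it with zeros via zero_var_initializer defined above.
handle = tf.raw_ops.VarHandleOp(dtype=tf.float32, shape=[2, 3],
                                container='', shared_name='zero_var_demo')
zero_var_initializer(handle, dtype=tf.float32, shape=[2, 3])

# Reading the variable back should now yield a [2, 3] tensor of zeros.
value = tf.raw_ops.ReadVariableOp(resource=handle, dtype=tf.float32)
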
def single_image_random_dot_stereograms(depth_values, hidden_surface_removal=True, convergence_dots_size=8, dots_per_inch=72, eye_separation=2.5, mu=0.3333, normalize=True, normalize_max=-100, normalize_min=100, border_level=0, number_colors=256, output_image_shape=[1024, 768, 1], output_data_window=[1022, 757], name=None):
  r"""Outputs a single image random dot stereogram for export via encode_PNG/JPG OP.

  Given the 2-D tensor 'depth_values' with encoded Z values, this operation will
  encode 3-D data into a 2-D image.  The output of this Op is suitable for the
  encode_PNG/JPG ops.  Be careful with image compression, as it may corrupt the
  encoded 3-D data within the image.

  This Op is based upon:
  'http://www.learningace.com/doc/4331582/b6ab058d1e206d68ab60e4e1ead2fe6e/sirds-paper'

  Example use which outputs a SIRDS image as picture_out.png:
  ```python
  img=[[1,2,3,3,2,1],
       [1,2,3,4,5,2],
       [1,2,3,4,5,3],
       [1,2,3,4,5,4],
       [6,5,4,4,5,5]]

  session = tf.InteractiveSession()

  sirds = single_image_random_dot_stereograms(img,convergence_dots_size=8,number_colors=256,normalize=True)

  out = sirds.eval()

  png = tf.image.encode_png(out).eval()

  with open('picture_out.png', 'wb') as f:
      f.write(png)
  ```

  Args:
    depth_values: A `Tensor`. Must be one of the following types: `float64`, `float32`, `int64`, `int32`.
      Z values of data to encode into 'output_data_window' window,
      lower values are further away {0.0 floor(far), 1.0 ceiling(near) after normalization}, must be 2-D tensor
    hidden_surface_removal: An optional `bool`. Defaults to `True`.
      Activate hidden surface removal
    convergence_dots_size: An optional `int`. Defaults to `8`.
      Black dot size in pixels to help the viewer converge the image; drawn on the bottom of the image
    dots_per_inch: An optional `int`. Defaults to `72`.
      Output device in dots/inch
    eye_separation: An optional `float`. Defaults to `2.5`.
      Separation between eyes in inches
    mu: An optional `float`. Defaults to `0.3333`.
      Depth of field, fraction of viewing distance (e.g. 1/3 = 0.3333)
    normalize: An optional `bool`. Defaults to `True`.
      Normalize input data to [0.0, 1.0]
    normalize_max: An optional `float`. Defaults to `-100`.
      Fix MAX value for Normalization - if < MIN, autoscale
    normalize_min: An optional `float`. Defaults to `100`.
      Fix MIN value for Normalization - if > MAX, autoscale
    border_level: An optional `float`. Defaults to `0`.
      Value of border depth 0.0 {far} to 1.0 {near}
    number_colors: An optional `int`. Defaults to `256`.
      2 (black & white), 256 (grayscale), and numbers > 256 (full color) are all that are currently supported
    output_image_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `[1024, 768, 1]`.
      Output size of returned image in X,Y, Channels 1-grayscale, 3 color (1024, 768, 1),
      channels will be updated to 3 if 'number_colors' > 256
    output_data_window: An optional `tf.TensorShape` or list of `ints`. Defaults to `[1022, 757]`.
      Size of "DATA" window, must be equal to or smaller than 'output_image_shape', will be centered
      and use 'convergence_dots_size' for best fit to avoid overlap if possible
    name: A name for the operation (optional).

  Returns:
    A tensor of size 'output_image_shape' with the encoded 'depth_values'
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    if hidden_surface_removal is None:
      hidden_surface_removal = True
    hidden_surface_removal = _execute.make_bool(hidden_surface_removal, "hidden_surface_removal")
    if convergence_dots_size is None:
      convergence_dots_size = 8
    convergence_dots_size = _execute.make_int(convergence_dots_size, "convergence_dots_size")
    if dots_per_inch is None:
      dots_per_inch = 72
    dots_per_inch = _execute.make_int(dots_per_inch, "dots_per_inch")
    if eye_separation is None:
      eye_separation = 2.5
    eye_separation = _execute.make_float(eye_separation, "eye_separation")
    if mu is None:
      mu = 0.3333
    mu = _execute.make_float(mu, "mu")
    if normalize is None:
      normalize = True
    normalize = _execute.make_bool(normalize, "normalize")
    if normalize_max is None:
      normalize_max = -100
    normalize_max = _execute.make_float(normalize_max, "normalize_max")
    if normalize_min is None:
      normalize_min = 100
    normalize_min = _execute.make_float(normalize_min, "normalize_min")
    if border_level is None:
      border_level = 0
    border_level = _execute.make_float(border_level, "border_level")
    if number_colors is None:
      number_colors = 256
    number_colors = _execute.make_int(number_colors, "number_colors")
    if output_image_shape is None:
      output_image_shape = [1024, 768, 1]
    output_image_shape = _execute.make_shape(output_image_shape, "output_image_shape")
    if output_data_window is None:
      output_data_window = [1022, 757]
    output_data_window = _execute.make_shape(output_data_window, "output_data_window")
    _, _, _op = _op_def_lib._apply_op_helper(
        "SingleImageRandomDotStereograms", depth_values=depth_values,
        hidden_surface_removal=hidden_surface_removal,
        convergence_dots_size=convergence_dots_size,
        dots_per_inch=dots_per_inch, eye_separation=eye_separation, mu=mu,
        normalize=normalize, normalize_max=normalize_max,
        normalize_min=normalize_min, border_level=border_level,
        number_colors=number_colors, output_image_shape=output_image_shape,
        output_data_window=output_data_window, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "hidden_surface_removal",
              _op.get_attr("hidden_surface_removal"), "convergence_dots_size",
              _op.get_attr("convergence_dots_size"), "dots_per_inch",
              _op.get_attr("dots_per_inch"), "eye_separation",
              _op.get_attr("eye_separation"), "mu", _op.get_attr("mu"),
              "normalize", _op.get_attr("normalize"), "normalize_max",
              _op.get_attr("normalize_max"), "normalize_min",
              _op.get_attr("normalize_min"), "border_level",
              _op.get_attr("border_level"), "number_colors",
              _op.get_attr("number_colors"), "output_image_shape",
              _op.get_attr("output_image_shape"), "output_data_window",
              _op.get_attr("output_data_window"))
    _execute.record_gradient(
      "SingleImageRandomDotStereograms", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SingleImageRandomDotStereograms", name,
        _ctx._post_execution_callbacks, depth_values,
        "hidden_surface_removal", hidden_surface_removal,
        "convergence_dots_size", convergence_dots_size, "dots_per_inch",
        dots_per_inch, "eye_separation", eye_separation, "mu", mu,
        "normalize", normalize, "normalize_max", normalize_max,
        "normalize_min", normalize_min, "border_level", border_level,
        "number_colors", number_colors, "output_image_shape",
        output_image_shape, "output_data_window", output_data_window)
      return _result
    except _core._FallbackException:
      return single_image_random_dot_stereograms_eager_fallback(
          depth_values, hidden_surface_removal=hidden_surface_removal,
          convergence_dots_size=convergence_dots_size,
          dots_per_inch=dots_per_inch, eye_separation=eye_separation, mu=mu,
          normalize=normalize, normalize_max=normalize_max,
          normalize_min=normalize_min, border_level=border_level,
          number_colors=number_colors, output_image_shape=output_image_shape,
          output_data_window=output_data_window, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
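
For comparison with the session-based example in the docstring above, a rough eager-mode equivalent might look like the following. It assumes the contrib build providing this op is installed, that the wrapper above is importable as `single_image_random_dot_stereograms`, and (as in the original example) that the op's output is already suitable for PNG encoding.

import tensorflow as tf

depth = [[1, 2, 3, 3, 2, 1],
         [1, 2, 3, 4, 5, 2],
         [1, 2, 3, 4, 5, 3],
         [1, 2, 3, 4, 5, 4],
         [6, 5, 4, 4, 5, 5]]

# Hypothetical eager-mode usage of the wrapper defined above.
sirds = single_image_random_dot_stereograms(
    depth, convergence_dots_size=8, number_colors=256, normalize=True)

# Encode and write the stereogram as picture_out.png.
png = tf.io.encode_png(sirds)
tf.io.write_file('picture_out.png', png)
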