def StatelessMultinomial(logits, num_samples, seed,
                         output_dtype=_dtypes.int64, name=None):
    """Raw-op alias that forwards every argument to `stateless_multinomial`."""
    return stateless_multinomial(
        logits=logits, num_samples=num_samples, seed=seed,
        output_dtype=output_dtype, name=name)


# Copy the wrapped op's docstring, hide this alias from generated API docs,
# make it keyword-only, and register it under tf.raw_ops.
StatelessMultinomial.__doc__ = stateless_multinomial.__doc__
StatelessMultinomial = _doc_controls.do_not_generate_docs(
    _kwarg_only(StatelessMultinomial))
tf_export("raw_ops.StatelessMultinomial")(StatelessMultinomial)


def stateless_multinomial_eager_fallback(logits,
                                         num_samples,
                                         seed,
                                         output_dtype=_dtypes.int64,
                                         name=None,
                                         ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function stateless_multinomial
  """
    _ctx = ctx if ctx else _context.context()
    if output_dtype is None:
        output_dtype = _dtypes.int64
# Beispiel #2 — scraper page separator (not Python code; commented out)
# 0
                         name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = None
  _execute.record_gradient(
      "MaskedMatmul", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result

def MaskedMatmul(a, b, mask_indices, transpose_a, transpose_b, name=None):
  """Raw-op alias that forwards every argument to `masked_matmul`."""
  return masked_matmul(
      a=a,
      b=b,
      mask_indices=mask_indices,
      transpose_a=transpose_a,
      transpose_b=transpose_b,
      name=name)

# Docstring mirror, doc-hiding, keyword-only wrapping, raw_ops registration.
MaskedMatmul.__doc__ = masked_matmul.__doc__
MaskedMatmul = _doc_controls.do_not_generate_docs(_kwarg_only(MaskedMatmul))
tf_export("raw_ops.MaskedMatmul")(MaskedMatmul)


def masked_matmul_eager_fallback(a, b, mask_indices, transpose_a, transpose_b, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function masked_matmul
  """
  _ctx = ctx if ctx else _context.context()
  a = _ops.convert_to_tensor(a, _dtypes.float32)
  b = _ops.convert_to_tensor(b, _dtypes.float32)
  mask_indices = _ops.convert_to_tensor(mask_indices, _dtypes.int64)
  transpose_a = _ops.convert_to_tensor(transpose_a, _dtypes.bool)
  transpose_b = _ops.convert_to_tensor(transpose_b, _dtypes.bool)
  _inputs_flat = [a, b, mask_indices, transpose_a, transpose_b]
  _attrs = None
# Beispiel #3 — scraper page separator (not Python code; commented out)
# 0
        raise
    return _op
    _result = None
    return _result


def CreateFertileStatsVariable(stats_handle, stats_config, params, name=None):
    """Raw-op alias forwarding to `create_fertile_stats_variable`."""
    return create_fertile_stats_variable(
        stats_handle=stats_handle, stats_config=stats_config,
        params=params, name=name)


# Mirror the snake_case op's docstring, keep the alias out of generated docs,
# force keyword-only invocation, and expose it under tf.raw_ops.
CreateFertileStatsVariable.__doc__ = create_fertile_stats_variable.__doc__
CreateFertileStatsVariable = _doc_controls.do_not_generate_docs(
    _kwarg_only(CreateFertileStatsVariable))
tf_export("raw_ops.CreateFertileStatsVariable")(CreateFertileStatsVariable)


def create_fertile_stats_variable_eager_fallback(stats_handle,
                                                 stats_config,
                                                 params,
                                                 name=None,
                                                 ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function create_fertile_stats_variable
  """
    _ctx = ctx if ctx else _context.context()
    params = _execute.make_str(params, "params")
    stats_handle = _ops.convert_to_tensor(stats_handle, _dtypes.resource)
    stats_config = _ops.convert_to_tensor(stats_config, _dtypes.string)
          name=None):
    return batch(in_tensors=in_tensors,
                 num_batch_threads=num_batch_threads,
                 max_batch_size=max_batch_size,
                 batch_timeout_micros=batch_timeout_micros,
                 grad_timeout_micros=grad_timeout_micros,
                 max_enqueued_batches=max_enqueued_batches,
                 allowed_batch_sizes=allowed_batch_sizes,
                 container=container,
                 shared_name=shared_name,
                 batching_queue=batching_queue,
                 name=name)


Batch.__doc__ = batch.__doc__
Batch = _doc_controls.do_not_generate_docs(_kwarg_only(Batch))
tf_export("raw_ops.Batch")(Batch)


def batch_eager_fallback(in_tensors,
                         num_batch_threads,
                         max_batch_size,
                         batch_timeout_micros,
                         grad_timeout_micros,
                         max_enqueued_batches=10,
                         allowed_batch_sizes=[],
                         container="",
                         shared_name="",
                         batching_queue="",
                         name=None,
                         ctx=None):
# Beispiel #5 — scraper page separator (not Python code; commented out)
# 0
                           name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("top_k", _op.get_attr("top_k"))
  _execute.record_gradient(
      "BipartiteMatch", _inputs_flat, _attrs, _result, name)
  _result = _BipartiteMatchOutput._make(_result)
  return _result

def BipartiteMatch(distance_mat, num_valid_rows, top_k=-1, name=None):
  """Raw-op alias that forwards every argument to `bipartite_match`."""
  return bipartite_match(
      distance_mat=distance_mat,
      num_valid_rows=num_valid_rows,
      top_k=top_k,
      name=name)

# Docstring mirror, doc-hiding, keyword-only wrapping, raw_ops registration.
BipartiteMatch.__doc__ = bipartite_match.__doc__
BipartiteMatch = _doc_controls.do_not_generate_docs(_kwarg_only(BipartiteMatch))
tf_export("raw_ops.BipartiteMatch")(BipartiteMatch)


def bipartite_match_eager_fallback(distance_mat, num_valid_rows, top_k=-1, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function bipartite_match
  """
  _ctx = ctx if ctx else _context.context()
  if top_k is None:
    top_k = -1
  top_k = _execute.make_int(top_k, "top_k")
  distance_mat = _ops.convert_to_tensor(distance_mat, _dtypes.float32)
  num_valid_rows = _ops.convert_to_tensor(num_valid_rows, _dtypes.float32)
  _inputs_flat = [distance_mat, num_valid_rows]
  _attrs = ("top_k", top_k)
# Beispiel #6 — scraper page separator (not Python code; commented out)
# 0
                             name)
    _result, = _result
    return _result


def AdjustHsvInYiq(images, delta_h, scale_s, scale_v, name=None):
    """Raw-op alias forwarding to `adjust_hsv_in_yiq`."""
    return adjust_hsv_in_yiq(
        images=images, delta_h=delta_h, scale_s=scale_s,
        scale_v=scale_v, name=name)


# Mirror docstring, hide from generated docs, enforce keyword-only calls,
# and register under tf.raw_ops.
AdjustHsvInYiq.__doc__ = adjust_hsv_in_yiq.__doc__
AdjustHsvInYiq = _doc_controls.do_not_generate_docs(
    _kwarg_only(AdjustHsvInYiq))
tf_export("raw_ops.AdjustHsvInYiq")(AdjustHsvInYiq)


def adjust_hsv_in_yiq_eager_fallback(images,
                                     delta_h,
                                     scale_s,
                                     scale_v,
                                     name=None,
                                     ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function adjust_hsv_in_yiq
  """
    _ctx = ctx if ctx else _context.context()
    _attr_T, (images, ) = _execute.args_to_matching_eager([images], _ctx)
    delta_h = _ops.convert_to_tensor(delta_h, _dtypes.float32)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op._get_attr_type("T"), "shape", _op.get_attr("shape"))
    _execute.record_gradient("PeriodicResample", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result


def PeriodicResample(values, shape, name=None):
    """Raw-op alias forwarding to `periodic_resample`."""
    kwargs = dict(values=values, shape=shape, name=name)
    return periodic_resample(**kwargs)


# Mirror docstring, hide from generated docs, enforce keyword-only calls,
# and register under tf.raw_ops.
PeriodicResample.__doc__ = periodic_resample.__doc__
PeriodicResample = _doc_controls.do_not_generate_docs(
    _kwarg_only(PeriodicResample))
tf_export("raw_ops.PeriodicResample")(PeriodicResample)


def periodic_resample_eager_fallback(values, shape, name=None, ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function periodic_resample
  """
    _ctx = ctx if ctx else _context.context()
    shape = _execute.make_shape(shape, "shape")
    _attr_T, (values, ) = _execute.args_to_matching_eager([values], _ctx)
    _inputs_flat = [values]
    _attrs = ("T", _attr_T, "shape", shape)
    _result = _execute.execute(b"PeriodicResample",
                               1,
                               inputs=_inputs_flat,
# Beispiel #8 — scraper page separator (not Python code; commented out)
# 0

def GatherTree(step_ids, parent_ids, max_sequence_lengths, end_token,
               name=None):
    """Raw-op alias forwarding to `gather_tree`."""
    return gather_tree(
        step_ids=step_ids, parent_ids=parent_ids,
        max_sequence_lengths=max_sequence_lengths, end_token=end_token,
        name=name)


# Mirror docstring, hide from generated docs, keyword-only, raw_ops export.
GatherTree.__doc__ = gather_tree.__doc__
GatherTree = _doc_controls.do_not_generate_docs(_kwarg_only(GatherTree))
tf_export("raw_ops.GatherTree")(GatherTree)


def gather_tree_eager_fallback(step_ids,
                               parent_ids,
                               max_sequence_lengths,
                               end_token,
                               name=None,
                               ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function gather_tree
  """
    _ctx = ctx if ctx else _context.context()
    _attr_T, _inputs_T = _execute.args_to_matching_eager(
        [step_ids, parent_ids, end_token], _ctx)
# Beispiel #9 — scraper page separator (not Python code; commented out)
# 0
    _execute.record_gradient("NcclAllReduce", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result


def NcclAllReduce(input, reduction, num_devices, shared_name, name=None):
    """Raw-op alias forwarding to `nccl_all_reduce`."""
    return nccl_all_reduce(
        input=input, reduction=reduction, num_devices=num_devices,
        shared_name=shared_name, name=name)


# Mirror docstring, hide from generated docs, keyword-only, raw_ops export.
NcclAllReduce.__doc__ = nccl_all_reduce.__doc__
NcclAllReduce = _doc_controls.do_not_generate_docs(_kwarg_only(NcclAllReduce))
tf_export("raw_ops.NcclAllReduce")(NcclAllReduce)


def nccl_all_reduce_eager_fallback(input,
                                   reduction,
                                   num_devices,
                                   shared_name,
                                   name=None,
                                   ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function nccl_all_reduce
  """
    _ctx = ctx if ctx else _context.context()
    reduction = _execute.make_str(reduction, "reduction")
    num_devices = _execute.make_int(num_devices, "num_devices")

def RaggedGather(params_nested_splits, params_dense_values, indices,
                 OUTPUT_RAGGED_RANK, name=None):
    """Raw-op alias forwarding to `ragged_gather`."""
    return ragged_gather(
        params_nested_splits=params_nested_splits,
        params_dense_values=params_dense_values,
        indices=indices,
        OUTPUT_RAGGED_RANK=OUTPUT_RAGGED_RANK,
        name=name)


# Mirror docstring, hide from generated docs, keyword-only, raw_ops export.
RaggedGather.__doc__ = ragged_gather.__doc__
RaggedGather = _doc_controls.do_not_generate_docs(_kwarg_only(RaggedGather))
tf_export("raw_ops.RaggedGather")(RaggedGather)


def ragged_gather_eager_fallback(params_nested_splits,
                                 params_dense_values,
                                 indices,
                                 OUTPUT_RAGGED_RANK,
                                 name=None,
                                 ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function ragged_gather
  """
    _ctx = ctx if ctx else _context.context()
    if not isinstance(params_nested_splits, (list, tuple)):
        raise TypeError("Expected list for 'params_nested_splits' argument to "
# Beispiel #11 — scraper page separator (not Python code; commented out)
# 0
            return result
        raise
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op._get_attr_type("T"))
    _execute.record_gradient("BitwiseAnd", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result


def BitwiseAnd(x, y, name=None):
    """Raw-op alias forwarding to `bitwise_and`."""
    kwargs = dict(x=x, y=y, name=name)
    return bitwise_and(**kwargs)


# Mirror docstring, hide from generated docs, keyword-only, raw_ops export.
BitwiseAnd.__doc__ = bitwise_and.__doc__
BitwiseAnd = _doc_controls.do_not_generate_docs(_kwarg_only(BitwiseAnd))
tf_export("raw_ops.BitwiseAnd")(BitwiseAnd)


def bitwise_and_eager_fallback(x, y, name=None, ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function bitwise_and
  """
    _ctx = ctx if ctx else _context.context()
    _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
    (x, y) = _inputs_T
    _inputs_flat = [x, y]
    _attrs = ("T", _attr_T)
    _result = _execute.execute(b"BitwiseAnd",
                               1,
                               inputs=_inputs_flat,
# Beispiel #12 — scraper page separator (not Python code; commented out)
# 0
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tindices", _op.get_attr("Tindices"))
    _execute.record_gradient("ReduceSliceMax", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result


def ReduceSliceMax(data, indices, axis, name=None):
    """Raw-op alias forwarding to `reduce_slice_max`."""
    return reduce_slice_max(
        data=data, indices=indices, axis=axis, name=name)


# Mirror docstring, hide from generated docs, keyword-only, raw_ops export.
ReduceSliceMax.__doc__ = reduce_slice_max.__doc__
ReduceSliceMax = _doc_controls.do_not_generate_docs(
    _kwarg_only(ReduceSliceMax))
tf_export("raw_ops.ReduceSliceMax")(ReduceSliceMax)


def reduce_slice_max_eager_fallback(data, indices, axis, name=None, ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function reduce_slice_max
  """
    _ctx = ctx if ctx else _context.context()
    _attr_T, (data, ) = _execute.args_to_matching_eager([data], _ctx)
    _attr_Tindices, (indices, ) = _execute.args_to_matching_eager([indices],
                                                                  _ctx)
    axis = _ops.convert_to_tensor(axis, _dtypes.int64)
    _inputs_flat = [data, indices, axis]
    _attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
    _result = _execute.execute(b"ReduceSliceMax",
    result = _dispatch.dispatch(
          create_tree_ensemble_variable, tree_ensemble_handle=tree_ensemble_handle,
                                         stamp_token=stamp_token,
                                         tree_ensemble_config=tree_ensemble_config,
                                         name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  return _op
  _result = None
  return _result

def CreateTreeEnsembleVariable(tree_ensemble_handle, stamp_token, tree_ensemble_config, name=None):
  """Raw-op alias forwarding to `create_tree_ensemble_variable`."""
  return create_tree_ensemble_variable(
      tree_ensemble_handle=tree_ensemble_handle,
      stamp_token=stamp_token,
      tree_ensemble_config=tree_ensemble_config,
      name=name)

# Docstring mirror, doc-hiding, keyword-only wrapping, raw_ops registration.
CreateTreeEnsembleVariable.__doc__ = create_tree_ensemble_variable.__doc__
CreateTreeEnsembleVariable = _doc_controls.do_not_generate_docs(
    _kwarg_only(CreateTreeEnsembleVariable))
tf_export("raw_ops.CreateTreeEnsembleVariable")(CreateTreeEnsembleVariable)


def create_tree_ensemble_variable_eager_fallback(tree_ensemble_handle, stamp_token, tree_ensemble_config, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function create_tree_ensemble_variable
  """
  _ctx = ctx if ctx else _context.context()
  tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
  stamp_token = _ops.convert_to_tensor(stamp_token, _dtypes.int64)
  tree_ensemble_config = _ops.convert_to_tensor(tree_ensemble_config, _dtypes.string)
  _inputs_flat = [tree_ensemble_handle, stamp_token, tree_ensemble_config]
  _attrs = None
  _result = _execute.execute(b"CreateTreeEnsembleVariable", 0,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
# Beispiel #14 — scraper page separator (not Python code; commented out)
# 0
                                        bias=bias,
                                        side_input=side_input,
                                        conv_input_scale=conv_input_scale,
                                        side_input_scale=side_input_scale,
                                        strides=strides,
                                        padding=padding,
                                        data_format=data_format,
                                        filter_format=filter_format,
                                        activation_mode=activation_mode,
                                        dilations=dilations,
                                        name=name)


FusedConv2DBiasActivation.__doc__ = fused_conv2d_bias_activation.__doc__
FusedConv2DBiasActivation = _doc_controls.do_not_generate_docs(
    _kwarg_only(FusedConv2DBiasActivation))
tf_export("raw_ops.FusedConv2DBiasActivation")(FusedConv2DBiasActivation)


def fused_conv2d_bias_activation_eager_fallback(conv_input,
                                                filter,
                                                bias,
                                                side_input,
                                                conv_input_scale,
                                                side_input_scale,
                                                strides,
                                                padding,
                                                data_format="NHWC",
                                                filter_format="HWIO",
                                                activation_mode="Relu",
                                                dilations=[1, 1, 1, 1],
# Beispiel #15 — scraper page separator (not Python code; commented out)
# 0
        hessians=hessians,
        class_id=class_id,
        feature_column_group_id=feature_column_group_id,
        bias_feature_id=bias_feature_id,
        l1_regularization=l1_regularization,
        l2_regularization=l2_regularization,
        tree_complexity_regularization=tree_complexity_regularization,
        min_node_weight=min_node_weight,
        multiclass_strategy=multiclass_strategy,
        weak_learner_type=weak_learner_type,
        name=name)


BuildCategoricalEqualitySplits.__doc__ = build_categorical_equality_splits.__doc__
BuildCategoricalEqualitySplits = _doc_controls.do_not_generate_docs(
    _kwarg_only(BuildCategoricalEqualitySplits))
tf_export("raw_ops.BuildCategoricalEqualitySplits")(
    BuildCategoricalEqualitySplits)


def build_categorical_equality_splits_eager_fallback(
        num_minibatches,
        partition_ids,
        feature_ids,
        gradients,
        hessians,
        class_id,
        feature_column_group_id,
        bias_feature_id,
        l1_regularization,
        l2_regularization,
                               pcan_offset=pcan_offset,
                               gain_bits=gain_bits,
                               enable_log=enable_log,
                               scale_shift=scale_shift,
                               left_context=left_context,
                               right_context=right_context,
                               frame_stride=frame_stride,
                               zero_padding=zero_padding,
                               out_scale=out_scale,
                               out_type=out_type,
                               name=name)


AudioMicrofrontend.__doc__ = audio_microfrontend.__doc__
AudioMicrofrontend = _doc_controls.do_not_generate_docs(
    _kwarg_only(AudioMicrofrontend))
tf_export("raw_ops.AudioMicrofrontend")(AudioMicrofrontend)


def audio_microfrontend_eager_fallback(audio,
                                       sample_rate=16000,
                                       window_size=25,
                                       window_step=10,
                                       num_channels=32,
                                       upper_band_limit=7500,
                                       lower_band_limit=125,
                                       smoothing_bits=10,
                                       even_smoothing=0.025,
                                       odd_smoothing=0.06,
                                       min_signal_remaining=0.05,
                                       enable_pcan=False,
# Beispiel #17 — scraper page separator (not Python code; commented out)
# 0

def DecodeLibsvm(input, num_features, dtype=_dtypes.float32,
                 label_dtype=_dtypes.int64, name=None):
    """Raw-op alias forwarding to `decode_libsvm`."""
    return decode_libsvm(
        input=input, num_features=num_features, dtype=dtype,
        label_dtype=label_dtype, name=name)


# Mirror docstring, hide from generated docs, keyword-only, raw_ops export.
DecodeLibsvm.__doc__ = decode_libsvm.__doc__
DecodeLibsvm = _doc_controls.do_not_generate_docs(_kwarg_only(DecodeLibsvm))
tf_export("raw_ops.DecodeLibsvm")(DecodeLibsvm)


def decode_libsvm_eager_fallback(input,
                                 num_features,
                                 dtype=_dtypes.float32,
                                 label_dtype=_dtypes.int64,
                                 name=None,
                                 ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function decode_libsvm
  """
    _ctx = ctx if ctx else _context.context()
    num_features = _execute.make_int(num_features, "num_features")
    if dtype is None:

def EncodeAudio(sampled_audio, file_format, samples_per_second,
                bits_per_second=192000, name=None):
    """Raw-op alias forwarding to `encode_audio`."""
    return encode_audio(
        sampled_audio=sampled_audio, file_format=file_format,
        samples_per_second=samples_per_second,
        bits_per_second=bits_per_second, name=name)


# Mirror docstring, hide from generated docs, keyword-only, raw_ops export.
EncodeAudio.__doc__ = encode_audio.__doc__
EncodeAudio = _doc_controls.do_not_generate_docs(_kwarg_only(EncodeAudio))
tf_export("raw_ops.EncodeAudio")(EncodeAudio)


def encode_audio_eager_fallback(sampled_audio,
                                file_format,
                                samples_per_second,
                                bits_per_second=192000,
                                name=None,
                                ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function encode_audio
  """
    _ctx = ctx if ctx else _context.context()
    file_format = _execute.make_str(file_format, "file_format")
    samples_per_second = _execute.make_int(samples_per_second,
# Beispiel #19 — scraper page separator (not Python code; commented out)
# 0
                           delta_updates,
                           learner_config,
                           centering_epsilon=0.01,
                           name=None):
    return center_tree_ensemble_bias(tree_ensemble_handle=tree_ensemble_handle,
                                     stamp_token=stamp_token,
                                     next_stamp_token=next_stamp_token,
                                     delta_updates=delta_updates,
                                     learner_config=learner_config,
                                     centering_epsilon=centering_epsilon,
                                     name=name)


CenterTreeEnsembleBias.__doc__ = center_tree_ensemble_bias.__doc__
CenterTreeEnsembleBias = _doc_controls.do_not_generate_docs(
    _kwarg_only(CenterTreeEnsembleBias))
tf_export("raw_ops.CenterTreeEnsembleBias")(CenterTreeEnsembleBias)


def center_tree_ensemble_bias_eager_fallback(tree_ensemble_handle,
                                             stamp_token,
                                             next_stamp_token,
                                             delta_updates,
                                             learner_config,
                                             centering_epsilon=0.01,
                                             name=None,
                                             ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function center_tree_ensemble_bias
  """
    _ctx = ctx if ctx else _context.context()
# Beispiel #20 — scraper page separator (not Python code; commented out)
# 0
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient("ReinterpretStringToFloat", _inputs_flat, _attrs,
                             _result, name)
    _result, = _result
    return _result


def ReinterpretStringToFloat(input_data, name=None):
    """Raw-op alias forwarding to `reinterpret_string_to_float`."""
    kwargs = dict(input_data=input_data, name=name)
    return reinterpret_string_to_float(**kwargs)


# Mirror docstring, hide from generated docs, keyword-only, raw_ops export.
ReinterpretStringToFloat.__doc__ = reinterpret_string_to_float.__doc__
ReinterpretStringToFloat = _doc_controls.do_not_generate_docs(
    _kwarg_only(ReinterpretStringToFloat))
tf_export("raw_ops.ReinterpretStringToFloat")(ReinterpretStringToFloat)


def reinterpret_string_to_float_eager_fallback(input_data,
                                               name=None,
                                               ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function reinterpret_string_to_float
  """
    _ctx = ctx if ctx else _context.context()
    input_data = _ops.convert_to_tensor(input_data, _dtypes.string)
    _inputs_flat = [input_data]
    _attrs = None
    _result = _execute.execute(b"ReinterpretStringToFloat",
                               1,
                        group_size,
                        group_key,
                        instance_key,
                        shape,
                        name=None):
    return collective_bcast_recv(T=T,
                                 group_size=group_size,
                                 group_key=group_key,
                                 instance_key=instance_key,
                                 shape=shape,
                                 name=name)


CollectiveBcastRecv.__doc__ = collective_bcast_recv.__doc__
CollectiveBcastRecv = _doc_controls.do_not_generate_docs(
    _kwarg_only(CollectiveBcastRecv))
tf_export("raw_ops.CollectiveBcastRecv")(CollectiveBcastRecv)


def collective_bcast_recv_eager_fallback(T,
                                         group_size,
                                         group_key,
                                         instance_key,
                                         shape,
                                         name=None,
                                         ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function collective_bcast_recv
  """
    _ctx = ctx if ctx else _context.context()
    T = _execute.make_type(T, "T")
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient("KMC2ChainInitialization", _inputs_flat, _attrs,
                             _result, name)
    _result, = _result
    return _result


def KMC2ChainInitialization(distances, seed, name=None):
    """Raw-op alias forwarding to `kmc2_chain_initialization`."""
    return kmc2_chain_initialization(
        distances=distances, seed=seed, name=name)


# Mirror docstring, hide from generated docs, keyword-only, raw_ops export.
KMC2ChainInitialization.__doc__ = kmc2_chain_initialization.__doc__
KMC2ChainInitialization = _doc_controls.do_not_generate_docs(
    _kwarg_only(KMC2ChainInitialization))
tf_export("raw_ops.KMC2ChainInitialization")(KMC2ChainInitialization)


def kmc2_chain_initialization_eager_fallback(distances,
                                             seed,
                                             name=None,
                                             ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function kmc2_chain_initialization
  """
    _ctx = ctx if ctx else _context.context()
    distances = _ops.convert_to_tensor(distances, _dtypes.float32)
    seed = _ops.convert_to_tensor(seed, _dtypes.int64)
    _inputs_flat = [distances, seed]
    _attrs = None
# Beispiel #23 — scraper page separator (not Python code; commented out)
# 0
          periodic_resample, values=values, shape=shape, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "shape", _op.get_attr("shape"))
  _execute.record_gradient(
      "PeriodicResample", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result

def PeriodicResample(values, shape, name=None):
  """Raw-op alias forwarding to `periodic_resample`."""
  return periodic_resample(
      values=values,
      shape=shape,
      name=name)

# Docstring mirror, doc-hiding, keyword-only wrapping, raw_ops registration.
PeriodicResample.__doc__ = periodic_resample.__doc__
PeriodicResample = _doc_controls.do_not_generate_docs(
    _kwarg_only(PeriodicResample))
tf_export("raw_ops.PeriodicResample")(PeriodicResample)


def periodic_resample_eager_fallback(values, shape, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function periodic_resample
  """
  _ctx = ctx if ctx else _context.context()
  shape = _execute.make_shape(shape, "shape")
  _attr_T, (values,) = _execute.args_to_matching_eager([values], _ctx)
  _inputs_flat = [values]
  _attrs = ("T", _attr_T, "shape", shape)
  _result = _execute.execute(b"PeriodicResample", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
# Beispiel #24 — scraper page separator (not Python code; commented out)
# 0
def EncodeProto(sizes, values, field_names, message_type,
                descriptor_source="local://", name=None):
    """Raw-op alias forwarding to `encode_proto`."""
    return encode_proto(
        sizes=sizes, values=values, field_names=field_names,
        message_type=message_type, descriptor_source=descriptor_source,
        name=name)


# Mirror docstring, hide from generated docs, keyword-only, raw_ops export.
EncodeProto.__doc__ = encode_proto.__doc__
EncodeProto = _doc_controls.do_not_generate_docs(_kwarg_only(EncodeProto))
tf_export("raw_ops.EncodeProto")(EncodeProto)


def encode_proto_eager_fallback(sizes,
                                values,
                                field_names,
                                message_type,
                                descriptor_source="local://",
                                name=None,
                                ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function encode_proto
  """
    _ctx = ctx if ctx else _context.context()
    if not isinstance(field_names, (list, tuple)):
                       internal_type,
                       name=None):
    return sparse_feature_cross(indices=indices,
                                values=values,
                                shapes=shapes,
                                dense=dense,
                                hashed_output=hashed_output,
                                num_buckets=num_buckets,
                                out_type=out_type,
                                internal_type=internal_type,
                                name=name)


SparseFeatureCross.__doc__ = sparse_feature_cross.__doc__
SparseFeatureCross = _doc_controls.do_not_generate_docs(
    _kwarg_only(SparseFeatureCross))
tf_export("raw_ops.SparseFeatureCross")(SparseFeatureCross)


def sparse_feature_cross_eager_fallback(indices,
                                        values,
                                        shapes,
                                        dense,
                                        hashed_output,
                                        num_buckets,
                                        out_type,
                                        internal_type,
                                        name=None,
                                        ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function sparse_feature_cross
# Beispiel #26 — scraper page separator (not Python code; commented out)
# 0
    # Add nodes to the TensorFlow graph.
    _, _, _op = _op_def_lib._apply_op_helper("Fact", name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient("Fact", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result


def Fact(name=None):
    """Raw-op alias forwarding to `fact` (takes no tensor inputs)."""
    kwargs = dict(name=name)
    return fact(**kwargs)


# Mirror docstring, hide from generated docs, keyword-only, raw_ops export.
Fact.__doc__ = fact.__doc__
Fact = _doc_controls.do_not_generate_docs(_kwarg_only(Fact))
tf_export("raw_ops.Fact")(Fact)


def fact_eager_fallback(name=None, ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function fact
  """
    _ctx = ctx if ctx else _context.context()
    _inputs_flat = []
    _attrs = None
    _result = _execute.execute(b"Fact",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                                             tree_config=tree_config,
                                             name=name)
    return _op
    _result = None
    return _result


def TensorForestCreateTreeVariable(tree_handle, tree_config, name=None):
    """Raw-op alias that delegates to `tensor_forest_create_tree_variable`."""
    return tensor_forest_create_tree_variable(
        tree_handle=tree_handle, tree_config=tree_config, name=name)


# Mirror the snake_case op wrapper's docstring onto the raw-op alias.
TensorForestCreateTreeVariable.__doc__ = tensor_forest_create_tree_variable.__doc__
# Hide the alias from generated API docs; _kwarg_only presumably enforces
# keyword-only invocation — then register the alias under tf.raw_ops.
TensorForestCreateTreeVariable = _doc_controls.do_not_generate_docs(
    _kwarg_only(TensorForestCreateTreeVariable))
tf_export("raw_ops.TensorForestCreateTreeVariable")(
    TensorForestCreateTreeVariable)


def tensor_forest_create_tree_variable_eager_fallback(tree_handle,
                                                      tree_config,
                                                      name=None,
                                                      ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function tensor_forest_create_tree_variable
  """
    _ctx = ctx if ctx else _context.context()
    tree_handle = _ops.convert_to_tensor(tree_handle, _dtypes.resource)
    tree_config = _ops.convert_to_tensor(tree_config, _dtypes.string)
    _inputs_flat = [tree_handle, tree_config]
Beispiel #28
0
          xla_cluster_output, input=input, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "XlaClusterOutput", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result

def XlaClusterOutput(input, name=None):
  """Raw-op alias that delegates to `xla_cluster_output`."""
  result = xla_cluster_output(input=input, name=name)
  return result
# Mirror the snake_case op wrapper's docstring onto the raw-op alias.
XlaClusterOutput.__doc__ = xla_cluster_output.__doc__
# Hide the alias from generated API docs; _kwarg_only presumably enforces
# keyword-only invocation — then register the alias under tf.raw_ops.
XlaClusterOutput = _doc_controls.do_not_generate_docs(_kwarg_only(XlaClusterOutput))
tf_export("raw_ops.XlaClusterOutput")(XlaClusterOutput)


def xla_cluster_output_eager_fallback(input, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function xla_cluster_output
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"XlaClusterOutput", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "XlaClusterOutput", _inputs_flat, _attrs, _result, name)
                                    validate_indices=validate_indices,
                                    name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("set_operation", _op.get_attr("set_operation"),
            "validate_indices", _op.get_attr("validate_indices"), "T",
            _op.get_attr("T"))
  _execute.record_gradient(
      "DenseToDenseSetOperation", _inputs_flat, _attrs, _result, name)
  _result = _DenseToDenseSetOperationOutput._make(_result)
  return _result

def DenseToDenseSetOperation(set1, set2, set_operation, validate_indices=True, name=None):
  """Raw-op alias that delegates to `dense_to_dense_set_operation`."""
  return dense_to_dense_set_operation(
      set1=set1,
      set2=set2,
      set_operation=set_operation,
      validate_indices=validate_indices,
      name=name)
# Mirror the snake_case op wrapper's docstring onto the raw-op alias.
DenseToDenseSetOperation.__doc__ = dense_to_dense_set_operation.__doc__
# Hide the alias from generated API docs; _kwarg_only presumably enforces
# keyword-only invocation — then register the alias under tf.raw_ops.
DenseToDenseSetOperation = _doc_controls.do_not_generate_docs(_kwarg_only(DenseToDenseSetOperation))
tf_export("raw_ops.DenseToDenseSetOperation")(DenseToDenseSetOperation)


def dense_to_dense_set_operation_eager_fallback(set1, set2, set_operation, validate_indices=True, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function dense_to_dense_set_operation
  """
  _ctx = ctx if ctx else _context.context()
  set_operation = _execute.make_str(set_operation, "set_operation")
  if validate_indices is None:
    validate_indices = True
  validate_indices = _execute.make_bool(validate_indices, "validate_indices")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([set1, set2], _ctx)
  (set1, set2) = _inputs_T
  _inputs_flat = [set1, set2]
Beispiel #30
0
                             name)
    _result, = _result
    return _result


def KinesisDataset(stream, shard, read_indefinitely, interval, name=None):
    """Raw-op alias that delegates to `kinesis_dataset`."""
    return kinesis_dataset(
        stream=stream, shard=shard, read_indefinitely=read_indefinitely,
        interval=interval, name=name)


# Mirror the snake_case op wrapper's docstring onto the raw-op alias.
KinesisDataset.__doc__ = kinesis_dataset.__doc__
# Hide the alias from generated API docs; _kwarg_only presumably enforces
# keyword-only invocation — then register the alias under tf.raw_ops.
KinesisDataset = _doc_controls.do_not_generate_docs(
    _kwarg_only(KinesisDataset))
tf_export("raw_ops.KinesisDataset")(KinesisDataset)


def kinesis_dataset_eager_fallback(stream,
                                   shard,
                                   read_indefinitely,
                                   interval,
                                   name=None,
                                   ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function kinesis_dataset
  """
    _ctx = ctx if ctx else _context.context()
    stream = _ops.convert_to_tensor(stream, _dtypes.string)
    shard = _ops.convert_to_tensor(shard, _dtypes.string)