Code example #1
File: gen_audio_ops.py Project: otaku-47/Legal_EE
def mfcc_eager_fallback(spectrogram, sample_rate, upper_frequency_limit=4000, lower_frequency_limit=20, filterbank_channel_count=40, dct_coefficient_count=13, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function mfcc
  """
  _ctx = ctx if ctx else _context.context()
  if upper_frequency_limit is None:
    upper_frequency_limit = 4000
  upper_frequency_limit = _execute.make_float(upper_frequency_limit, "upper_frequency_limit")
  if lower_frequency_limit is None:
    lower_frequency_limit = 20
  lower_frequency_limit = _execute.make_float(lower_frequency_limit, "lower_frequency_limit")
  if filterbank_channel_count is None:
    filterbank_channel_count = 40
  filterbank_channel_count = _execute.make_int(filterbank_channel_count, "filterbank_channel_count")
  if dct_coefficient_count is None:
    dct_coefficient_count = 13
  dct_coefficient_count = _execute.make_int(dct_coefficient_count, "dct_coefficient_count")
  spectrogram = _ops.convert_to_tensor(spectrogram, _dtypes.float32)
  sample_rate = _ops.convert_to_tensor(sample_rate, _dtypes.int32)
  _inputs_flat = [spectrogram, sample_rate]
  _attrs = ("upper_frequency_limit", upper_frequency_limit,
  "lower_frequency_limit", lower_frequency_limit, "filterbank_channel_count",
  filterbank_channel_count, "dct_coefficient_count", dct_coefficient_count)
  _result = _execute.execute(b"Mfcc", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Mfcc", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
Code example #2
def mfcc_eager_fallback(spectrogram, sample_rate, upper_frequency_limit, lower_frequency_limit, filterbank_channel_count, dct_coefficient_count, name, ctx):
  if upper_frequency_limit is None:
    upper_frequency_limit = 4000
  upper_frequency_limit = _execute.make_float(upper_frequency_limit, "upper_frequency_limit")
  if lower_frequency_limit is None:
    lower_frequency_limit = 20
  lower_frequency_limit = _execute.make_float(lower_frequency_limit, "lower_frequency_limit")
  if filterbank_channel_count is None:
    filterbank_channel_count = 40
  filterbank_channel_count = _execute.make_int(filterbank_channel_count, "filterbank_channel_count")
  if dct_coefficient_count is None:
    dct_coefficient_count = 13
  dct_coefficient_count = _execute.make_int(dct_coefficient_count, "dct_coefficient_count")
  spectrogram = _ops.convert_to_tensor(spectrogram, _dtypes.float32)
  sample_rate = _ops.convert_to_tensor(sample_rate, _dtypes.int32)
  _inputs_flat = [spectrogram, sample_rate]
  _attrs = ("upper_frequency_limit", upper_frequency_limit,
  "lower_frequency_limit", lower_frequency_limit, "filterbank_channel_count",
  filterbank_channel_count, "dct_coefficient_count", dct_coefficient_count)
  _result = _execute.execute(b"Mfcc", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "Mfcc", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
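Both mfcc fallbacks above bottom out in the raw `Mfcc` kernel, so the call is easiest to see through `tf.raw_ops`. A minimal eager-mode sketch; the input shape `[channels, frames, bins]` and the 16 kHz sample rate are illustrative assumptions, not values from the examples above:

import tensorflow as tf

# Illustrative input: a 513-bin magnitude spectrogram with 100 frames for one
# audio channel, as the AudioSpectrogram op would produce.
spectrogram = tf.zeros([1, 100, 513], dtype=tf.float32)
mfccs = tf.raw_ops.Mfcc(
    spectrogram=spectrogram,
    sample_rate=tf.constant(16000, dtype=tf.int32),
    upper_frequency_limit=4000.0,
    lower_frequency_limit=20.0,
    filterbank_channel_count=40,
    dct_coefficient_count=13)
print(mfccs.shape)  # (1, 100, 13): dct_coefficient_count values per frame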
Code example #3
def sdca_shrink_l1(weights, l1, l2, name=None):
  r"""Applies L1 regularization shrink step on the parameters.

  Args:
    weights: A list of `Tensor` objects with type mutable `float32`.
      a list of vectors where each value is the weight associated with a
      feature group.
    l1: A `float`. Symmetric l1 regularization strength.
    l2: A `float`.
      Symmetric l2 regularization strength. Should be a positive float.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  if not isinstance(weights, (list, tuple)):
    raise TypeError(
        "Expected list for 'weights' argument to "
        "'sdca_shrink_l1' Op, not %r." % weights)
  _attr_num_features = len(weights)
  l1 = _execute.make_float(l1, "l1")
  l2 = _execute.make_float(l2, "l2")
  _ctx = _context.context()
  if _ctx.in_graph_mode():
    _, _, _op = _op_def_lib._apply_op_helper(
        "SdcaShrinkL1", weights=weights, l1=l1, l2=l2, name=name)
    return _op
  else:
    raise RuntimeError(
        "sdca_shrink_l1 op does not support eager execution. Arg 'weights' is a ref.")
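Since `SdcaShrinkL1` only runs in graph mode (its `weights` argument is a ref), a usage sketch needs a TF 1.x-style session and non-resource variables. A minimal sketch, assuming the op exposed via `tf.raw_ops`; the concrete weight values and regularization strengths are made up:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

w = tf.Variable([0.5, -1.2, 0.05], use_resource=False)  # ref-type variable
shrink = tf.raw_ops.SdcaShrinkL1(weights=[w], l1=0.1, l2=1.0)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(shrink)    # applies the L1 shrink step to w in place
    print(sess.run(w))  # weights pulled toward zero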
Code example #4
File: gen_lstm_ops.py Project: FedericoFontana/ray
def lstm_block_cell_eager_fallback(x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function lstm_block_cell
  """
  _ctx = ctx if ctx else _context.context()
  if forget_bias is None:
    forget_bias = 1
  forget_bias = _execute.make_float(forget_bias, "forget_bias")
  if cell_clip is None:
    cell_clip = 3
  cell_clip = _execute.make_float(cell_clip, "cell_clip")
  if use_peephole is None:
    use_peephole = False
  use_peephole = _execute.make_bool(use_peephole, "use_peephole")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b], _ctx)
  (x, cs_prev, h_prev, w, wci, wcf, wco, b) = _inputs_T
  _inputs_flat = [x, cs_prev, h_prev, w, wci, wcf, wco, b]
  _attrs = ("forget_bias", forget_bias, "cell_clip", cell_clip,
  "use_peephole", use_peephole, "T", _attr_T)
  _result = _execute.execute(b"LSTMBlockCell", 7, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "LSTMBlockCell", _inputs_flat, _attrs, _result, name)
  _result = _LSTMBlockCellOutput._make(_result)
  return _result
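`LSTMBlockCell` takes the previous cell and hidden state plus fused gate weights and returns the seven per-gate tensors that `_LSTMBlockCellOutput` wraps. A minimal eager sketch via `tf.raw_ops`; all shapes and the zero initializers are illustrative assumptions:

import tensorflow as tf

batch, input_size, cell_size = 2, 3, 4
x = tf.zeros([batch, input_size], tf.float32)
cs_prev = tf.zeros([batch, cell_size], tf.float32)  # previous cell state
h_prev = tf.zeros([batch, cell_size], tf.float32)   # previous output
w = tf.zeros([input_size + cell_size, 4 * cell_size], tf.float32)
wci = tf.zeros([cell_size], tf.float32)  # peephole weights (unused unless
wcf = tf.zeros([cell_size], tf.float32)  # use_peephole=True)
wco = tf.zeros([cell_size], tf.float32)
b = tf.zeros([4 * cell_size], tf.float32)
i, cs, f, o, ci, co, h = tf.raw_ops.LSTMBlockCell(
    x=x, cs_prev=cs_prev, h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b)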
Code example #5
def build_sparse_inequality_splits_eager_fallback(
        num_minibatches,
        partition_ids,
        bucket_ids,
        gradients,
        hessians,
        bucket_boundaries,
        class_id,
        feature_column_group_id,
        bias_feature_id,
        l1_regularization,
        l2_regularization,
        tree_complexity_regularization,
        min_node_weight,
        multiclass_strategy,
        name=None):
    r"""This is the slowpath function for Eager mode.
  This is for function build_sparse_inequality_splits
  """
    _ctx = _context.context()
    feature_column_group_id = _execute.make_int(feature_column_group_id,
                                                "feature_column_group_id")
    bias_feature_id = _execute.make_int(bias_feature_id, "bias_feature_id")
    l1_regularization = _execute.make_float(l1_regularization,
                                            "l1_regularization")
    l2_regularization = _execute.make_float(l2_regularization,
                                            "l2_regularization")
    tree_complexity_regularization = _execute.make_float(
        tree_complexity_regularization, "tree_complexity_regularization")
    min_node_weight = _execute.make_float(min_node_weight, "min_node_weight")
    multiclass_strategy = _execute.make_int(multiclass_strategy,
                                            "multiclass_strategy")
    num_minibatches = _ops.convert_to_tensor(num_minibatches, _dtypes.int64)
    partition_ids = _ops.convert_to_tensor(partition_ids, _dtypes.int32)
    bucket_ids = _ops.convert_to_tensor(bucket_ids, _dtypes.int64)
    gradients = _ops.convert_to_tensor(gradients, _dtypes.float32)
    hessians = _ops.convert_to_tensor(hessians, _dtypes.float32)
    bucket_boundaries = _ops.convert_to_tensor(bucket_boundaries,
                                               _dtypes.float32)
    class_id = _ops.convert_to_tensor(class_id, _dtypes.int32)
    _inputs_flat = [
        num_minibatches, partition_ids, bucket_ids, gradients, hessians,
        bucket_boundaries, class_id
    ]
    _attrs = ("feature_column_group_id", feature_column_group_id,
              "bias_feature_id", bias_feature_id, "l1_regularization",
              l1_regularization, "l2_regularization", l2_regularization,
              "tree_complexity_regularization", tree_complexity_regularization,
              "min_node_weight", min_node_weight, "multiclass_strategy",
              multiclass_strategy)
    _result = _execute.execute(b"BuildSparseInequalitySplits",
                               3,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("BuildSparseInequalitySplits", _inputs_flat,
                             _attrs, _result, name)
    _result = _BuildSparseInequalitySplitsOutput._make(_result)
    return _result
Code example #6
def block_lstm_eager_fallback(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None):
  r"""This is the slowpath function for Eager mode.
  This is for function block_lstm
  """
  _ctx = _context.context()
  if forget_bias is None:
    forget_bias = 1
  forget_bias = _execute.make_float(forget_bias, "forget_bias")
  if cell_clip is None:
    cell_clip = 3
  cell_clip = _execute.make_float(cell_clip, "cell_clip")
  if use_peephole is None:
    use_peephole = False
  use_peephole = _execute.make_bool(use_peephole, "use_peephole")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b], _ctx)
  (x, cs_prev, h_prev, w, wci, wcf, wco, b) = _inputs_T
  seq_len_max = _ops.convert_to_tensor(seq_len_max, _dtypes.int64)
  _inputs_flat = [seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b]
  _attrs = ("forget_bias", forget_bias, "cell_clip", cell_clip,
  "use_peephole", use_peephole, "T", _attr_T)
  _result = _execute.execute(b"BlockLSTM", 7, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BlockLSTM", _inputs_flat, _attrs, _result, name)
  _result = _BlockLSTMOutput._make(_result)
  return _result
Code example #7
def single_image_random_dot_stereograms_eager_fallback(depth_values, hidden_surface_removal=True, convergence_dots_size=8, dots_per_inch=72, eye_separation=2.5, mu=0.3333, normalize=True, normalize_max=-100, normalize_min=100, border_level=0, number_colors=256, output_image_shape=[1024, 768, 1], output_data_window=[1022, 757], name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function single_image_random_dot_stereograms
  """
  _ctx = ctx if ctx else _context.context()
  if hidden_surface_removal is None:
    hidden_surface_removal = True
  hidden_surface_removal = _execute.make_bool(hidden_surface_removal, "hidden_surface_removal")
  if convergence_dots_size is None:
    convergence_dots_size = 8
  convergence_dots_size = _execute.make_int(convergence_dots_size, "convergence_dots_size")
  if dots_per_inch is None:
    dots_per_inch = 72
  dots_per_inch = _execute.make_int(dots_per_inch, "dots_per_inch")
  if eye_separation is None:
    eye_separation = 2.5
  eye_separation = _execute.make_float(eye_separation, "eye_separation")
  if mu is None:
    mu = 0.3333
  mu = _execute.make_float(mu, "mu")
  if normalize is None:
    normalize = True
  normalize = _execute.make_bool(normalize, "normalize")
  if normalize_max is None:
    normalize_max = -100
  normalize_max = _execute.make_float(normalize_max, "normalize_max")
  if normalize_min is None:
    normalize_min = 100
  normalize_min = _execute.make_float(normalize_min, "normalize_min")
  if border_level is None:
    border_level = 0
  border_level = _execute.make_float(border_level, "border_level")
  if number_colors is None:
    number_colors = 256
  number_colors = _execute.make_int(number_colors, "number_colors")
  if output_image_shape is None:
    output_image_shape = [1024, 768, 1]
  output_image_shape = _execute.make_shape(output_image_shape, "output_image_shape")
  if output_data_window is None:
    output_data_window = [1022, 757]
  output_data_window = _execute.make_shape(output_data_window, "output_data_window")
  _attr_T, (depth_values,) = _execute.args_to_matching_eager([depth_values], _ctx)
  _inputs_flat = [depth_values]
  _attrs = ("T", _attr_T, "hidden_surface_removal", hidden_surface_removal,
  "convergence_dots_size", convergence_dots_size, "dots_per_inch",
  dots_per_inch, "eye_separation", eye_separation, "mu", mu, "normalize",
  normalize, "normalize_max", normalize_max, "normalize_min", normalize_min,
  "border_level", border_level, "number_colors", number_colors,
  "output_image_shape", output_image_shape, "output_data_window",
  output_data_window)
  _result = _execute.execute(b"SingleImageRandomDotStereograms", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "SingleImageRandomDotStereograms", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
Code example #9
def fixed_unigram_candidate_sampler_eager_fallback(
        true_classes, num_true, num_sampled, unique, range_max, vocab_file,
        distortion, num_reserved_ids, num_shards, shard, unigrams, seed, seed2,
        name, ctx):
    num_true = _execute.make_int(num_true, "num_true")
    num_sampled = _execute.make_int(num_sampled, "num_sampled")
    unique = _execute.make_bool(unique, "unique")
    range_max = _execute.make_int(range_max, "range_max")
    if vocab_file is None:
        vocab_file = ""
    vocab_file = _execute.make_str(vocab_file, "vocab_file")
    if distortion is None:
        distortion = 1
    distortion = _execute.make_float(distortion, "distortion")
    if num_reserved_ids is None:
        num_reserved_ids = 0
    num_reserved_ids = _execute.make_int(num_reserved_ids, "num_reserved_ids")
    if num_shards is None:
        num_shards = 1
    num_shards = _execute.make_int(num_shards, "num_shards")
    if shard is None:
        shard = 0
    shard = _execute.make_int(shard, "shard")
    if unigrams is None:
        unigrams = []
    if not isinstance(unigrams, (list, tuple)):
        raise TypeError("Expected list for 'unigrams' argument to "
                        "'fixed_unigram_candidate_sampler' Op, not %r." %
                        unigrams)
    unigrams = [_execute.make_float(_f, "unigrams") for _f in unigrams]
    if seed is None:
        seed = 0
    seed = _execute.make_int(seed, "seed")
    if seed2 is None:
        seed2 = 0
    seed2 = _execute.make_int(seed2, "seed2")
    true_classes = _ops.convert_to_tensor(true_classes, _dtypes.int64)
    _inputs_flat = [true_classes]
    _attrs = ("num_true", num_true, "num_sampled", num_sampled, "unique",
              unique, "range_max", range_max, "vocab_file", vocab_file,
              "distortion", distortion, "num_reserved_ids", num_reserved_ids,
              "num_shards", num_shards, "shard", shard, "unigrams", unigrams,
              "seed", seed, "seed2", seed2)
    _result = _execute.execute(b"FixedUnigramCandidateSampler",
                               3,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=ctx,
                               name=name)
    if _execute.must_record_gradient():
        _execute.record_gradient("FixedUnigramCandidateSampler", _inputs_flat,
                                 _attrs, _result)
    _result = _FixedUnigramCandidateSamplerOutput._make(_result)
    return _result
Code example #10
def debug_numeric_summary_eager_fallback(input,
                                         device_name="",
                                         tensor_name="",
                                         debug_urls=[],
                                         lower_bound=float('-inf'),
                                         upper_bound=float('inf'),
                                         mute_if_healthy=False,
                                         gated_grpc=False,
                                         name=None):
    r"""This is the slowpath function for Eager mode.
  This is for function debug_numeric_summary
  """
    _ctx = _context.context()
    if device_name is None:
        device_name = ""
    device_name = _execute.make_str(device_name, "device_name")
    if tensor_name is None:
        tensor_name = ""
    tensor_name = _execute.make_str(tensor_name, "tensor_name")
    if debug_urls is None:
        debug_urls = []
    if not isinstance(debug_urls, (list, tuple)):
        raise TypeError("Expected list for 'debug_urls' argument to "
                        "'debug_numeric_summary' Op, not %r." % debug_urls)
    debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
    if lower_bound is None:
        lower_bound = float('-inf')
    lower_bound = _execute.make_float(lower_bound, "lower_bound")
    if upper_bound is None:
        upper_bound = float('inf')
    upper_bound = _execute.make_float(upper_bound, "upper_bound")
    if mute_if_healthy is None:
        mute_if_healthy = False
    mute_if_healthy = _execute.make_bool(mute_if_healthy, "mute_if_healthy")
    if gated_grpc is None:
        gated_grpc = False
    gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
    _attr_T, (input, ) = _execute.args_to_matching_eager([input], _ctx)
    _inputs_flat = [input]
    _attrs = ("T", _attr_T, "device_name", device_name, "tensor_name",
              tensor_name, "debug_urls", debug_urls, "lower_bound",
              lower_bound, "upper_bound", upper_bound, "mute_if_healthy",
              mute_if_healthy, "gated_grpc", gated_grpc)
    _result = _execute.execute(b"DebugNumericSummary",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("DebugNumericSummary", _inputs_flat, _attrs,
                             _result, name)
    _result, = _result
    return _result
Code example #11
def collective_gather_v2_eager_fallback(input, group_size, group_key,
                                        instance_key, communication_hint,
                                        timeout_seconds, name, ctx):
    if communication_hint is None:
        communication_hint = "auto"
    communication_hint = _execute.make_str(communication_hint,
                                           "communication_hint")
    if timeout_seconds is None:
        timeout_seconds = 0
    timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
    _attr_T, (input, ) = _execute.args_to_matching_eager([input], ctx, [
        _dtypes.float32,
        _dtypes.half,
        _dtypes.float64,
        _dtypes.int32,
        _dtypes.int64,
    ])
    group_size = _ops.convert_to_tensor(group_size, _dtypes.int32)
    group_key = _ops.convert_to_tensor(group_key, _dtypes.int32)
    instance_key = _ops.convert_to_tensor(instance_key, _dtypes.int32)
    _inputs_flat = [input, group_size, group_key, instance_key]
    _attrs = ("T", _attr_T, "communication_hint", communication_hint,
              "timeout_seconds", timeout_seconds)
    _result = _execute.execute(b"CollectiveGatherV2",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=ctx,
                               name=name)
    if _execute.must_record_gradient():
        _execute.record_gradient("CollectiveGatherV2", _inputs_flat, _attrs,
                                 _result)
    _result, = _result
    return _result
Code example #12
def collective_bcast_recv_eager_fallback(T, group_size, group_key,
                                         instance_key, shape,
                                         communication_hint, timeout_seconds,
                                         name, ctx):
    T = _execute.make_type(T, "T")
    group_size = _execute.make_int(group_size, "group_size")
    group_key = _execute.make_int(group_key, "group_key")
    instance_key = _execute.make_int(instance_key, "instance_key")
    shape = _execute.make_shape(shape, "shape")
    if communication_hint is None:
        communication_hint = "auto"
    communication_hint = _execute.make_str(communication_hint,
                                           "communication_hint")
    if timeout_seconds is None:
        timeout_seconds = 0
    timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
    _inputs_flat = []
    _attrs = ("T", T, "group_size", group_size, "group_key", group_key,
              "instance_key", instance_key, "shape", shape,
              "communication_hint", communication_hint, "timeout_seconds",
              timeout_seconds)
    _result = _execute.execute(b"CollectiveBcastRecv",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=ctx,
                               name=name)
    if _execute.must_record_gradient():
        _execute.record_gradient("CollectiveBcastRecv", _inputs_flat, _attrs,
                                 _result)
    _result, = _result
    return _result
Code example #13
def audio_summary_eager_fallback(tag,
                                 tensor,
                                 sample_rate,
                                 max_outputs=3,
                                 name=None):
    r"""This is the slowpath function for Eager mode.
  This is for function audio_summary
  """
    _ctx = _context.context()
    sample_rate = _execute.make_float(sample_rate, "sample_rate")
    if max_outputs is None:
        max_outputs = 3
    max_outputs = _execute.make_int(max_outputs, "max_outputs")
    tag = _ops.convert_to_tensor(tag, _dtypes.string)
    tensor = _ops.convert_to_tensor(tensor, _dtypes.float32)
    _inputs_flat = [tag, tensor]
    _attrs = ("sample_rate", sample_rate, "max_outputs", max_outputs)
    _result = _execute.execute(b"AudioSummary",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("AudioSummary", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result
Code example #14
def collective_gather_eager_fallback(input, group_size, group_key,
                                     instance_key, shape, communication_hint,
                                     timeout_seconds, name, ctx):
    group_size = _execute.make_int(group_size, "group_size")
    group_key = _execute.make_int(group_key, "group_key")
    instance_key = _execute.make_int(instance_key, "instance_key")
    shape = _execute.make_shape(shape, "shape")
    if communication_hint is None:
        communication_hint = "auto"
    communication_hint = _execute.make_str(communication_hint,
                                           "communication_hint")
    if timeout_seconds is None:
        timeout_seconds = 0
    timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
    _attr_T, (input, ) = _execute.args_to_matching_eager([input], ctx)
    _inputs_flat = [input]
    _attrs = ("T", _attr_T, "group_size", group_size, "group_key", group_key,
              "instance_key", instance_key, "shape", shape,
              "communication_hint", communication_hint, "timeout_seconds",
              timeout_seconds)
    _result = _execute.execute(b"CollectiveGather",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=ctx,
                               name=name)
    if _execute.must_record_gradient():
        _execute.record_gradient("CollectiveGather", _inputs_flat, _attrs,
                                 _result)
    _result, = _result
    return _result
Code example #15
def _audio_summary(tag, tensor, sample_rate, max_outputs=3, name=None):
    r"""Outputs a `Summary` protocol buffer with audio.

  The summary has up to `max_outputs` summary values containing audio. The
  audio is built from `tensor` which must be 3-D with shape `[batch_size,
  frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
  assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.

  The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
  build the `tag` of the summary values:

  *  If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
  *  If `max_outputs` is greater than 1, the summary value tags are
     generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.

  Args:
    tag: A `Tensor` of type `string`.
      Scalar. Used to build the `tag` attribute of the summary values.
    tensor: A `Tensor` of type `float32`. 2-D of shape `[batch_size, frames]`.
    sample_rate: A `float`. The sample rate of the signal in hertz.
    max_outputs: An optional `int` that is `>= 1`. Defaults to `3`.
      Max number of batch elements to generate audio for.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`. Scalar. Serialized `Summary` protocol buffer.
  """
    sample_rate = _execute.make_float(sample_rate, "sample_rate")
    if max_outputs is None:
        max_outputs = 3
    max_outputs = _execute.make_int(max_outputs, "max_outputs")
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper("AudioSummary",
                                                 tag=tag,
                                                 tensor=tensor,
                                                 sample_rate=sample_rate,
                                                 max_outputs=max_outputs,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("sample_rate", _op.get_attr("sample_rate"), "max_outputs",
                  _op.get_attr("max_outputs"))
    else:
        tag = _ops.convert_to_tensor(tag, _dtypes.string)
        tensor = _ops.convert_to_tensor(tensor, _dtypes.float32)
        _inputs_flat = [tag, tensor]
        _attrs = ("sample_rate", sample_rate, "max_outputs", max_outputs)
        _result = _execute.execute(b"AudioSummary",
                                   1,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("AudioSummary", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result
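The tag scheme the docstring describes is easy to check directly. A minimal graph-mode sketch, assuming the op via `tf.raw_ops`; the tag string and the random audio batch are made up:

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

# Two batch elements of one second of float audio in [-1.0, 1.0] at 16 kHz.
audio = tf.constant(np.random.uniform(-1.0, 1.0, (2, 16000)).astype(np.float32))
summary = tf.raw_ops.AudioSummary(
    tag=tf.constant("speech"), tensor=audio, sample_rate=16000.0, max_outputs=2)
# With max_outputs=2, the serialized Summary holds value tags
# 'speech/audio/0' and 'speech/audio/1'.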
Code example #16
def sdca_shrink_l1(weights, l1, l2, name=None):
    r"""Applies L1 regularization shrink step on the parameters.

  Args:
    weights: A list of `Tensor` objects with type mutable `float32`.
      a list of vectors where each value is the weight associated with a
      feature group.
    l1: A `float`. Symmetric l1 regularization strength.
    l2: A `float`.
      Symmetric l2 regularization strength. Should be a positive float.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
    _ctx = _context._context or _context.context()
    tld = _ctx._thread_local_data
    if tld.is_eager:
        raise RuntimeError(
            "sdca_shrink_l1 op does not support eager execution. Arg 'weights' is a ref."
        )
    # Add nodes to the TensorFlow graph.
    if not isinstance(weights, (list, tuple)):
        raise TypeError("Expected list for 'weights' argument to "
                        "'sdca_shrink_l1' Op, not %r." % weights)
    _attr_num_features = len(weights)
    l1 = _execute.make_float(l1, "l1")
    l2 = _execute.make_float(l2, "l2")
    try:
        _, _, _op, _outputs = _op_def_library._apply_op_helper("SdcaShrinkL1",
                                                               weights=weights,
                                                               l1=l1,
                                                               l2=l2,
                                                               name=name)
    except (TypeError, ValueError):
        result = _dispatch.dispatch(sdca_shrink_l1,
                                    weights=weights,
                                    l1=l1,
                                    l2=l2,
                                    name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
            return result
        raise
    return _op
Code example #17
def collective_reduce_eager_fallback(input, group_size, group_key,
                                     instance_key, merge_op, final_op,
                                     subdiv_offsets, wait_for,
                                     communication_hint, timeout_seconds, name,
                                     ctx):
    group_size = _execute.make_int(group_size, "group_size")
    group_key = _execute.make_int(group_key, "group_key")
    instance_key = _execute.make_int(instance_key, "instance_key")
    merge_op = _execute.make_str(merge_op, "merge_op")
    final_op = _execute.make_str(final_op, "final_op")
    if not isinstance(subdiv_offsets, (list, tuple)):
        raise TypeError("Expected list for 'subdiv_offsets' argument to "
                        "'collective_reduce' Op, not %r." % subdiv_offsets)
    subdiv_offsets = [
        _execute.make_int(_i, "subdiv_offsets") for _i in subdiv_offsets
    ]
    if wait_for is None:
        wait_for = []
    if not isinstance(wait_for, (list, tuple)):
        raise TypeError("Expected list for 'wait_for' argument to "
                        "'collective_reduce' Op, not %r." % wait_for)
    wait_for = [_execute.make_int(_i, "wait_for") for _i in wait_for]
    if communication_hint is None:
        communication_hint = "auto"
    communication_hint = _execute.make_str(communication_hint,
                                           "communication_hint")
    if timeout_seconds is None:
        timeout_seconds = 0
    timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
    _attr_T, (input, ) = _execute.args_to_matching_eager([input], ctx, [
        _dtypes.float32,
        _dtypes.half,
        _dtypes.float64,
        _dtypes.int32,
        _dtypes.int64,
    ])
    _inputs_flat = [input]
    _attrs = ("T", _attr_T, "group_size", group_size, "group_key", group_key,
              "instance_key", instance_key, "merge_op", merge_op, "final_op",
              final_op, "subdiv_offsets", subdiv_offsets, "wait_for", wait_for,
              "communication_hint", communication_hint, "timeout_seconds",
              timeout_seconds)
    _result = _execute.execute(b"CollectiveReduce",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=ctx,
                               name=name)
    if _execute.must_record_gradient():
        _execute.record_gradient("CollectiveReduce", _inputs_flat, _attrs,
                                 _result)
    _result, = _result
    return _result
Code example #18
File: gen_rnn_ops.py Project: RajeshM-DS/package
def lstm_block_cell_eager_fallback(x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias, cell_clip, use_peephole, name, ctx):
  if forget_bias is None:
    forget_bias = 1
  forget_bias = _execute.make_float(forget_bias, "forget_bias")
  if cell_clip is None:
    cell_clip = 3
  cell_clip = _execute.make_float(cell_clip, "cell_clip")
  if use_peephole is None:
    use_peephole = False
  use_peephole = _execute.make_bool(use_peephole, "use_peephole")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b], ctx)
  (x, cs_prev, h_prev, w, wci, wcf, wco, b) = _inputs_T
  _inputs_flat = [x, cs_prev, h_prev, w, wci, wcf, wco, b]
  _attrs = ("forget_bias", forget_bias, "cell_clip", cell_clip,
  "use_peephole", use_peephole, "T", _attr_T)
  _result = _execute.execute(b"LSTMBlockCell", 7, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "LSTMBlockCell", _inputs_flat, _attrs, _result)
  _result = _LSTMBlockCellOutput._make(_result)
  return _result
Code example #19
def center_tree_ensemble_bias(tree_ensemble_handle, stamp_token, next_stamp_token, delta_updates, learner_config, centering_epsilon=0.01, name=None):
  r"""Centers the tree ensemble bias before adding trees based on feature splits.

  Args:
    tree_ensemble_handle: A `Tensor` of type `resource`.
      Handle to the ensemble variable.
    stamp_token: A `Tensor` of type `int64`.
      Stamp token for validating operation consistency.
    next_stamp_token: A `Tensor` of type `int64`.
      Stamp token to be used for the next iteration.
    delta_updates: A `Tensor` of type `float32`.
      Rank 1 Tensor containing delta updates per bias dimension.
    learner_config: A `string`.
      Config for the learner of type LearnerConfig proto.
    centering_epsilon: An optional `float`. Defaults to `0.01`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
    Scalar indicating whether more centering is needed.
  """
  learner_config = _execute.make_str(learner_config, "learner_config")
  if centering_epsilon is None:
    centering_epsilon = 0.01
  centering_epsilon = _execute.make_float(centering_epsilon, "centering_epsilon")
  _ctx = _context.context()
  if _ctx.in_graph_mode():
    _, _, _op = _op_def_lib._apply_op_helper(
        "CenterTreeEnsembleBias", tree_ensemble_handle=tree_ensemble_handle,
        stamp_token=stamp_token, next_stamp_token=next_stamp_token,
        delta_updates=delta_updates, learner_config=learner_config,
        centering_epsilon=centering_epsilon, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("learner_config", _op.get_attr("learner_config"),
              "centering_epsilon", _op.get_attr("centering_epsilon"))
  else:
    tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
    stamp_token = _ops.convert_to_tensor(stamp_token, _dtypes.int64)
    next_stamp_token = _ops.convert_to_tensor(next_stamp_token, _dtypes.int64)
    delta_updates = _ops.convert_to_tensor(delta_updates, _dtypes.float32)
    _inputs_flat = [tree_ensemble_handle, stamp_token, next_stamp_token, delta_updates]
    _attrs = ("learner_config", learner_config, "centering_epsilon",
              centering_epsilon)
    _result = _execute.execute(b"CenterTreeEnsembleBias", 1,
                               inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                               name=name)
  _execute.record_gradient(
      "CenterTreeEnsembleBias", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
Code example #20
def audio_summary_eager_fallback(tag, tensor, sample_rate, max_outputs, name, ctx):
  sample_rate = _execute.make_float(sample_rate, "sample_rate")
  if max_outputs is None:
    max_outputs = 3
  max_outputs = _execute.make_int(max_outputs, "max_outputs")
  tag = _ops.convert_to_tensor(tag, _dtypes.string)
  tensor = _ops.convert_to_tensor(tensor, _dtypes.float32)
  _inputs_flat = [tag, tensor]
  _attrs = ("sample_rate", sample_rate, "max_outputs", max_outputs)
  _result = _execute.execute(b"AudioSummary", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "AudioSummary", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
Code example #21
File: gen_rnn_ops.py Project: RajeshM-DS/package
def block_lstmv2_eager_fallback(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, cell_clip, use_peephole, name, ctx):
  if cell_clip is None:
    cell_clip = 0
  cell_clip = _execute.make_float(cell_clip, "cell_clip")
  if use_peephole is None:
    use_peephole = False
  use_peephole = _execute.make_bool(use_peephole, "use_peephole")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b], ctx)
  (x, cs_prev, h_prev, w, wci, wcf, wco, b) = _inputs_T
  seq_len_max = _ops.convert_to_tensor(seq_len_max, _dtypes.int64)
  _inputs_flat = [seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b]
  _attrs = ("cell_clip", cell_clip, "use_peephole", use_peephole, "T",
  _attr_T)
  _result = _execute.execute(b"BlockLSTMV2", 7, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "BlockLSTMV2", _inputs_flat, _attrs, _result)
  _result = _BlockLSTMV2Output._make(_result)
  return _result
Code example #22
File: gen_training_ops.py Project: whqkdhfh13/sswp
def center_tree_ensemble_bias_eager_fallback(tree_ensemble_handle, stamp_token, next_stamp_token, delta_updates, learner_config, centering_epsilon=0.01, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function center_tree_ensemble_bias
  """
  _ctx = ctx if ctx else _context.context()
  learner_config = _execute.make_str(learner_config, "learner_config")
  if centering_epsilon is None:
    centering_epsilon = 0.01
  centering_epsilon = _execute.make_float(centering_epsilon, "centering_epsilon")
  tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
  stamp_token = _ops.convert_to_tensor(stamp_token, _dtypes.int64)
  next_stamp_token = _ops.convert_to_tensor(next_stamp_token, _dtypes.int64)
  delta_updates = _ops.convert_to_tensor(delta_updates, _dtypes.float32)
  _inputs_flat = [tree_ensemble_handle, stamp_token, next_stamp_token, delta_updates]
  _attrs = ("learner_config", learner_config, "centering_epsilon",
  centering_epsilon)
  _result = _execute.execute(b"CenterTreeEnsembleBias", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "CenterTreeEnsembleBias", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
Code example #24
def center_tree_ensemble_bias(tree_ensemble_handle,
                              stamp_token,
                              next_stamp_token,
                              delta_updates,
                              learner_config,
                              centering_epsilon=0.01,
                              name=None):
    r"""Centers the tree ensemble bias before adding trees based on feature splits.

  Args:
    tree_ensemble_handle: A `Tensor` of type `resource`.
      Handle to the ensemble variable.
    stamp_token: A `Tensor` of type `int64`.
      Stamp token for validating operation consistency.
    next_stamp_token: A `Tensor` of type `int64`.
      Stamp token to be used for the next iteration.
    delta_updates: A `Tensor` of type `float32`.
      Rank 1 Tensor containing delta updates per bias dimension.
    learner_config: A `string`.
      Config for the learner of type LearnerConfig proto.
    centering_epsilon: An optional `float`. Defaults to `0.01`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
    Scalar indicating whether more centering is needed.
  """
    _ctx = _context.context()
    if not _ctx.executing_eagerly():
        learner_config = _execute.make_str(learner_config, "learner_config")
        if centering_epsilon is None:
            centering_epsilon = 0.01
        centering_epsilon = _execute.make_float(centering_epsilon,
                                                "centering_epsilon")
        _, _, _op = _op_def_lib._apply_op_helper(
            "CenterTreeEnsembleBias",
            tree_ensemble_handle=tree_ensemble_handle,
            stamp_token=stamp_token,
            next_stamp_token=next_stamp_token,
            delta_updates=delta_updates,
            learner_config=learner_config,
            centering_epsilon=centering_epsilon,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("learner_config", _op.get_attr("learner_config"),
                  "centering_epsilon", _op.get_attr("centering_epsilon"))
        _execute.record_gradient("CenterTreeEnsembleBias", _inputs_flat,
                                 _attrs, _result, name)
        _result, = _result
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._handle, _ctx.device_name, "CenterTreeEnsembleBias", name,
                _ctx._post_execution_callbacks, tree_ensemble_handle,
                stamp_token, next_stamp_token, delta_updates, "learner_config",
                learner_config, "centering_epsilon", centering_epsilon)
            return _result
        except _core._FallbackException:
            return center_tree_ensemble_bias_eager_fallback(
                tree_ensemble_handle,
                stamp_token,
                next_stamp_token,
                delta_updates,
                learner_config=learner_config,
                centering_epsilon=centering_epsilon,
                name=name)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
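Example #24 above shows the full three-way dispatch that all of these generated wrappers share: fast-path C execution, Python-side eager fallback, and graph construction. A schematic sketch with a hypothetical op `MyOp`; only the structure is meant, using the same helper names as the examples above:

def my_op(x, attr=1.0, name=None):
    _ctx = _context.context()
    if _ctx.executing_eagerly():
        try:
            # Fast path: one C call that runs the op without Python overhead.
            return _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._handle, _ctx.device_name, "MyOp", name,
                _ctx._post_execution_callbacks, x, "attr", attr)
        except _core._FallbackException:
            # Slow path: Python-side attr coercion, then _execute.execute.
            return my_op_eager_fallback(x, attr=attr, name=name, ctx=_ctx)
    # Graph mode: add a node to the graph instead of executing.
    _, _, _op = _op_def_lib._apply_op_helper("MyOp", x=x, attr=attr, name=name)
    return _op.outputs[0]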
Code example #25
def audio_microfrontend_eager_fallback(audio, sample_rate, window_size, window_step, num_channels, upper_band_limit, lower_band_limit, smoothing_bits, even_smoothing, odd_smoothing, min_signal_remaining, enable_pcan, pcan_strength, pcan_offset, gain_bits, enable_log, scale_shift, left_context, right_context, frame_stride, zero_padding, out_scale, out_type, name, ctx):
  if sample_rate is None:
    sample_rate = 16000
  sample_rate = _execute.make_int(sample_rate, "sample_rate")
  if window_size is None:
    window_size = 25
  window_size = _execute.make_int(window_size, "window_size")
  if window_step is None:
    window_step = 10
  window_step = _execute.make_int(window_step, "window_step")
  if num_channels is None:
    num_channels = 32
  num_channels = _execute.make_int(num_channels, "num_channels")
  if upper_band_limit is None:
    upper_band_limit = 7500
  upper_band_limit = _execute.make_float(upper_band_limit, "upper_band_limit")
  if lower_band_limit is None:
    lower_band_limit = 125
  lower_band_limit = _execute.make_float(lower_band_limit, "lower_band_limit")
  if smoothing_bits is None:
    smoothing_bits = 10
  smoothing_bits = _execute.make_int(smoothing_bits, "smoothing_bits")
  if even_smoothing is None:
    even_smoothing = 0.025
  even_smoothing = _execute.make_float(even_smoothing, "even_smoothing")
  if odd_smoothing is None:
    odd_smoothing = 0.06
  odd_smoothing = _execute.make_float(odd_smoothing, "odd_smoothing")
  if min_signal_remaining is None:
    min_signal_remaining = 0.05
  min_signal_remaining = _execute.make_float(min_signal_remaining, "min_signal_remaining")
  if enable_pcan is None:
    enable_pcan = False
  enable_pcan = _execute.make_bool(enable_pcan, "enable_pcan")
  if pcan_strength is None:
    pcan_strength = 0.95
  pcan_strength = _execute.make_float(pcan_strength, "pcan_strength")
  if pcan_offset is None:
    pcan_offset = 80
  pcan_offset = _execute.make_float(pcan_offset, "pcan_offset")
  if gain_bits is None:
    gain_bits = 21
  gain_bits = _execute.make_int(gain_bits, "gain_bits")
  if enable_log is None:
    enable_log = True
  enable_log = _execute.make_bool(enable_log, "enable_log")
  if scale_shift is None:
    scale_shift = 6
  scale_shift = _execute.make_int(scale_shift, "scale_shift")
  if left_context is None:
    left_context = 0
  left_context = _execute.make_int(left_context, "left_context")
  if right_context is None:
    right_context = 0
  right_context = _execute.make_int(right_context, "right_context")
  if frame_stride is None:
    frame_stride = 1
  frame_stride = _execute.make_int(frame_stride, "frame_stride")
  if zero_padding is None:
    zero_padding = False
  zero_padding = _execute.make_bool(zero_padding, "zero_padding")
  if out_scale is None:
    out_scale = 1
  out_scale = _execute.make_int(out_scale, "out_scale")
  if out_type is None:
    out_type = _dtypes.uint16
  out_type = _execute.make_type(out_type, "out_type")
  audio = _ops.convert_to_tensor(audio, _dtypes.int16)
  _inputs_flat = [audio]
  _attrs = ("sample_rate", sample_rate, "window_size", window_size,
  "window_step", window_step, "num_channels", num_channels,
  "upper_band_limit", upper_band_limit, "lower_band_limit", lower_band_limit,
  "smoothing_bits", smoothing_bits, "even_smoothing", even_smoothing,
  "odd_smoothing", odd_smoothing, "min_signal_remaining",
  min_signal_remaining, "enable_pcan", enable_pcan, "pcan_strength",
  pcan_strength, "pcan_offset", pcan_offset, "gain_bits", gain_bits,
  "enable_log", enable_log, "scale_shift", scale_shift, "left_context",
  left_context, "right_context", right_context, "frame_stride", frame_stride,
  "zero_padding", zero_padding, "out_scale", out_scale, "out_type", out_type)
  _result = _execute.execute(b"AudioMicrofrontend", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "AudioMicrofrontend", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
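Example #26 below documents the op's arguments in full; in practice the wrapper is imported from the TF Lite microfrontend package. A minimal sketch, assuming the op is compiled into your TensorFlow build and that the module path matches your version:

import tensorflow as tf
# Assumed module path; the microfrontend op is not part of core tf.raw_ops.
from tensorflow.lite.experimental.microfrontend.python.ops import (
    audio_microfrontend_op as frontend_op)

audio = tf.zeros([16000], dtype=tf.int16)  # one second of silence at 16 kHz
filterbanks = frontend_op.audio_microfrontend(
    audio, sample_rate=16000, window_size=25, window_step=10, num_channels=32)
# Each row of `filterbanks` is one time frame, each column one channel.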
Code example #26
def audio_microfrontend(audio, sample_rate=16000, window_size=25, window_step=10, num_channels=32, upper_band_limit=7500, lower_band_limit=125, smoothing_bits=10, even_smoothing=0.025, odd_smoothing=0.06, min_signal_remaining=0.05, enable_pcan=False, pcan_strength=0.95, pcan_offset=80, gain_bits=21, enable_log=True, scale_shift=6, left_context=0, right_context=0, frame_stride=1, zero_padding=False, out_scale=1, out_type=_dtypes.uint16, name=None):
  r"""Audio Microfrontend Op.

  This Op converts a sequence of audio data into one or more
  feature vectors containing filterbanks of the input. The
  conversion process uses a lightweight library to perform:

  1. A slicing window function
  2. Short-time FFTs
  3. Filterbank calculations
  4. Noise reduction
  5. PCAN Auto Gain Control
  6. Logarithmic scaling

  Arguments
    audio: 1D Tensor, int16 audio data in temporal ordering.
    sample_rate: Integer, the sample rate of the audio in Hz.
    window_size: Integer, length of desired time frames in ms.
    window_step: Integer, length of step size for the next frame in ms.
    num_channels: Integer, the number of filterbank channels to use.
    upper_band_limit: Float, the highest frequency included in the filterbanks.
    lower_band_limit: Float, the lowest frequency included in the filterbanks.
    smoothing_bits: Int, scale up signal by 2^(smoothing_bits) before reduction.
    even_smoothing: Float, smoothing coefficient for even-numbered channels.
    odd_smoothing: Float, smoothing coefficient for odd-numbered channels.
    min_signal_remaining: Float, fraction of signal to preserve in smoothing.
    enable_pcan: Bool, enable PCAN auto gain control.
    pcan_strength: Float, gain normalization exponent.
    pcan_offset: Float, positive value added in the normalization denominator.
    gain_bits: Int, number of fractional bits in the gain.
    enable_log: Bool, enable logarithmic scaling of filterbanks.
    scale_shift: Integer, scale filterbanks by 2^(scale_shift).
    left_context: Integer, number of preceding frames to attach to each frame.
    right_context: Integer, number of following frames to attach to each frame.
    frame_stride: Integer, M frames to skip over, where output[n] = frame[n*M].
    zero_padding: Bool, if left/right context is out-of-bounds, attach frame of
                  zeroes. Otherwise, frame[0] or frame[size-1] will be copied.
    out_scale: Integer, divide all filterbanks by this number.
    out_type: DType, type of the output Tensor, defaults to UINT16.

  Returns
    filterbanks: 2D Tensor, each row is a time frame, each column is a channel.

  Args:
    audio: A `Tensor` of type `int16`.
    sample_rate: An optional `int`. Defaults to `16000`.
    window_size: An optional `int`. Defaults to `25`.
    window_step: An optional `int`. Defaults to `10`.
    num_channels: An optional `int`. Defaults to `32`.
    upper_band_limit: An optional `float`. Defaults to `7500`.
    lower_band_limit: An optional `float`. Defaults to `125`.
    smoothing_bits: An optional `int`. Defaults to `10`.
    even_smoothing: An optional `float`. Defaults to `0.025`.
    odd_smoothing: An optional `float`. Defaults to `0.06`.
    min_signal_remaining: An optional `float`. Defaults to `0.05`.
    enable_pcan: An optional `bool`. Defaults to `False`.
    pcan_strength: An optional `float`. Defaults to `0.95`.
    pcan_offset: An optional `float`. Defaults to `80`.
    gain_bits: An optional `int`. Defaults to `21`.
    enable_log: An optional `bool`. Defaults to `True`.
    scale_shift: An optional `int`. Defaults to `6`.
    left_context: An optional `int`. Defaults to `0`.
    right_context: An optional `int`. Defaults to `0`.
    frame_stride: An optional `int`. Defaults to `1`.
    zero_padding: An optional `bool`. Defaults to `False`.
    out_scale: An optional `int`. Defaults to `1`.
    out_type: An optional `tf.DType` from: `tf.uint16, tf.float32`. Defaults to `tf.uint16`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `out_type`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "AudioMicrofrontend", name,
        tld.op_callbacks, audio, "sample_rate", sample_rate, "window_size",
        window_size, "window_step", window_step, "num_channels", num_channels,
        "upper_band_limit", upper_band_limit, "lower_band_limit",
        lower_band_limit, "smoothing_bits", smoothing_bits, "even_smoothing",
        even_smoothing, "odd_smoothing", odd_smoothing,
        "min_signal_remaining", min_signal_remaining, "enable_pcan",
        enable_pcan, "pcan_strength", pcan_strength, "pcan_offset",
        pcan_offset, "gain_bits", gain_bits, "enable_log", enable_log,
        "scale_shift", scale_shift, "left_context", left_context,
        "right_context", right_context, "frame_stride", frame_stride,
        "zero_padding", zero_padding, "out_scale", out_scale, "out_type",
        out_type)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return audio_microfrontend_eager_fallback(
          audio, sample_rate=sample_rate, window_size=window_size,
          window_step=window_step, num_channels=num_channels,
          upper_band_limit=upper_band_limit,
          lower_band_limit=lower_band_limit, smoothing_bits=smoothing_bits,
          even_smoothing=even_smoothing, odd_smoothing=odd_smoothing,
          min_signal_remaining=min_signal_remaining, enable_pcan=enable_pcan,
          pcan_strength=pcan_strength, pcan_offset=pcan_offset,
          gain_bits=gain_bits, enable_log=enable_log, scale_shift=scale_shift,
          left_context=left_context, right_context=right_context,
          frame_stride=frame_stride, zero_padding=zero_padding,
          out_scale=out_scale, out_type=out_type, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      result = _dispatch.dispatch(
            audio_microfrontend, (), dict(audio=audio,
                                          sample_rate=sample_rate,
                                          window_size=window_size,
                                          window_step=window_step,
                                          num_channels=num_channels,
                                          upper_band_limit=upper_band_limit,
                                          lower_band_limit=lower_band_limit,
                                          smoothing_bits=smoothing_bits,
                                          even_smoothing=even_smoothing,
                                          odd_smoothing=odd_smoothing,
                                          min_signal_remaining=min_signal_remaining,
                                          enable_pcan=enable_pcan,
                                          pcan_strength=pcan_strength,
                                          pcan_offset=pcan_offset,
                                          gain_bits=gain_bits,
                                          enable_log=enable_log,
                                          scale_shift=scale_shift,
                                          left_context=left_context,
                                          right_context=right_context,
                                          frame_stride=frame_stride,
                                          zero_padding=zero_padding,
                                          out_scale=out_scale,
                                          out_type=out_type, name=name)
          )
      if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return result
      raise
  # Add nodes to the TensorFlow graph.
  if sample_rate is None:
    sample_rate = 16000
  sample_rate = _execute.make_int(sample_rate, "sample_rate")
  if window_size is None:
    window_size = 25
  window_size = _execute.make_int(window_size, "window_size")
  if window_step is None:
    window_step = 10
  window_step = _execute.make_int(window_step, "window_step")
  if num_channels is None:
    num_channels = 32
  num_channels = _execute.make_int(num_channels, "num_channels")
  if upper_band_limit is None:
    upper_band_limit = 7500
  upper_band_limit = _execute.make_float(upper_band_limit, "upper_band_limit")
  if lower_band_limit is None:
    lower_band_limit = 125
  lower_band_limit = _execute.make_float(lower_band_limit, "lower_band_limit")
  if smoothing_bits is None:
    smoothing_bits = 10
  smoothing_bits = _execute.make_int(smoothing_bits, "smoothing_bits")
  if even_smoothing is None:
    even_smoothing = 0.025
  even_smoothing = _execute.make_float(even_smoothing, "even_smoothing")
  if odd_smoothing is None:
    odd_smoothing = 0.06
  odd_smoothing = _execute.make_float(odd_smoothing, "odd_smoothing")
  if min_signal_remaining is None:
    min_signal_remaining = 0.05
  min_signal_remaining = _execute.make_float(min_signal_remaining, "min_signal_remaining")
  if enable_pcan is None:
    enable_pcan = False
  enable_pcan = _execute.make_bool(enable_pcan, "enable_pcan")
  if pcan_strength is None:
    pcan_strength = 0.95
  pcan_strength = _execute.make_float(pcan_strength, "pcan_strength")
  if pcan_offset is None:
    pcan_offset = 80
  pcan_offset = _execute.make_float(pcan_offset, "pcan_offset")
  if gain_bits is None:
    gain_bits = 21
  gain_bits = _execute.make_int(gain_bits, "gain_bits")
  if enable_log is None:
    enable_log = True
  enable_log = _execute.make_bool(enable_log, "enable_log")
  if scale_shift is None:
    scale_shift = 6
  scale_shift = _execute.make_int(scale_shift, "scale_shift")
  if left_context is None:
    left_context = 0
  left_context = _execute.make_int(left_context, "left_context")
  if right_context is None:
    right_context = 0
  right_context = _execute.make_int(right_context, "right_context")
  if frame_stride is None:
    frame_stride = 1
  frame_stride = _execute.make_int(frame_stride, "frame_stride")
  if zero_padding is None:
    zero_padding = False
  zero_padding = _execute.make_bool(zero_padding, "zero_padding")
  if out_scale is None:
    out_scale = 1
  out_scale = _execute.make_int(out_scale, "out_scale")
  if out_type is None:
    out_type = _dtypes.uint16
  out_type = _execute.make_type(out_type, "out_type")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "AudioMicrofrontend", audio=audio, sample_rate=sample_rate,
                              window_size=window_size,
                              window_step=window_step,
                              num_channels=num_channels,
                              upper_band_limit=upper_band_limit,
                              lower_band_limit=lower_band_limit,
                              smoothing_bits=smoothing_bits,
                              even_smoothing=even_smoothing,
                              odd_smoothing=odd_smoothing,
                              min_signal_remaining=min_signal_remaining,
                              enable_pcan=enable_pcan,
                              pcan_strength=pcan_strength,
                              pcan_offset=pcan_offset, gain_bits=gain_bits,
                              enable_log=enable_log, scale_shift=scale_shift,
                              left_context=left_context,
                              right_context=right_context,
                              frame_stride=frame_stride,
                              zero_padding=zero_padding, out_scale=out_scale,
                              out_type=out_type, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          audio_microfrontend, (), dict(audio=audio, sample_rate=sample_rate,
                                        window_size=window_size,
                                        window_step=window_step,
                                        num_channels=num_channels,
                                        upper_band_limit=upper_band_limit,
                                        lower_band_limit=lower_band_limit,
                                        smoothing_bits=smoothing_bits,
                                        even_smoothing=even_smoothing,
                                        odd_smoothing=odd_smoothing,
                                        min_signal_remaining=min_signal_remaining,
                                        enable_pcan=enable_pcan,
                                        pcan_strength=pcan_strength,
                                        pcan_offset=pcan_offset,
                                        gain_bits=gain_bits,
                                        enable_log=enable_log,
                                        scale_shift=scale_shift,
                                        left_context=left_context,
                                        right_context=right_context,
                                        frame_stride=frame_stride,
                                        zero_padding=zero_padding,
                                        out_scale=out_scale,
                                        out_type=out_type, name=name)
        )
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("sample_rate", _op._get_attr_int("sample_rate"), "window_size",
              _op._get_attr_int("window_size"), "window_step",
              _op._get_attr_int("window_step"), "num_channels",
              _op._get_attr_int("num_channels"), "upper_band_limit",
              _op.get_attr("upper_band_limit"), "lower_band_limit",
              _op.get_attr("lower_band_limit"), "smoothing_bits",
              _op._get_attr_int("smoothing_bits"), "even_smoothing",
              _op.get_attr("even_smoothing"), "odd_smoothing",
              _op.get_attr("odd_smoothing"), "min_signal_remaining",
              _op.get_attr("min_signal_remaining"), "enable_pcan",
              _op._get_attr_bool("enable_pcan"), "pcan_strength",
              _op.get_attr("pcan_strength"), "pcan_offset",
              _op.get_attr("pcan_offset"), "gain_bits",
              _op._get_attr_int("gain_bits"), "enable_log",
              _op._get_attr_bool("enable_log"), "scale_shift",
              _op._get_attr_int("scale_shift"), "left_context",
              _op._get_attr_int("left_context"), "right_context",
              _op._get_attr_int("right_context"), "frame_stride",
              _op._get_attr_int("frame_stride"), "zero_padding",
              _op._get_attr_bool("zero_padding"), "out_scale",
              _op._get_attr_int("out_scale"), "out_type",
              _op._get_attr_type("out_type"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "AudioMicrofrontend", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
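For reference, a minimal usage sketch of the `audio_microfrontend` wrapper above. The call signature and defaults are taken from its docstring; the zero-filled input clip is an illustrative assumption, not part of the original snippet.

```python
# Hypothetical usage: one second of 16 kHz int16 audio in, quantized
# filterbank features out. Parameters mirror the documented defaults
# (25 ms windows, 10 ms steps, 32 channels, uint16 output).
import numpy as np
import tensorflow as tf

audio = tf.constant(np.zeros(16000, dtype=np.int16))  # [samples]
features = audio_microfrontend(
    audio,
    sample_rate=16000,
    window_size=25,       # ms per analysis frame
    window_step=10,       # ms between frame starts
    num_channels=32,
    out_type=tf.uint16)
# features: [num_frames, num_channels]; with left/right context and
# frame_stride at their defaults, num_frames follows the windowing above.
```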
Code example #27
def audio_summary(tag, tensor, sample_rate, max_outputs=3, name=None):
  r"""Outputs a `Summary` protocol buffer with audio.

  The summary has up to `max_outputs` summary values containing audio. The
  audio is built from `tensor` which must be 3-D with shape `[batch_size,
  frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
  assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.

  The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
  build the `tag` of the summary values:

  *  If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
  *  If `max_outputs` is greater than 1, the summary value tags are
     generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.

  Args:
    tag: A `Tensor` of type `string`.
      Scalar. Used to build the `tag` attribute of the summary values.
    tensor: A `Tensor` of type `float32`. 2-D of shape `[batch_size, frames]` or 3-D of shape `[batch_size, frames, channels]`.
    sample_rate: A `float`. The sample rate of the signal in hertz.
    max_outputs: An optional `int` that is `>= 1`. Defaults to `3`.
      Max number of batch elements to generate audio for.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "AudioSummary", name,
        tld.op_callbacks, tag, tensor, "sample_rate", sample_rate,
        "max_outputs", max_outputs)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return audio_summary_eager_fallback(
          tag, tensor, sample_rate=sample_rate, max_outputs=max_outputs,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  sample_rate = _execute.make_float(sample_rate, "sample_rate")
  if max_outputs is None:
    max_outputs = 3
  max_outputs = _execute.make_int(max_outputs, "max_outputs")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "AudioSummary", tag=tag, tensor=tensor, sample_rate=sample_rate,
                        max_outputs=max_outputs, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("sample_rate", _op.get_attr("sample_rate"), "max_outputs",
              _op._get_attr_int("max_outputs"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "AudioSummary", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
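A graph-mode sketch of the summary op above. TF1-style session execution is assumed, since this wrapper predates the TF2 summary API; the sine-wave clip is illustrative.

```python
# Serialize a one-clip batch into a Summary proto for TensorBoard.
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

wave = np.sin(np.linspace(0.0, 2.0 * np.pi * 440.0, 16000)).astype(np.float32)
clip = tf.constant(wave[np.newaxis, :])        # [batch_size=1, frames], in [-1, 1]
summary = audio_summary(tag=tf.constant("demo"),
                        tensor=clip,
                        sample_rate=16000.0,   # hertz, a Python float attr
                        max_outputs=1)
with tf.Session() as sess:
    serialized = sess.run(summary)             # bytes of a Summary proto
```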
Code example #28
def single_image_random_dot_stereograms(depth_values,
                                        hidden_surface_removal=True,
                                        convergence_dots_size=8,
                                        dots_per_inch=72,
                                        eye_separation=2.5,
                                        mu=0.3333,
                                        normalize=True,
                                        normalize_max=-100,
                                        normalize_min=100,
                                        border_level=0,
                                        number_colors=256,
                                        output_image_shape=[1024, 768, 1],
                                        output_data_window=[1022, 757],
                                        name=None):
    r"""Outputs a single image random dot stereogram for export via encode_PNG/JPG OP.

  Given the 2-D tensor 'depth_values' with encoded Z values, this operation will
  encode 3-D data into a 2-D image.  The output of this Op is suitable for the
  encode_PNG/JPG ops.  Be careful with image compression as this may corrupt the
  encoded 3-D data within the image.

  This Op is based upon:
  'http://www.learningace.com/doc/4331582/b6ab058d1e206d68ab60e4e1ead2fe6e/sirds-paper'

  Example use which outputs a SIRDS image as picture_out.png:
  ```python
  img=[[1,2,3,3,2,1],
       [1,2,3,4,5,2],
       [1,2,3,4,5,3],
       [1,2,3,4,5,4],
       [6,5,4,4,5,5]]

  session = tf.InteractiveSession()

  sirds = single_image_random_dot_stereograms(img,convergence_dots_size=8,number_colors=256,normalize=True)

  out = sirds.eval()

  png = tf.image.encode_png(out).eval()

  with open('picture_out.png', 'wb') as f:
      f.write(png)
  ```

  Args:
    depth_values: A `Tensor`. Must be one of the following types: `float64`, `float32`, `int64`, `int32`.
      Z values of data to encode into 'output_data_window' window,
      lower values are further away {0.0 floor(far), 1.0 ceiling(near) after normalization}, must be 2-D tensor
    hidden_surface_removal: An optional `bool`. Defaults to `True`.
      Activate hidden surface removal
    convergence_dots_size: An optional `int`. Defaults to `8`.
      Black dot size in pixels to help the viewer converge the image, drawn on the bottom of the image
    dots_per_inch: An optional `int`. Defaults to `72`.
      Output device in dots/inch
    eye_separation: An optional `float`. Defaults to `2.5`.
      Separation between eyes in inches
    mu: An optional `float`. Defaults to `0.3333`.
      Depth of field, fraction of viewing distance (e.g. 1/3 = .3333)
    normalize: An optional `bool`. Defaults to `True`.
      Normalize input data to [0.0, 1.0]
    normalize_max: An optional `float`. Defaults to `-100`.
      Fix MAX value for Normalization - if < MIN, autoscale
    normalize_min: An optional `float`. Defaults to `100`.
      Fix MIN value for Normalization - if > MAX, autoscale
    border_level: An optional `float`. Defaults to `0`.
      Value of border depth 0.0 {far} to 1.0 {near}
    number_colors: An optional `int`. Defaults to `256`.
      2 (black & white), 256 (grayscale), and numbers > 256 (full color) are all that are currently supported
    output_image_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `[1024, 768, 1]`.
      Output size of returned image in X,Y, Channels 1-grayscale, 3 color (1024, 768, 1),
      channels will be updated to 3 if 'number_colors' > 256
    output_data_window: An optional `tf.TensorShape` or list of `ints`. Defaults to `[1022, 757]`.
      Size of "DATA" window, must be equal to or smaller than 'output_image_shape', will be centered
      and use 'convergence_dots_size' for best fit to avoid overlap if possible
    name: A name for the operation (optional).

  Returns:
    A tensor of size 'output_image_shape' with the encoded 'depth_values'
  """
    _ctx = _context.context()
    if not _ctx.executing_eagerly():
        if hidden_surface_removal is None:
            hidden_surface_removal = True
        hidden_surface_removal = _execute.make_bool(hidden_surface_removal,
                                                    "hidden_surface_removal")
        if convergence_dots_size is None:
            convergence_dots_size = 8
        convergence_dots_size = _execute.make_int(convergence_dots_size,
                                                  "convergence_dots_size")
        if dots_per_inch is None:
            dots_per_inch = 72
        dots_per_inch = _execute.make_int(dots_per_inch, "dots_per_inch")
        if eye_separation is None:
            eye_separation = 2.5
        eye_separation = _execute.make_float(eye_separation, "eye_separation")
        if mu is None:
            mu = 0.3333
        mu = _execute.make_float(mu, "mu")
        if normalize is None:
            normalize = True
        normalize = _execute.make_bool(normalize, "normalize")
        if normalize_max is None:
            normalize_max = -100
        normalize_max = _execute.make_float(normalize_max, "normalize_max")
        if normalize_min is None:
            normalize_min = 100
        normalize_min = _execute.make_float(normalize_min, "normalize_min")
        if border_level is None:
            border_level = 0
        border_level = _execute.make_float(border_level, "border_level")
        if number_colors is None:
            number_colors = 256
        number_colors = _execute.make_int(number_colors, "number_colors")
        if output_image_shape is None:
            output_image_shape = [1024, 768, 1]
        output_image_shape = _execute.make_shape(output_image_shape,
                                                 "output_image_shape")
        if output_data_window is None:
            output_data_window = [1022, 757]
        output_data_window = _execute.make_shape(output_data_window,
                                                 "output_data_window")
        _, _, _op = _op_def_lib._apply_op_helper(
            "SingleImageRandomDotStereograms",
            depth_values=depth_values,
            hidden_surface_removal=hidden_surface_removal,
            convergence_dots_size=convergence_dots_size,
            dots_per_inch=dots_per_inch,
            eye_separation=eye_separation,
            mu=mu,
            normalize=normalize,
            normalize_max=normalize_max,
            normalize_min=normalize_min,
            border_level=border_level,
            number_colors=number_colors,
            output_image_shape=output_image_shape,
            output_data_window=output_data_window,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("T", _op.get_attr("T"), "hidden_surface_removal",
                  _op.get_attr("hidden_surface_removal"),
                  "convergence_dots_size",
                  _op.get_attr("convergence_dots_size"), "dots_per_inch",
                  _op.get_attr("dots_per_inch"), "eye_separation",
                  _op.get_attr("eye_separation"), "mu", _op.get_attr("mu"),
                  "normalize", _op.get_attr("normalize"), "normalize_max",
                  _op.get_attr("normalize_max"), "normalize_min",
                  _op.get_attr("normalize_min"), "border_level",
                  _op.get_attr("border_level"), "number_colors",
                  _op.get_attr("number_colors"), "output_image_shape",
                  _op.get_attr("output_image_shape"), "output_data_window",
                  _op.get_attr("output_data_window"))
        _execute.record_gradient("SingleImageRandomDotStereograms",
                                 _inputs_flat, _attrs, _result, name)
        _result, = _result
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._handle, _ctx.device_name,
                "SingleImageRandomDotStereograms", name,
                _ctx._post_execution_callbacks, depth_values,
                "hidden_surface_removal", hidden_surface_removal,
                "convergence_dots_size", convergence_dots_size,
                "dots_per_inch", dots_per_inch, "eye_separation",
                eye_separation, "mu", mu, "normalize", normalize,
                "normalize_max", normalize_max, "normalize_min", normalize_min,
                "border_level", border_level, "number_colors", number_colors,
                "output_image_shape", output_image_shape, "output_data_window",
                output_data_window)
            return _result
        except _core._FallbackException:
            return single_image_random_dot_stereograms_eager_fallback(
                depth_values,
                hidden_surface_removal=hidden_surface_removal,
                convergence_dots_size=convergence_dots_size,
                dots_per_inch=dots_per_inch,
                eye_separation=eye_separation,
                mu=mu,
                normalize=normalize,
                normalize_max=normalize_max,
                normalize_min=normalize_min,
                border_level=border_level,
                number_colors=number_colors,
                output_image_shape=output_image_shape,
                output_data_window=output_data_window,
                name=name)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
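The example embedded in the docstring above uses the TF1 `tf.InteractiveSession`/`eval` API; under eager execution the same flow is shorter. A sketch using the same input data as the docstring example:

```python
import tensorflow as tf

img = [[1, 2, 3, 3, 2, 1],
       [1, 2, 3, 4, 5, 2],
       [1, 2, 3, 4, 5, 3],
       [1, 2, 3, 4, 5, 4],
       [6, 5, 4, 4, 5, 5]]
sirds = single_image_random_dot_stereograms(
    img, convergence_dots_size=8, number_colors=256, normalize=True)
# The op's output feeds encode_png directly, as in the docstring example.
png = tf.image.encode_png(sirds)
with open('picture_out.png', 'wb') as f:
    f.write(png.numpy())
```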
Code example #29
def block_lstm(seq_len_max,
               x,
               cs_prev,
               h_prev,
               w,
               wci,
               wcf,
               wco,
               b,
               forget_bias=1,
               cell_clip=3,
               use_peephole=False,
               name=None):
    r"""Computes the LSTM cell forward propagation for all the time steps.

  This is equivalent to applying LSTMBlockCell in a loop, like so:

  ```python
  for x1 in unpack(x):
    i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(
      x1, cs_prev, h_prev, w, wci, wcf, wco, b)
    cs_prev = cs1
    h_prev = h1
    i.append(i1)
    cs.append(cs1)
    f.append(f1)
    o.append(o1)
    ci.append(ci1)
    co.append(co1)
    h.append(h1)
  return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(co), pack(h)
  ```

  Args:
    seq_len_max: A `Tensor` of type `int64`.
      Maximum time length actually used by this input. Outputs are padded
      with zeros beyond this length.
    x: A `Tensor`. Must be one of the following types: `float32`.
      The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
    cs_prev: A `Tensor`. Must have the same type as `x`.
      Value of the initial cell state.
    h_prev: A `Tensor`. Must have the same type as `x`.
      Initial output of cell (to be used for peephole).
    w: A `Tensor`. Must have the same type as `x`. The weight matrix.
    wci: A `Tensor`. Must have the same type as `x`.
      The weight matrix for input gate peephole connection.
    wcf: A `Tensor`. Must have the same type as `x`.
      The weight matrix for forget gate peephole connection.
    wco: A `Tensor`. Must have the same type as `x`.
      The weight matrix for output gate peephole connection.
    b: A `Tensor`. Must have the same type as `x`. The bias vector.
    forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
    cell_clip: An optional `float`. Defaults to `3`.
      Value to clip the 'cs' value to.
    use_peephole: An optional `bool`. Defaults to `False`.
      Whether to use peephole weights.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).

    i: A `Tensor`. Has the same type as `x`. The input gate over the whole time sequence.
    cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh over the whole time sequence.
    f: A `Tensor`. Has the same type as `x`. The forget gate over the whole time sequence.
    o: A `Tensor`. Has the same type as `x`. The output gate over the whole time sequence.
    ci: A `Tensor`. Has the same type as `x`. The cell input over the whole time sequence.
    co: A `Tensor`. Has the same type as `x`. The cell after the tanh over the whole time sequence.
    h: A `Tensor`. Has the same type as `x`. The output h vector over the whole time sequence.
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        if forget_bias is None:
            forget_bias = 1
        forget_bias = _execute.make_float(forget_bias, "forget_bias")
        if cell_clip is None:
            cell_clip = 3
        cell_clip = _execute.make_float(cell_clip, "cell_clip")
        if use_peephole is None:
            use_peephole = False
        use_peephole = _execute.make_bool(use_peephole, "use_peephole")
        _, _, _op = _op_def_lib._apply_op_helper("BlockLSTM",
                                                 seq_len_max=seq_len_max,
                                                 x=x,
                                                 cs_prev=cs_prev,
                                                 h_prev=h_prev,
                                                 w=w,
                                                 wci=wci,
                                                 wcf=wcf,
                                                 wco=wco,
                                                 b=b,
                                                 forget_bias=forget_bias,
                                                 cell_clip=cell_clip,
                                                 use_peephole=use_peephole,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("forget_bias", _op.get_attr("forget_bias"), "cell_clip",
                  _op.get_attr("cell_clip"), "use_peephole",
                  _op.get_attr("use_peephole"), "T", _op.get_attr("T"))
        _execute.record_gradient("BlockLSTM", _inputs_flat, _attrs, _result,
                                 name)
        _result = _BlockLSTMOutput._make(_result)
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "BlockLSTM", name, _ctx._post_execution_callbacks, seq_len_max,
                x, cs_prev, h_prev, w, wci, wcf, wco, b, "forget_bias",
                forget_bias, "cell_clip", cell_clip, "use_peephole",
                use_peephole)
            _result = _BlockLSTMOutput._make(_result)
            return _result
        except _core._FallbackException:
            return block_lstm_eager_fallback(seq_len_max,
                                             x,
                                             cs_prev,
                                             h_prev,
                                             w,
                                             wci,
                                             wcf,
                                             wco,
                                             b,
                                             forget_bias=forget_bias,
                                             cell_clip=cell_clip,
                                             use_peephole=use_peephole,
                                             name=name,
                                             ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
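A shape sketch for the wrapper above. The toy sizes are assumptions; the gate packing follows the LSTMBlockCell convention of one weight matrix covering all four gates.

```python
import tensorflow as tf

timelen, batch, num_inputs, cell = 5, 2, 3, 4
x = tf.zeros([timelen, batch, num_inputs], tf.float32)
cs_prev = tf.zeros([batch, cell], tf.float32)             # initial cell state
h_prev = tf.zeros([batch, cell], tf.float32)              # initial output
w = tf.zeros([num_inputs + cell, 4 * cell], tf.float32)   # [x; h] -> 4 gates
b = tf.zeros([4 * cell], tf.float32)
wci = tf.zeros([cell], tf.float32)                        # peephole weights,
wcf = tf.zeros([cell], tf.float32)                        # ignored when
wco = tf.zeros([cell], tf.float32)                        # use_peephole=False
seq_len_max = tf.constant(timelen, tf.int64)

i, cs, f, o, ci, co, h = block_lstm(
    seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b)
# Every output has shape [timelen, batch, cell]; steps past seq_len_max
# come back zero-padded, per the docstring.
```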
Code example #30
def lstm_block_cell(x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None):
  r"""Computes the LSTM cell forward propagation for 1 time step.

  This implementation uses 1 weight matrix and 1 bias vector, and there's an
  optional peephole connection.

  This kernel op implements the following mathematical equations:

  ```python
  xh = [x, h_prev]
  [i, f, ci, o] = xh * w + b
  f = f + forget_bias

  if not use_peephole:
    wci = wcf = wco = 0

  i = sigmoid(cs_prev * wci + i)
  f = sigmoid(cs_prev * wcf + f)
  ci = tanh(ci)

  cs = ci .* i + cs_prev .* f
  cs = clip(cs, cell_clip)

  o = sigmoid(cs * wco + o)
  co = tanh(cs)
  h = co .* o
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`.
      The input to the LSTM cell, shape (batch_size, num_inputs).
    cs_prev: A `Tensor`. Must have the same type as `x`.
      Value of the cell state at previous time step.
    h_prev: A `Tensor`. Must have the same type as `x`.
      Output of the previous cell at previous time step.
    w: A `Tensor`. Must have the same type as `x`. The weight matrix.
    wci: A `Tensor`. Must have the same type as `x`.
      The weight matrix for input gate peephole connection.
    wcf: A `Tensor`. Must have the same type as `x`.
      The weight matrix for forget gate peephole connection.
    wco: A `Tensor`. Must have the same type as `x`.
      The weight matrix for output gate peephole connection.
    b: A `Tensor`. Must have the same type as `x`. The bias vector.
    forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
    cell_clip: An optional `float`. Defaults to `3`.
      Value to clip the 'cs' value to.
    use_peephole: An optional `bool`. Defaults to `False`.
      Whether to use peephole weights.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).

    i: A `Tensor`. Has the same type as `x`. The input gate.
    cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh.
    f: A `Tensor`. Has the same type as `x`. The forget gate.
    o: A `Tensor`. Has the same type as `x`. The output gate.
    ci: A `Tensor`. Has the same type as `x`. The cell input.
    co: A `Tensor`. Has the same type as `x`. The cell after the tanh.
    h: A `Tensor`. Has the same type as `x`. The output h vector.
  """
  if forget_bias is None:
    forget_bias = 1
  forget_bias = _execute.make_float(forget_bias, "forget_bias")
  if cell_clip is None:
    cell_clip = 3
  cell_clip = _execute.make_float(cell_clip, "cell_clip")
  if use_peephole is None:
    use_peephole = False
  use_peephole = _execute.make_bool(use_peephole, "use_peephole")
  _ctx = _context.context()
  if _ctx.in_graph_mode():
    _, _, _op = _op_def_lib._apply_op_helper(
        "LSTMBlockCell", x=x, cs_prev=cs_prev, h_prev=h_prev, w=w, wci=wci,
        wcf=wcf, wco=wco, b=b, forget_bias=forget_bias, cell_clip=cell_clip,
        use_peephole=use_peephole, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("forget_bias", _op.get_attr("forget_bias"), "cell_clip",
              _op.get_attr("cell_clip"), "use_peephole",
              _op.get_attr("use_peephole"), "T", _op.get_attr("T"))
  else:
    _attr_T, _inputs_T = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b], _ctx)
    (x, cs_prev, h_prev, w, wci, wcf, wco, b) = _inputs_T
    _inputs_flat = [x, cs_prev, h_prev, w, wci, wcf, wco, b]
    _attrs = ("forget_bias", forget_bias, "cell_clip", cell_clip,
              "use_peephole", use_peephole, "T", _attr_T)
    _result = _execute.execute(b"LSTMBlockCell", 7, inputs=_inputs_flat,
                               attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "LSTMBlockCell", _inputs_flat, _attrs, _result, name)
  _result = _LSTMBlockCellOutput._make(_result)
  return _result
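To make the gate algebra in the docstring concrete, here is a NumPy re-statement of those equations: a reference sketch, not the kernel itself. The `.*` notation above is element-wise multiplication, and the symmetric clip range is an assumption.

```python
import numpy as np

def _sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def lstm_block_cell_ref(x, cs_prev, h_prev, w, wci, wcf, wco, b,
                        forget_bias=1.0, cell_clip=3.0, use_peephole=False):
    """NumPy transcription of the docstring equations for one time step."""
    xh = np.concatenate([x, h_prev], axis=1)        # [batch, num_inputs + cell]
    i, f, ci, o = np.split(xh @ w + b, 4, axis=1)   # gate pre-activations
    f = f + forget_bias
    if not use_peephole:
        wci = wcf = wco = 0.0
    i = _sigmoid(cs_prev * wci + i)
    f = _sigmoid(cs_prev * wcf + f)
    ci = np.tanh(ci)
    # clip(cs, cell_clip) read as a symmetric clip to [-cell_clip, cell_clip]
    cs = np.clip(ci * i + cs_prev * f, -cell_clip, cell_clip)
    o = _sigmoid(cs * wco + o)
    co = np.tanh(cs)
    return i, cs, f, o, ci, co, co * o              # last entry is h
```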
Code example #31
def block_lstm(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None):
  r"""Computes the LSTM cell forward propagation for all the time steps.

  This is equivalent to applying LSTMBlockCell in a loop, like so:

  ```python
  for x1 in unpack(x):
    i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(
      x1, cs_prev, h_prev, w, wci, wcf, wco, b)
    cs_prev = cs1
    h_prev = h1
    i.append(i1)
    cs.append(cs1)
    f.append(f1)
    o.append(o1)
    ci.append(ci1)
    co.append(co1)
    h.append(h1)
  return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(co), pack(h)
  ```

  Args:
    seq_len_max: A `Tensor` of type `int64`.
      Maximum time length actually used by this input. Outputs are padded
      with zeros beyond this length.
    x: A `Tensor`. Must be one of the following types: `float32`.
      The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
    cs_prev: A `Tensor`. Must have the same type as `x`.
      Value of the initial cell state.
    h_prev: A `Tensor`. Must have the same type as `x`.
      Initial output of cell (to be used for peephole).
    w: A `Tensor`. Must have the same type as `x`. The weight matrix.
    wci: A `Tensor`. Must have the same type as `x`.
      The weight matrix for input gate peephole connection.
    wcf: A `Tensor`. Must have the same type as `x`.
      The weight matrix for forget gate peephole connection.
    wco: A `Tensor`. Must have the same type as `x`.
      The weight matrix for output gate peephole connection.
    b: A `Tensor`. Must have the same type as `x`. The bias vector.
    forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
    cell_clip: An optional `float`. Defaults to `3`.
      Value to clip the 'cs' value to.
    use_peephole: An optional `bool`. Defaults to `False`.
      Whether to use peephole weights.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).

    i: A `Tensor`. Has the same type as `x`. The input gate over the whole time sequence.
    cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh over the whole time sequence.
    f: A `Tensor`. Has the same type as `x`. The forget gate over the whole time sequence.
    o: A `Tensor`. Has the same type as `x`. The output gate over the whole time sequence.
    ci: A `Tensor`. Has the same type as `x`. The cell input over the whole time sequence.
    co: A `Tensor`. Has the same type as `x`. The cell after the tanh over the whole time sequence.
    h: A `Tensor`. Has the same type as `x`. The output h vector over the whole time sequence.
  """
  if forget_bias is None:
    forget_bias = 1
  forget_bias = _execute.make_float(forget_bias, "forget_bias")
  if cell_clip is None:
    cell_clip = 3
  cell_clip = _execute.make_float(cell_clip, "cell_clip")
  if use_peephole is None:
    use_peephole = False
  use_peephole = _execute.make_bool(use_peephole, "use_peephole")
  _ctx = _context.context()
  if _ctx.in_graph_mode():
    _, _, _op = _op_def_lib._apply_op_helper(
        "BlockLSTM", seq_len_max=seq_len_max, x=x, cs_prev=cs_prev,
        h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b,
        forget_bias=forget_bias, cell_clip=cell_clip,
        use_peephole=use_peephole, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("forget_bias", _op.get_attr("forget_bias"), "cell_clip",
              _op.get_attr("cell_clip"), "use_peephole",
              _op.get_attr("use_peephole"), "T", _op.get_attr("T"))
  else:
    _attr_T, _inputs_T = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b], _ctx)
    (x, cs_prev, h_prev, w, wci, wcf, wco, b) = _inputs_T
    seq_len_max = _ops.convert_to_tensor(seq_len_max, _dtypes.int64)
    _inputs_flat = [seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b]
    _attrs = ("forget_bias", forget_bias, "cell_clip", cell_clip,
              "use_peephole", use_peephole, "T", _attr_T)
    _result = _execute.execute(b"BlockLSTM", 7, inputs=_inputs_flat,
                               attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BlockLSTM", _inputs_flat, _attrs, _result, name)
  _result = _BlockLSTMOutput._make(_result)
  return _result
Code example #32
def single_image_random_dot_stereograms(depth_values, hidden_surface_removal=True, convergence_dots_size=8, dots_per_inch=72, eye_separation=2.5, mu=0.3333, normalize=True, normalize_max=-100, normalize_min=100, border_level=0, number_colors=256, output_image_shape=[1024, 768, 1], output_data_window=[1022, 757], name=None):
  r"""Outputs a single image random dot stereogram for export via encode_PNG/JPG OP.

  Given the 2-D tensor 'depth_values' with encoded Z values, this operation will
  encode 3-D data into a 2-D image.  The output of this Op is suitable for the
  encode_PNG/JPG ops.  Be careful with image compression as this may corrupt the
  encoded 3-D data within the image.

  This Op is based upon:
  'http://www.learningace.com/doc/4331582/b6ab058d1e206d68ab60e4e1ead2fe6e/sirds-paper'

  Example use which outputs a SIRDS image as picture_out.png:
  ```python
  img=[[1,2,3,3,2,1],
       [1,2,3,4,5,2],
       [1,2,3,4,5,3],
       [1,2,3,4,5,4],
       [6,5,4,4,5,5]]

  session = tf.InteractiveSession()

  sirds = single_image_random_dot_stereograms(img,convergence_dots_size=8,number_colors=256,normalize=True)

  out = sirds.eval()

  png = tf.image.encode_png(out).eval()

  with open('picture_out.png', 'wb') as f:
      f.write(png)
  ```

  Args:
    depth_values: A `Tensor`. Must be one of the following types: `float64`, `float32`, `int64`, `int32`.
      Z values of data to encode into 'output_data_window' window,
      lower values are further away {0.0 floor(far), 1.0 ceiling(near) after normalization}, must be 2-D tensor
    hidden_surface_removal: An optional `bool`. Defaults to `True`.
      Activate hidden surface removal
    convergence_dots_size: An optional `int`. Defaults to `8`.
      Black dot size in pixels to help the viewer converge the image, drawn on the bottom of the image
    dots_per_inch: An optional `int`. Defaults to `72`.
      Output device in dots/inch
    eye_separation: An optional `float`. Defaults to `2.5`.
      Separation between eyes in inches
    mu: An optional `float`. Defaults to `0.3333`.
      Depth of field, fraction of viewing distance (e.g. 1/3 = .3333)
    normalize: An optional `bool`. Defaults to `True`.
      Normalize input data to [0.0, 1.0]
    normalize_max: An optional `float`. Defaults to `-100`.
      Fix MAX value for Normalization - if < MIN, autoscale
    normalize_min: An optional `float`. Defaults to `100`.
      Fix MIN value for Normalization - if > MAX, autoscale
    border_level: An optional `float`. Defaults to `0`.
      Value of border depth 0.0 {far} to 1.0 {near}
    number_colors: An optional `int`. Defaults to `256`.
      2 (black & white), 256 (grayscale), and numbers > 256 (full color) are all that are currently supported
    output_image_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `[1024, 768, 1]`.
      Output size of returned image in X,Y, Channels 1-grayscale, 3 color (1024, 768, 1),
      channels will be updated to 3 if 'number_colors' > 256
    output_data_window: An optional `tf.TensorShape` or list of `ints`. Defaults to `[1022, 757]`.
      Size of "DATA" window, must be equal to or smaller than 'output_image_shape', will be centered
      and use 'convergence_dots_size' for best fit to avoid overlap if possible
    name: A name for the operation (optional).

  Returns:
    A tensor of size 'output_image_shape' with the encoded 'depth_values'
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    if hidden_surface_removal is None:
      hidden_surface_removal = True
    hidden_surface_removal = _execute.make_bool(hidden_surface_removal, "hidden_surface_removal")
    if convergence_dots_size is None:
      convergence_dots_size = 8
    convergence_dots_size = _execute.make_int(convergence_dots_size, "convergence_dots_size")
    if dots_per_inch is None:
      dots_per_inch = 72
    dots_per_inch = _execute.make_int(dots_per_inch, "dots_per_inch")
    if eye_separation is None:
      eye_separation = 2.5
    eye_separation = _execute.make_float(eye_separation, "eye_separation")
    if mu is None:
      mu = 0.3333
    mu = _execute.make_float(mu, "mu")
    if normalize is None:
      normalize = True
    normalize = _execute.make_bool(normalize, "normalize")
    if normalize_max is None:
      normalize_max = -100
    normalize_max = _execute.make_float(normalize_max, "normalize_max")
    if normalize_min is None:
      normalize_min = 100
    normalize_min = _execute.make_float(normalize_min, "normalize_min")
    if border_level is None:
      border_level = 0
    border_level = _execute.make_float(border_level, "border_level")
    if number_colors is None:
      number_colors = 256
    number_colors = _execute.make_int(number_colors, "number_colors")
    if output_image_shape is None:
      output_image_shape = [1024, 768, 1]
    output_image_shape = _execute.make_shape(output_image_shape, "output_image_shape")
    if output_data_window is None:
      output_data_window = [1022, 757]
    output_data_window = _execute.make_shape(output_data_window, "output_data_window")
    _, _, _op = _op_def_lib._apply_op_helper(
        "SingleImageRandomDotStereograms", depth_values=depth_values,
        hidden_surface_removal=hidden_surface_removal,
        convergence_dots_size=convergence_dots_size,
        dots_per_inch=dots_per_inch, eye_separation=eye_separation, mu=mu,
        normalize=normalize, normalize_max=normalize_max,
        normalize_min=normalize_min, border_level=border_level,
        number_colors=number_colors, output_image_shape=output_image_shape,
        output_data_window=output_data_window, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "hidden_surface_removal",
              _op.get_attr("hidden_surface_removal"), "convergence_dots_size",
              _op.get_attr("convergence_dots_size"), "dots_per_inch",
              _op.get_attr("dots_per_inch"), "eye_separation",
              _op.get_attr("eye_separation"), "mu", _op.get_attr("mu"),
              "normalize", _op.get_attr("normalize"), "normalize_max",
              _op.get_attr("normalize_max"), "normalize_min",
              _op.get_attr("normalize_min"), "border_level",
              _op.get_attr("border_level"), "number_colors",
              _op.get_attr("number_colors"), "output_image_shape",
              _op.get_attr("output_image_shape"), "output_data_window",
              _op.get_attr("output_data_window"))
    _execute.record_gradient(
      "SingleImageRandomDotStereograms", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SingleImageRandomDotStereograms", name,
        _ctx._post_execution_callbacks, depth_values,
        "hidden_surface_removal", hidden_surface_removal,
        "convergence_dots_size", convergence_dots_size, "dots_per_inch",
        dots_per_inch, "eye_separation", eye_separation, "mu", mu,
        "normalize", normalize, "normalize_max", normalize_max,
        "normalize_min", normalize_min, "border_level", border_level,
        "number_colors", number_colors, "output_image_shape",
        output_image_shape, "output_data_window", output_data_window)
      return _result
    except _core._FallbackException:
      return single_image_random_dot_stereograms_eager_fallback(
          depth_values, hidden_surface_removal=hidden_surface_removal,
          convergence_dots_size=convergence_dots_size,
          dots_per_inch=dots_per_inch, eye_separation=eye_separation, mu=mu,
          normalize=normalize, normalize_max=normalize_max,
          normalize_min=normalize_min, border_level=border_level,
          number_colors=number_colors, output_image_shape=output_image_shape,
          output_data_window=output_data_window, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
Code example #33
def lstm_block_cell(x,
                    cs_prev,
                    h_prev,
                    w,
                    wci,
                    wcf,
                    wco,
                    b,
                    forget_bias=1,
                    cell_clip=3,
                    use_peephole=False,
                    name=None):
    r"""Computes the LSTM cell forward propagation for 1 time step.

  This implementation uses 1 weight matrix and 1 bias vector, and there's an
  optional peephole connection.

  This kernel op implements the following mathematical equations:

  ```python
  xh = [x, h_prev]
  [i, f, ci, o] = xh * w + b
  f = f + forget_bias

  if not use_peephole:
    wci = wcf = wco = 0

  i = sigmoid(cs_prev * wci + i)
  f = sigmoid(cs_prev * wcf + f)
  ci = tanh(ci)

  cs = ci .* i + cs_prev .* f
  cs = clip(cs, cell_clip)

  o = sigmoid(cs * wco + o)
  co = tanh(cs)
  h = co .* o
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`.
      The input to the LSTM cell, shape (batch_size, num_inputs).
    cs_prev: A `Tensor`. Must have the same type as `x`.
      Value of the cell state at previous time step.
    h_prev: A `Tensor`. Must have the same type as `x`.
      Output of the previous cell at previous time step.
    w: A `Tensor`. Must have the same type as `x`. The weight matrix.
    wci: A `Tensor`. Must have the same type as `x`.
      The weight matrix for input gate peephole connection.
    wcf: A `Tensor`. Must have the same type as `x`.
      The weight matrix for forget gate peephole connection.
    wco: A `Tensor`. Must have the same type as `x`.
      The weight matrix for output gate peephole connection.
    b: A `Tensor`. Must have the same type as `x`. The bias vector.
    forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
    cell_clip: An optional `float`. Defaults to `3`.
      Value to clip the 'cs' value to.
    use_peephole: An optional `bool`. Defaults to `False`.
      Whether to use peephole weights.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).

    i: A `Tensor`. Has the same type as `x`. The input gate.
    cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh.
    f: A `Tensor`. Has the same type as `x`. The forget gate.
    o: A `Tensor`. Has the same type as `x`. The output gate.
    ci: A `Tensor`. Has the same type as `x`. The cell input.
    co: A `Tensor`. Has the same type as `x`. The cell after the tanh.
    h: A `Tensor`. Has the same type as `x`. The output h vector.
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        if forget_bias is None:
            forget_bias = 1
        forget_bias = _execute.make_float(forget_bias, "forget_bias")
        if cell_clip is None:
            cell_clip = 3
        cell_clip = _execute.make_float(cell_clip, "cell_clip")
        if use_peephole is None:
            use_peephole = False
        use_peephole = _execute.make_bool(use_peephole, "use_peephole")
        _, _, _op = _op_def_lib._apply_op_helper("LSTMBlockCell",
                                                 x=x,
                                                 cs_prev=cs_prev,
                                                 h_prev=h_prev,
                                                 w=w,
                                                 wci=wci,
                                                 wcf=wcf,
                                                 wco=wco,
                                                 b=b,
                                                 forget_bias=forget_bias,
                                                 cell_clip=cell_clip,
                                                 use_peephole=use_peephole,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("forget_bias", _op.get_attr("forget_bias"), "cell_clip",
                  _op.get_attr("cell_clip"), "use_peephole",
                  _op.get_attr("use_peephole"), "T", _op.get_attr("T"))
        _execute.record_gradient("LSTMBlockCell", _inputs_flat, _attrs,
                                 _result, name)
        _result = _LSTMBlockCellOutput._make(_result)
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "LSTMBlockCell", name, _ctx._post_execution_callbacks, x,
                cs_prev, h_prev, w, wci, wcf, wco, b, "forget_bias",
                forget_bias, "cell_clip", cell_clip, "use_peephole",
                use_peephole)
            _result = _LSTMBlockCellOutput._make(_result)
            return _result
        except _core._FallbackException:
            return lstm_block_cell_eager_fallback(x,
                                                  cs_prev,
                                                  h_prev,
                                                  w,
                                                  wci,
                                                  wcf,
                                                  wco,
                                                  b,
                                                  forget_bias=forget_bias,
                                                  cell_clip=cell_clip,
                                                  use_peephole=use_peephole,
                                                  name=name,
                                                  ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
Code example #34
File: gen_training_ops.py Project: whqkdhfh13/sswp
def center_tree_ensemble_bias(tree_ensemble_handle, stamp_token, next_stamp_token, delta_updates, learner_config, centering_epsilon=0.01, name=None):
  r"""Centers the tree ensemble bias before adding trees based on feature splits.

  Args:
    tree_ensemble_handle: A `Tensor` of type `resource`.
      Handle to the ensemble variable.
    stamp_token: A `Tensor` of type `int64`.
      Stamp token for validating operation consistency.
    next_stamp_token: A `Tensor` of type `int64`.
      Stamp token to be used for the next iteration.
    delta_updates: A `Tensor` of type `float32`.
      Rank 1 Tensor containing delta updates per bias dimension.
    learner_config: A `string`.
      Config for the learner of type LearnerConfig proto.
    centering_epsilon: An optional `float`. Defaults to `0.01`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
    Scalar indicating whether more centering is needed.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    learner_config = _execute.make_str(learner_config, "learner_config")
    if centering_epsilon is None:
      centering_epsilon = 0.01
    centering_epsilon = _execute.make_float(centering_epsilon, "centering_epsilon")
    _, _, _op = _op_def_lib._apply_op_helper(
        "CenterTreeEnsembleBias", tree_ensemble_handle=tree_ensemble_handle,
        stamp_token=stamp_token, next_stamp_token=next_stamp_token,
        delta_updates=delta_updates, learner_config=learner_config,
        centering_epsilon=centering_epsilon, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("learner_config", _op.get_attr("learner_config"),
              "centering_epsilon", _op.get_attr("centering_epsilon"))
    _execute.record_gradient(
      "CenterTreeEnsembleBias", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "CenterTreeEnsembleBias", name, _ctx._post_execution_callbacks,
        tree_ensemble_handle, stamp_token, next_stamp_token, delta_updates,
        "learner_config", learner_config, "centering_epsilon",
        centering_epsilon)
      return _result
    except _core._FallbackException:
      return center_tree_ensemble_bias_eager_fallback(
          tree_ensemble_handle, stamp_token, next_stamp_token, delta_updates,
          learner_config=learner_config, centering_epsilon=centering_epsilon,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
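The boolean return value drives a retry loop: callers keep centering until the op reports that no more centering is needed. A sketch of that pattern under eager execution, where `tree_ensemble_handle`, `compute_bias_deltas`, and `learner_config_proto` are hypothetical stand-ins for objects created elsewhere:

```python
# Hypothetical driver loop around center_tree_ensemble_bias.
stamp = 0
continue_centering = True
while continue_centering:
    delta = compute_bias_deltas()              # rank-1 float32, per bias dim
    continue_centering = bool(center_tree_ensemble_bias(
        tree_ensemble_handle,                  # resource handle (assumed)
        stamp_token=stamp,
        next_stamp_token=stamp + 1,
        delta_updates=delta,
        learner_config=learner_config_proto,   # serialized LearnerConfig
        centering_epsilon=0.01))
    stamp += 1
```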