Example #1
def zero_initializer(ref, use_locking=True, name="zero_initializer"):
  """Initialize 'ref' with all zeros, ref tensor should be uninitialized.
  If already initialized, you will get ValueError. This op is intended to
  save memory during initialization.
  Args:
    ref: ref of the tensor need to be zero initialized.
    name: optional name for this operation.
  Returns:
    ref that initialized.
  Raises:
    ValueError: If ref tensor is initialized.
  """
  loader.load_op_library(
      resource_loader.get_path_to_datafile("_variable_ops.so"))
  return gen_variable_ops.zero_initializer(ref, name=name)
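
A minimal usage sketch follows (hedged: it assumes the TF 1.x contrib export of this function via `tf.contrib.framework` and graph-mode execution); it zero-fills a variable without materializing a separate zeros tensor at run time.

import tensorflow as tf
from tensorflow.contrib import framework

# Assumption: TF 1.x graph mode; zero_initializer is exported by contrib.framework.
v = tf.Variable(tf.zeros([1024, 1024]), name="big_var")
init_op = framework.zero_initializer(v)  # writes zeros straight into v's buffer
with tf.Session() as sess:
  sess.run(init_op)                                   # initializes v in place
  print(sess.run(tf.is_variable_initialized(v)))      # True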
Example #2
def _maybe_load_nccl_ops_so():
    """Loads nccl ops so if it hasn't been loaded already."""

    with _module_lock:
        global _nccl_ops_so
        if not _nccl_ops_so:
            _nccl_ops_so = loader.load_op_library(
                resource_loader.get_path_to_datafile('_nccl_ops.so'))
Example #3
def _maybe_load_nccl_ops_so():
  """Loads nccl ops so if it hasn't been loaded already."""

  with _module_lock:
    global _nccl_ops_so
    if not _nccl_ops_so:
      _nccl_ops_so = loader.load_op_library(
          resource_loader.get_path_to_datafile('_nccl_ops.so'))
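
The same lock-guarded, load-once pattern generalizes to any custom kernel library; here is a sketch with a hypothetical `_my_ops.so` (the file name and globals are illustrative, not a real TF module):

import threading

from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader

_module_lock = threading.Lock()
_my_ops_so = None  # cached module handle


def _maybe_load_my_ops_so():
  """Loads the hypothetical _my_ops.so at most once, safely across threads."""
  global _my_ops_so
  with _module_lock:
    if not _my_ops_so:
      _my_ops_so = loader.load_op_library(
          resource_loader.get_path_to_datafile('_my_ops.so'))
  return _my_ops_so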
Example #4
  def emit():
    source = request_code()
    meta_bgn = source.index('// GLOBALS: ') + len('// GLOBALS: ')
    meta_pos = source.index(' -> ', meta_bgn)
    meta_end = source.index('\n', meta_pos)
    meta_inputs = source[meta_bgn:meta_pos - 1].split('], ')
    meta_outputs = source[meta_pos + len(' -> '):meta_end - 1].split('], ')
    kwargs['source'] = source
    kwargs['antares_ir'] = ir

    def parse_tensor(encoded_tensor):
      name, parts = encoded_tensor.split(':')
      dtype, shapes = parts.split('[')
      return name, dtype, [int(x) for x in shapes.split(', ')]

    code_name = 'Antares' + hashlib.sha256(expression.encode()).hexdigest()
    tf_module_path = f'/tmp/antares_tf_{backend}_{code_name}.cc'

    shutil.copyfile(resource_loader.get_path_to_datafile('main_ops.cc.in'), tf_module_path)
    with open(tf_module_path, 'a') as fp:
      fp.write('REGISTER_OP(OP_NAME)')
      for i in range(len(meta_inputs)):
        name, dtype, shape = parse_tensor(meta_inputs[i])
        fp.write(f'\n  .Input("{name}: {dtype}") // {shape}')
      for i in range(len(meta_outputs)):
        name, dtype, shape = parse_tensor(meta_outputs[i])
        fp.write(f'\n  .Output("{name}: {dtype}") // {shape}')
      fp.write('\n  .Attr("source: string").Attr("antares_ir: string").Attr("tf_module_path: string").Attr("meta_inputs: list(string)").Attr("meta_outputs: list(string)").SetIsStateful()')
      fp.write('\n  .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) {')
      for i in range(len(meta_outputs)):
        name, dtype, shape = parse_tensor(meta_outputs[i])
        fp.write(f'\n    c->set_output({i}, c->MakeShape({{ {str(shape)[1:-1]} }}));')
      fp.write('\n    return ::tensorflow::Status::OK();\n  });')

    libops_path = get_tensorflow_antares_component(tf_module_path, code_name, 'gcc')
    library = loader.load_op_library(libops_path)
    antares_func = None
    for attr in dir(library):
      if attr.startswith('antares') and '_eager' not in attr:
        antares_func = getattr(library, attr)
        break
    if not antares_func:
      raise Exception("Invalid antares component is made.")

    kwargs['tf_module_path'] = tf_module_path
    kwargs['meta_inputs'] = meta_inputs
    kwargs['meta_outputs'] = meta_outputs
    result = antares_func(**kwargs)

    output_names = [parse_tensor(x)[0] for x in meta_outputs]
    if len(output_names) == 1:
      result = tf.identity(result, name=output_names[0])
    else:
      result = list(result)
      for i in range(len(result)):
        result[i] = tf.identity(result[i], name=output_names[i])
      result = tuple(result)
    return result
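
For intuition, a hedged walk-through of the metadata parsing above, using an invented `// GLOBALS:` line (tensor names and shapes are illustrative):

# Suppose request_code() returned source text containing:
#   // GLOBALS: input0:float32[4, 64] -> output0:float32[4, 64]
# The meta_pos - 1 / meta_end - 1 slicing drops each section's closing ']',
# so entries reach parse_tensor() without it. Reproducing that logic standalone:
encoded = 'input0:float32[4, 64'
name, parts = encoded.split(':')
dtype, shapes = parts.split('[')
print(name, dtype, [int(x) for x in shapes.split(', ')])
# -> input0 float32 [4, 64]; these values drive the .Input()/.Output() lines
#    and the MakeShape() shape-inference stubs appended to the module above.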
Example #5
def init_library():
    global communicate_library
    if communicate_library is None:
        libcommunicate_path = get_tensorflow_antares_component(
            os.path.dirname(__file__) + '/communicate_ops.cc',
            'AntaresCommunicate',
            using_mpi=True)
        communicate_library = loader.load_op_library(libcommunicate_path)
    return communicate_library
Example #6
def Load():
  """Load training ops library and return the loaded module."""
  with _ops_lock:
    global _training_ops
    if not _training_ops:
      ops_path = resource_loader.get_path_to_datafile(TRAINING_OPS_FILE)
      logging.info('data path: %s', ops_path)
      _training_ops = loader.load_op_library(ops_path)

      assert _training_ops, 'Could not load _training_ops.so'
  return _training_ops
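
Usage is simply attribute lookup on the returned module; a hedged sketch (the kernel name below is hypothetical and depends on what TRAINING_OPS_FILE actually registers):

training_ops = Load()
# Hypothetical kernel name, for illustration only.
grow_op = getattr(training_ops, 'grow_tree', None)
if grow_op is None:
  raise NotImplementedError('expected kernel missing from the loaded library')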
Example #7
def Load():
    """Load training ops library and return the loaded module."""
    with _ops_lock:
        global _training_ops
        if not _training_ops:
            ops_path = resource_loader.get_path_to_datafile(TRAINING_OPS_FILE)
            logging.info('data path: %s', ops_path)
            _training_ops = loader.load_op_library(ops_path)

            assert _training_ops, 'Could not load _training_ops.so'
    return _training_ops
Example #8
def load_op(name, has_grad=False, public=False):
  global __all__
  path = os.path.join(os.path.dirname(__file__), '%s_op.so' % name)
  if os.path.isfile(path):
    _module = loader.load_op_library(path)
    if has_grad:
      if public:
        __all__.append('%s' % name)
        __all__.append('%s_grad' % name)
      return getattr(_module, '%s' % name), getattr(_module, '%s_grad' % name)
    else:
      if public:
        __all__.append('%s' % name)
      return getattr(_module, '%s' % name)
  else:
    print('[WARNING]: %s does not exist' % name)
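
A hypothetical call site, assuming a `my_filter_op.so` next to this module that registers kernels named `my_filter` and `my_filter_grad`:

# With a gradient kernel, an (op, grad) pair comes back:
my_filter, my_filter_grad = load_op('my_filter', has_grad=True, public=True)

# Without one, a single callable is returned:
other_op = load_op('other', has_grad=False)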
Example #9
def make_op(antares_ir, inputs, server_addr=None):
    if server_addr is None:
        server_addr = __default_server_addr__
    input_dict, kwargs = {}, {}
    for i in range(len(inputs)):
        dtype = str(inputs[i].dtype.name)
        input_dict['input%d' % i] = {
            'dtype': dtype[:-4] if dtype.endswith('_ref') else dtype,
            'shape': [int(x) for x in inputs[i].shape]
        }
        kwargs['input%d' % i] = inputs[i]

    input_dict = json.dumps(input_dict)
    expression = '- einstein_v2("%s", input_dict=%s)' % (antares_ir.replace(
        '"', '\\"'), input_dict)
    print('+ [Antares Op]', expression)

    h = http_client.HTTPConnection(server_addr, timeout=10)
    try:
        h.request('GET', '/', headers={'COMPUTE_V1': expression})
    except Exception:
        raise Exception(
            "Failed to contact the Antares server: %s (not started?)" %
            server_addr)
    res = h.getresponse()
    if res.status != 200:
        raise Exception("Failed to get server response, reason: %s" % res.reason)

    source = res.read().decode()
    try:
        meta_bgn = source.index('///') + len('///')
    except ValueError:
        raise Exception("Illegal syntax for Antares expression: %s" %
                        expression)
    meta_pos = source.index(':', meta_bgn)
    meta_end = source.index('\n', meta_pos)
    meta_inputs = source[meta_bgn:meta_pos].split(',')
    meta_outputs = source[meta_pos + 1:meta_end].split(',')
    kwargs['source'] = source
    kwargs['antares_ir'] = antares_ir

    code_name = 'Antares' + hashlib.sha256(expression.encode()).hexdigest()
    tf_module_path = '/tmp/antares_tf_%s.cc' % code_name

    shutil.copyfile(resource_loader.get_path_to_datafile('main_ops.cc.in'),
                    tf_module_path)
    with open(tf_module_path, 'a') as fp:
        fp.write('REGISTER_OP(OP_NAME)')
        for i in range(len(meta_inputs)):
            shape, dtype, name = meta_inputs[i].split('/')
            fp.write('\n  .Input("%s: %s") // %s' %
                     (name, dtype, shape.replace('-', ', ')))
        for i in range(len(meta_outputs)):
            shape, dtype, name = meta_outputs[i].split('/')
            fp.write('\n  .Output("%s: %s") // %s' %
                     (name, dtype, shape.replace('-', ', ')))
        fp.write(
            '\n  .Attr("source: string").Attr("antares_ir: string").Attr("tf_module_path: string").Attr("meta_inputs: list(string)").Attr("meta_outputs: list(string)").SetIsStateful()'
        )
        fp.write(
            '\n  .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) {'
        )
        for i in range(len(meta_outputs)):
            fp.write('\n    c->set_output(%d, c->MakeShape({%s}));' %
                     (i, meta_outputs[i].split('/')[0].replace('-', ', ')))
        fp.write('\n    return ::tensorflow::Status::OK();\n  });')

    libops_path = get_tensorflow_antares_component(tf_module_path, code_name)
    library = loader.load_op_library(libops_path)
    antares_func = None
    for attr in dir(library):
        if attr.startswith('antares') and '_eager' not in attr:
            antares_func = getattr(library, attr)
            break
    if not antares_func:
        raise Exception("Invalid antares component is made.")

    kwargs['tf_module_path'] = tf_module_path
    kwargs['meta_inputs'] = meta_inputs
    kwargs['meta_outputs'] = meta_outputs
    result = antares_func(**kwargs)

    result._antares_props = {'COMPUTE_V1': expression}
    return result
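
A hedged call sketch: the IR string is illustrative einstein_v2 syntax, and it assumes an Antares server is reachable at the default address.

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[128, 128], name='input_x')
y = make_op('output0[N, M] = input0[N, M] + 1.0', [x])   # illustrative IR
with tf.Session() as sess:
    print(sess.run(y, feed_dict={x: np.ones([128, 128], np.float32)}))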
Example #10
"""Python wrapper for the reduce slice operators."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.reduce_slice_ops.ops import gen_reduce_slice_ops
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader


_reduce_slice_ops = loader.load_op_library(
    resource_loader.get_path_to_datafile("_reduce_slice_ops.so"))


reduce_slice_sum = gen_reduce_slice_ops.reduce_slice_sum
reduce_slice_prod = gen_reduce_slice_ops.reduce_slice_prod
reduce_slice_max = gen_reduce_slice_ops.reduce_slice_max
reduce_slice_min = gen_reduce_slice_ops.reduce_slice_min
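
A hedged usage sketch for reduce_slice_sum: output row i is the sum of data[indices[i, 0]:indices[i, 1]] along `axis` (the [begin, end) convention per the op's docs).

import tensorflow as tf

data = tf.constant([[1, 2], [3, 4], [5, 6]], tf.int32)
indices = tf.constant([[0, 2], [1, 3]], tf.int64)  # [begin, end) row ranges
out = reduce_slice_sum(data, indices, axis=0)
with tf.Session() as sess:
  print(sess.run(out))  # [[4, 6], [8, 10]]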
Example #11
"""Tensorflow op performing fused conv2d bias_add and relu."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.fused_conv.ops import gen_fused_conv2d_bias_activation_op
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader

_fused_conv2d_bias_activation_op_so = loader.load_op_library(
    resource_loader.get_path_to_datafile(
        "_fused_conv2d_bias_activation_op.so"))


# pylint: disable=redefined-builtin
def fused_conv2d_bias_activation(conv_input,
                                 filter,
                                 bias,
                                 strides=None,
                                 padding=None,
                                 conv_input_scale=1.0,
                                 side_input_scale=0.0,
                                 side_input=None,
                                 activation_mode="Relu",
                                 data_format=None,
                                 filter_format=None,
Example #12
"""Python layer for distort_image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.util import loader
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import resource_loader

_distort_image_ops = loader.load_op_library(
    resource_loader.get_path_to_datafile('_distort_image_ops.so'))


# pylint: disable=invalid-name
def random_hsv_in_yiq(image,
                      max_delta_hue=0,
                      lower_saturation=1,
                      upper_saturation=1,
                      lower_value=1,
                      upper_value=1,
                      seed=None):
  """Adjust hue, saturation, value of an RGB image randomly in YIQ color space.

  Equivalent to `adjust_yiq_hsv()` but uses a `delta_h` randomly
  picked in the interval `[-max_delta_hue, max_delta_hue]`, a `scale_saturation`
  randomly picked in the interval `[lower_saturation, upper_saturation]`, and
Example #13
"""Python wrapper for prefetching_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.data.python.ops import gen_prefetching_ops
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader

_prefetching_ops = loader.load_op_library(
    resource_loader.get_path_to_datafile("../../_prefetching_ops.so"))


# TODO(rohanj): Add a python class that constructs resource in the __init__
# method and provides a get_next() that calls the prefetch op.
def function_buffering_resource(string_arg,
                                target_device,
                                shared_name,
                                f,
                                buffer_size,
                                thread_pool_size=1,
                                container="",
                                name=None):
  return gen_prefetching_ops.function_buffering_resource(
      string_arg=string_arg,
      target_device=target_device,
Example #14
import abc

from tensorflow.contrib.rnn.python.ops import fused_rnn_cell
from tensorflow.contrib.util import loader
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import resource_loader

_lstm_ops_so = loader.load_op_library(
    resource_loader.get_path_to_datafile("_lstm_ops.so"))


# pylint: disable=invalid-name
def _lstm_block_cell(x,
                     cs_prev,
                     h_prev,
                     w,
                     b,
                     wci=None,
                     wcf=None,
                     wco=None,
                     forget_bias=None,
                     cell_clip=None,
                     use_peephole=None,
                     name=None):
Example #15
import collections
import numbers

from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf

# pylint: disable=wildcard-import,undefined-variable
from tensorflow.contrib.factorization.python.ops.gen_factorization_ops import *
# pylint: enable=wildcard-import
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.platform import resource_loader

_factorization_ops = loader.load_op_library(
    resource_loader.get_path_to_datafile("_factorization_ops.so"))


class WALSModel(object):
  r"""A model for Weighted Alternating Least Squares matrix factorization.

  It minimizes the following loss function over U, V:
   \\( ||W \odot (A - U V^T) ||_F^2 + \lambda (||U||_F^2 + ||V||_F^2) \\)
    where,
    A: input matrix,
    W: weight matrix,
    U, V: row_factors and column_factors matrices,
    \\(\lambda\\): regularization.
  Also we assume that W is of the following special form:
  \\( W_{ij} = W_0 + R_i * C_j \\)  if \\(A_{ij} \ne 0\\),
  \\(W_{ij} = W_0\\) otherwise.
Example #16
"""Python wrapper for input_pipeline_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.util import loader
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import resource_loader


_input_pipeline_ops = loader.load_op_library(
    resource_loader.get_path_to_datafile("_input_pipeline_ops.so"))


def obtain_next(string_list_tensor, counter):
  """Basic wrapper for the ObtainNextOp.

  Args:
    string_list_tensor: A tensor that is a list of strings
    counter: an int64 ref tensor to keep track of which element is returned.

  Returns:
    An op that produces the element at counter + 1 in the list, round
    robin style.
  """
  return _input_pipeline_ops.obtain_next(string_list_tensor, counter)
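
A hedged usage sketch: round-robin over a string list. The counter must be an int64 ref variable; starting it at -1 makes the first fetch return element 0.

import tensorflow as tf

file_list = tf.constant([b'a.txt', b'b.txt', b'c.txt'])
counter = tf.Variable(-1, dtype=tf.int64, use_resource=False)  # ref variable
next_element = obtain_next(file_list, counter)
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(next_element))  # b'a.txt'
  print(sess.run(next_element))  # b'b.txt'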
Example #17

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# pylint: disable=unused-import
from tensorflow.contrib.periodic_resample.python.ops import gen_periodic_resample_op

from tensorflow.contrib.periodic_resample.python.ops.gen_periodic_resample_op import periodic_resample, periodic_resample_op_grad

from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
# pylint: enable=unused-import

_periodic_resample_op = loader.load_op_library(
    resource_loader.get_path_to_datafile('_periodic_resample_op.so'))


@ops.RegisterGradient("PeriodicResample")
def _periodic_resample_grad_cc(op, grad):
    return periodic_resample_op_grad(grad, op.inputs[0].shape,
                                     op.get_attr('shape'))
Example #18
"""Wrappers for bucketization operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader

_bucketization_op = loader.load_op_library(
    resource_loader.get_path_to_datafile("_bucketization_op.so"))


def bucketize(input_tensor, boundaries, name=None):
  """Bucketizes input_tensor by given boundaries.

  See bucketize_op.cc for more details.

  Args:
    input_tensor: A `Tensor` which will be bucketized.
    boundaries: A list of floats giving the bucket boundaries; it must be sorted.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A `Tensor` with type int32 which indicates the corresponding bucket for
      each value in `input_tensor`.
Example #19

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# pylint: disable=unused-import
from tensorflow.contrib.periodic_resample.python.ops import gen_periodic_resample_op

from tensorflow.contrib.periodic_resample.python.ops.gen_periodic_resample_op import periodic_resample

from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
# pylint: enable=unused-import

_periodic_resample_op = loader.load_op_library(
    resource_loader.get_path_to_datafile('_periodic_resample_op.so'))
Example #20
"""Ops for GPU collective operations implemented using NVIDIA nccl."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import threading

from tensorflow.contrib.nccl.ops import gen_nccl_ops
from tensorflow.contrib.util import loader
from tensorflow.python.eager import context
from tensorflow.python.framework import device
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader

_nccl_ops_so = loader.load_op_library(
    resource_loader.get_path_to_datafile('_nccl_ops.so'))


def all_sum(tensors):
  """Returns a list of tensors with the all-reduce sum across `tensors`.

  The computation is done with an all-reduce operation, so if only some of the
  returned tensors are evaluated then the computation will hang.

  Args:
    tensors: The input tensors across which to sum; must be assigned
      to GPU devices.

  Returns:
    List of tensors, each with the sum of the input tensors, where tensor i has
    the same device as `tensors[i]`.
Example #21
"""Functional operations."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import platform

from tensorflow.contrib.tpu.python.tpu import gen_functional_ops


TPUPartitionedCall = gen_functional_ops._tpu_partitioned_call  # pylint: disable=invalid-name,protected-access


if platform.system() != "Windows":
  # pylint: disable=wildcard-import,unused-import,g-import-not-at-top
  from tensorflow.contrib.tpu.ops.gen_tpu_ordinal_selector_op import *

  from tensorflow.contrib.util import loader
  from tensorflow.python.platform import resource_loader
  # pylint: enable=wildcard-import,unused-import,g-import-not-at-top

  _tpu_partitioned_call_op = loader.load_op_library(
      resource_loader.get_path_to_datafile("../ops/_functional_ops.so")
  )
Example #22
"""Beam Search helper ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.seq2seq.ops import gen_beam_search_ops
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader

_beam_search_ops_so = loader.load_op_library(
    resource_loader.get_path_to_datafile("_beam_search_ops.so"))

gather_tree = gen_beam_search_ops.gather_tree
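
A hedged sketch of calling gather_tree (argument names per the TF 1.x kernel; shapes follow the [max_time, batch_size, beam_width] convention, and the values are invented):

import tensorflow as tf

step_ids = tf.constant([[[2, 5]], [[6, 3]], [[1, 1]]], tf.int32)
parent_ids = tf.constant([[[0, 0]], [[0, 1]], [[1, 0]]], tf.int32)
max_lens = tf.constant([3], tf.int32)  # one entry per batch element
beams = gather_tree(step_ids, parent_ids,
                    max_sequence_lengths=max_lens, end_token=0)
with tf.Session() as sess:
  print(sess.run(beams))  # beam ids re-threaded through the parent pointers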
Example #23
import collections
import numbers

from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf

# pylint: disable=wildcard-import,undefined-variable
from tensorflow.contrib.factorization.python.ops.gen_factorization_ops import *
# pylint: enable=wildcard-import
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.platform import resource_loader

_factorization_ops = loader.load_op_library(
    resource_loader.get_path_to_datafile("_factorization_ops.so"))


class WALSModel(object):
    r"""A model for Weighted Alternating Least Squares matrix factorization.

  It minimizes the following loss function over U, V:
   \\( ||W \odot (A - U V^T) ||_F^2 + \lambda (||U||_F^2 + ||V||_F^2) \\)
    where,
    A: input matrix,
    W: weight matrix,
    U, V: row_factors and column_factors matrices,
    \\(\lambda\\): regularization.
  Also we assume that W is of the following special form:
  \\( W_{ij} = W_0 + R_i * C_j \\)  if \\(A_{ij} \ne 0\\),
  \\(W_{ij} = W_0\\) otherwise.
Example #24
"""Custom ops used by tensorforest."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.tensor_forest.python.ops.gen_tensor_forest_ops import *
# pylint: enable=wildcard-import
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader

_tensor_forest_ops = loader.load_op_library(
    resource_loader.get_path_to_datafile('_tensor_forest_ops.so'))
Example #25
from __future__ import print_function
import itertools

from tensorflow.contrib.mkldnn_rnn.ops import gen_mkldnn_rnn_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import resource_loader
from tensorflow.python.training import saver

_mkldnn_rnn_ops_so = loader.load_op_library(
    resource_loader.get_path_to_datafile("_mkldnn_rnn_ops.so"))

_mkldnn_rnn_common_doc_string = """
  Mkldnn RNN has an opaque parameter buffer that can be used for inference and
  training. But it is possible that the layout of the parameter buffers
  changes between generations. So it is highly recommended to use
  RNNParamsSaveable to save and restore weights and biases in a canonical
  format.

  This is a typical use case:
    * The user creates a MkldnnRNN model.
    * The user queries the parameter buffer size.
    * The user creates a variable of that size that serves as the parameter
        buffers.
    * The user either initializes the parameter buffer, or loads the canonical
        weights into the parameter buffer.
Example #26
"""Exposes the Python wrapper of TRTEngineOp."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import platform

if platform.system() != "Windows":
  # pylint: disable=wildcard-import,unused-import,g-import-not-at-top
  from tensorflow.contrib.tensorrt.ops.gen_trt_engine_op import *

  from tensorflow.contrib.util import loader
  from tensorflow.python.platform import resource_loader
  # pylint: enable=wildcard-import,unused-import,g-import-not-at-top

  _trt_engine_op = loader.load_op_library(
      resource_loader.get_path_to_datafile("_trt_engine_op.so"))
else:
  raise RuntimeError("Windows platforms are not supported")
Example #27
"""Python helper for loading kinesis ops and kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader

_dataset_ops = loader.load_op_library(
    resource_loader.get_path_to_datafile("../../_dataset_ops.so"))
Example #28
"""AudioMicrofrontend Op creates filterbanks from audio data."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from tensorflow.contrib.lite.experimental.microfrontend.ops import gen_audio_microfrontend_op
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader

_audio_microfrontend_op = loader.load_op_library(
    resource_loader.get_path_to_datafile("_audio_microfrontend_op.so"))


def audio_microfrontend(audio,
                        sample_rate=16000,
                        window_size=25,
                        window_step=10,
                        num_channels=32,
                        upper_band_limit=7500.0,
                        lower_band_limit=125.0,
                        smoothing_bits=10,
                        even_smoothing=0.025,
                        odd_smoothing=0.06,
                        min_signal_remaining=0.05,
                        enable_pcan=True,
                        pcan_strength=0.95,
Example #29
"""Encoding and decoding audio using FFmpeg."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.ffmpeg.ops import gen_decode_audio_op_py
from tensorflow.contrib.ffmpeg.ops import gen_decode_video_op_py
from tensorflow.contrib.ffmpeg.ops import gen_encode_audio_op_py
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
from tensorflow.python.util.deprecation import deprecated

_ffmpeg_so = loader.load_op_library(
    resource_loader.get_path_to_datafile('ffmpeg.so'))


@deprecated('2018-09-04', 'This will be deleted and should not be used.')
def decode_audio(contents, file_format=None, samples_per_second=None,
                 channel_count=None, stream=None):
  """Create an op that decodes the contents of an audio file.

  Note that ffmpeg is free to select the "best" audio track from an mp4.
  https://trac.ffmpeg.org/wiki/Map

  Args:
    contents: The binary contents of the audio file to decode. This is a
        scalar.
    file_format: A string or scalar string tensor specifying which
        format the contents will conform to. This can be mp3, mp4, ogg,
Example #30
"""Wrappers for sparse cross operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.util import loader
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import resource_loader

_sparse_feature_cross_op = loader.load_op_library(
    resource_loader.get_path_to_datafile("_sparse_feature_cross_op.so"))

# Default hash key for the FingerprintCat64.
SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY = 0xDECAFCAFFE


@deprecated_arg_values(
    "2016-11-20",
    "The default behavior of sparse_feature_cross is changing, the default\n"
    "value for hash_key will change to SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY.\n"
    "From that point on sparse_feature_cross will always use FingerprintCat64\n"
    "to concatenate the feature fingerprints. And the underlying\n"
    "_sparse_feature_cross_op.sparse_feature_cross operation will be marked\n"
    "as deprecated.",
    hash_key=None)
def sparse_feature_cross(inputs,
Example #31
"""Python layer for image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.image.ops import gen_single_image_random_dot_stereograms_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader

_sirds_ops = loader.load_op_library(
    resource_loader.get_path_to_datafile(
        "_single_image_random_dot_stereograms.so"))


def single_image_random_dot_stereograms(depth_values,
                                        hidden_surface_removal=None,
                                        convergence_dots_size=None,
                                        dots_per_inch=None,
                                        eye_separation=None,
                                        mu=None,
                                        normalize=None,
                                        normalize_max=None,
                                        normalize_min=None,
                                        border_level=None,
                                        number_colors=None,
                                        output_image_shape=None,
Example #32
"""Import and conditionally load custom ops for training boosted trees."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# pylint: disable=wildcard-import
from tensorflow.contrib.boosted_trees.python.ops.gen_training_ops import *
# pylint: enable=wildcard-import
from tensorflow.contrib.util import loader
from tensorflow.python.framework import errors
from tensorflow.python.platform import resource_loader

# Conditionally load ops, they might already be statically linked in.
try:
  _training_ops = loader.load_op_library(
      resource_loader.get_path_to_datafile('_training_ops.so'))
except (errors.NotFoundError, IOError):
  print('Error loading _training_ops.so')
Example #33
"""Encoding and decoding audio using FFmpeg."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.ffmpeg.ops import gen_decode_audio_op_py
from tensorflow.contrib.ffmpeg.ops import gen_encode_audio_op_py
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader

_ffmpeg_so = loader.load_op_library(
    resource_loader.get_path_to_datafile('ffmpeg.so'))


def decode_audio(contents, file_format=None, samples_per_second=None,
                 channel_count=None):
  """Create an op that decodes the contents of an audio file.

  Note that ffmpeg is free to select the "best" audio track from an mp4.
  https://trac.ffmpeg.org/wiki/Map

  Args:
    contents: The binary contents of the audio file to decode. This is a
        scalar.
    file_format: A string specifying which format the contents will conform
        to. This can be mp3, mp4, ogg, or wav.
    samples_per_second: The number of samples per second that is assumed.
Example #34
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from six import iteritems

from tensorflow.contrib.bigtable.ops import gen_bigtable_ops
from tensorflow.contrib.util import loader
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import resource_loader

_bigtable_so = loader.load_op_library(
    resource_loader.get_path_to_datafile("_bigtable.so"))


class BigtableClient(object):
  """BigtableClient is the entrypoint for interacting with Cloud Bigtable in TF.

  BigtableClient encapsulates a connection to Cloud Bigtable, and exposes the
  `table` method to open a Bigtable Table.
  """

  def __init__(self, project_id, instance_id, connection_pool_size=None):
    """Creates a BigtableClient that can be used to open connections to tables.

    Args:
      project_id: A string representing the GCP project id to connect to.
      instance_id: A string representing the Bigtable instance to connect to.
Example #35
"""Loads the _boosted_trees_ops.so when the binary is not statically linked."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.util import loader
from tensorflow.python.framework import errors
from tensorflow.python.platform import resource_loader

# Conditionally load ops, they might already be statically linked in.
try:
  loader.load_op_library(
      resource_loader.get_path_to_datafile('_boosted_trees_ops.so'))
except (errors.NotFoundError, IOError):
  print('Error loading _boosted_trees_ops.so')
Example #36
"""Split handler custom ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# pylint: disable=wildcard-import
from tensorflow.contrib.boosted_trees.python.ops.gen_ensemble_optimizer_ops import *
# pylint: enable=wildcard-import
from tensorflow.contrib.util import loader
from tensorflow.python.framework import errors
from tensorflow.python.platform import resource_loader

# Conditionally load ops, they might already be statically linked in.
try:
  _ensemble_optimizer_ops = loader.load_op_library(
      resource_loader.get_path_to_datafile('_ensemble_optimizer_ops.so'))
except (errors.NotFoundError, IOError):
  print('Error loading _ensemble_optimizer_ops.so')
Example #37
"""Python helper for loading Ignite ops and kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader

_dataset_ops = loader.load_op_library(
    resource_loader.get_path_to_datafile("../../_ignite_ops.so"))
"""Wrappers for sparse cross operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.util import loader
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import resource_loader

_sparse_feature_cross_op = loader.load_op_library(
    resource_loader.get_path_to_datafile("_sparse_feature_cross_op.so"))

# Default hash key for the FingerprintCat64.
SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY = 0xDECAFCAFFE


@deprecated_arg_values(
    "2016-11-20",
    "The default behavior of sparse_feature_cross is changing, the default\n"
    "value for hash_key will change to SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY.\n"
    "From that point on sparse_feature_cross will always use FingerprintCat64\n"
    "to concatenate the feature fingerprints. And the underlying\n"
    "_sparse_feature_cross_op.sparse_feature_cross operation will be marked\n"
    "as deprecated.",
    hash_key=None)
def sparse_feature_cross(inputs, hashed_output=False, num_buckets=0,
Example #39
"""Tests for Bigtable Ops."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib import bigtable
from tensorflow.contrib.bigtable.ops import gen_bigtable_ops
from tensorflow.contrib.bigtable.ops import gen_bigtable_test_ops
from tensorflow.contrib.util import loader
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.util import compat

_bigtable_so = loader.load_op_library(
    resource_loader.get_path_to_datafile("_bigtable_test.so"))


class BigtableOpsTest(test.TestCase):
  COMMON_ROW_KEYS = ["r1", "r2", "r3"]
  COMMON_VALUES = ["v1", "v2", "v3"]

  def setUp(self):
    self._client = gen_bigtable_test_ops.bigtable_test_client()
    table = gen_bigtable_ops.bigtable_table(self._client, "testtable")
    self._table = bigtable.BigTable("testtable", None, table)

  def _makeSimpleDataset(self):
    output_rows = dataset_ops.Dataset.from_tensor_slices(self.COMMON_ROW_KEYS)
    output_values = dataset_ops.Dataset.from_tensor_slices(self.COMMON_VALUES)
    return dataset_ops.Dataset.zip((output_rows, output_values))
Example #40
"""Ops for memory statistics."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.memory_stats.ops import gen_memory_stats_ops
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader

_memory_stats_ops_so = loader.load_op_library(
    resource_loader.get_path_to_datafile("_memory_stats_ops.so"))


def MaxBytesInUse():
  """Generates an op that computes the peak memory of a device."""
  return gen_memory_stats_ops.max_bytes_in_use()
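
A hedged usage sketch: sample the device's peak allocation after running some work (assumes a visible GPU; the sizes are arbitrary).

import tensorflow as tf

with tf.device('/gpu:0'):
  x = tf.random_normal([4096, 4096])
  y = tf.matmul(x, x)
  peak = MaxBytesInUse()
with tf.Session() as sess:
  _, peak_bytes = sess.run([y, peak])
  print('peak bytes in use:', peak_bytes)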
Example #41
from tensorflow.contrib.tpu.python.tpu import tpu_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging

if platform.system() != "Windows":
  # pylint: disable=wildcard-import,unused-import,g-import-not-at-top
  from tensorflow.contrib.tpu.ops import gen_tpu_ops
  from tensorflow.contrib.tpu.ops.gen_tpu_ops import *

  from tensorflow.contrib.util import loader
  from tensorflow.python.platform import resource_loader
  # pylint: enable=wildcard-import,unused-import,g-import-not-at-top

  _tpu_ops = loader.load_op_library(
      resource_loader.get_path_to_datafile("_tpu_ops.so"))

  def _create_default_group_assignment():
    num_shards = tpu_function.get_tpu_context().number_of_shards
    if num_shards is None:
      logging.warning(
          "cross_replica_sum should be used within a tpu_shard_context, but "
          "got unset number_of_shards. Assuming 1.")
      num_shards = 1
    group_assignment = [list(range(num_shards))]
    return group_assignment

  def all_to_all(x,
                 concat_dimension,
                 split_dimension,
                 split_count,
Example #42

"""Tensorflow op performing differentiable resampling."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.resampler.ops import gen_resampler_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader

_resampler_so = loader.load_op_library(
    resource_loader.get_path_to_datafile("_resampler_ops.so"))


def resampler(data, warp, name="resampler"):
  """Resamples input data at user defined coordinates.

  The resampler currently only supports bilinear interpolation of 2D data.

  Args:
    data: Tensor of shape `[batch_size, data_height, data_width,
      data_num_channels]` containing 2D data that will be resampled.
    warp: Tensor of minimum rank 2 containing the coordinates at which
      resampling will be performed. Since only bilinear interpolation is
      currently supported, the last dimension of the `warp` tensor must be 2,
      representing the (x, y) coordinate where x is the index for width and y is
      the index for height.
Example #43
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math

from tensorflow.contrib.framework.python.ops import gen_checkpoint_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import resource_loader

_checkpoint_ops_so = loader.load_op_library(
    resource_loader.get_path_to_datafile("_checkpoint_ops.so"))

ops.NotDifferentiable("GenerateVocabRemapping")
ops.NotDifferentiable("LoadAndRemapMatrix")


def _load_and_remap_matrix(ckpt_path,
                           old_tensor_name,
                           new_row_vocab_offset,
                           num_rows_to_load,
                           new_col_vocab_size,
                           initializer,
                           old_row_vocab_file=None,
                           new_row_vocab_file=None,
                           old_col_vocab_file=None,
                           new_col_vocab_file=None,
Example #44
"""Wrappers for nearest neighbor operations."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader

_nearest_neighbor_ops = loader.load_op_library(
    resource_loader.get_path_to_datafile("_nearest_neighbor_ops.so"))


def hyperplane_lsh_probes(point_hyperplane_product,
                          num_tables,
                          num_hyperplanes_per_table,
                          num_probes,
                          name=None):
    """Computes probes for the hyperplane hash.

  The op supports multiprobing, i.e., the number of requested probes can be
  larger than the number of tables. In that case, the same table can be probed
  multiple times.

  The first `num_tables` probes are always the primary hashes for each table.
Example #45
"""Ops for GPU collective operations implemented using NVIDIA nccl."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import threading

from tensorflow.contrib.nccl.ops import gen_nccl_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import resource_loader

_nccl_ops_so = loader.load_op_library(
    resource_loader.get_path_to_datafile('_nccl_ops.so'))


def all_sum(tensors):
    """Returns a list of tensors with the all-reduce sum across `tensors`.

  The computation is done with an all-reduce operation, so if only some of the
  returned tensors are evaluated then the computation will hang.

  Args:
    tensors: The input tensors across which to sum; must be assigned
      to GPU devices.

  Returns:
    List of tensors, each with the sum of the input tensors, where tensor i has
    the same device as `tensors[i]`.
Example #46
"""Loader for the custom inc_op."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import platform

if platform.system() != "Windows":
  # pylint: disable=g-import-not-at-top
  from tensorflow.contrib.util import loader
  from tensorflow.python.platform import resource_loader
  # pylint: enable=g-import-not-at-top

  _inc_op = loader.load_op_library(
      resource_loader.get_path_to_datafile("_inc_op.so"))
else:
  raise RuntimeError("Windows not supported")
Example #47
from tensorflow.contrib.tpu.python.tpu import tpu_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging

if platform.system() != "Windows":
    # pylint: disable=wildcard-import,unused-import,g-import-not-at-top
    from tensorflow.contrib.tpu.ops import gen_tpu_ops
    from tensorflow.contrib.tpu.ops.gen_tpu_ops import *

    from tensorflow.contrib.util import loader
    from tensorflow.python.platform import resource_loader
    # pylint: enable=wildcard-import,unused-import,g-import-not-at-top

    _tpu_ops = loader.load_op_library(
        resource_loader.get_path_to_datafile("_tpu_ops.so"))

    def _create_default_group_assignment():
        num_shards = tpu_function.get_tpu_context().number_of_shards
        if num_shards is None:
            logging.warning(
                "cross_replica_sum should be used within a tpu_shard_context, but "
                "got unset number_of_shards. Assuming 1.")
            num_shards = 1
        group_assignment = [list(range(num_shards))]
        return group_assignment

    def all_to_all(x,
                   concat_dimension,
                   split_dimension,
                   split_count,
Example #48
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import random

from tensorflow.contrib.input_pipeline.ops import gen_input_pipeline_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import resource_loader


_input_pipeline_ops = loader.load_op_library(
    resource_loader.get_path_to_datafile("_input_pipeline_ops.so"))


def obtain_next(string_list_tensor, counter):
  """Basic wrapper for the ObtainNextOp.

  Args:
    string_list_tensor: A tensor that is a list of strings.
    counter: An int64 ref tensor that tracks which element was last returned.

  Returns:
    An op that produces the element at counter + 1 in the list, round-robin
    style.
  """
  return gen_input_pipeline_ops.obtain_next(string_list_tensor, counter)
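A short usage sketch (hedged: the names and initial counter value are illustrative); it reuses the imports already present in this module:

work_list = constant_op.constant(["file1", "file2", "file3"])
# Start at -1 so the first evaluation returns element 0.
counter = variable_scope.variable(
    initial_value=-1, dtype=dtypes.int64, trainable=False,
    name="obtain_next_counter")
next_element = obtain_next(work_list, counter)
# Successive evaluations yield "file1", "file2", "file3", "file1", ...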
Example No. 49
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for prefetching_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.data.python.ops import gen_prefetching_ops
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader

_prefetching_ops = loader.load_op_library(
    resource_loader.get_path_to_datafile("../../_prefetching_ops.so"))


# TODO(rohanj): Add a python class that constructs resource in the __init__
# method and provides a get_next() that calls the prefetch op. (A hedged
# sketch of such a class follows the function below.)
def function_buffering_resource(string_arg,
                                target_device,
                                shared_name,
                                f,
                                buffer_size,
                                thread_pool_size=1,
                                container="",
                                name=None):
    return gen_prefetching_ops.function_buffering_resource(
        string_arg=string_arg,
        target_device=target_device,
        shared_name=shared_name,
        f=f,
        buffer_size=buffer_size,
        thread_pool_size=thread_pool_size,
        container=container,
        name=name)
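A minimal sketch of the class the TODO above describes (hedged: the class, its argument subset, and the `function_buffering_resource_get_next` op name are assumptions, not the module's confirmed API):

class _FunctionBufferingResource(object):
    """Sketch: builds the buffering resource once; get_next() dequeues."""

    def __init__(self, string_arg, target_device, shared_name, f, buffer_size):
        self._resource = function_buffering_resource(
            string_arg=string_arg,
            target_device=target_device,
            shared_name=shared_name,
            f=f,
            buffer_size=buffer_size)

    def get_next(self, output_types, name=None):
        # Assumed generated op that dequeues the next prefetched element.
        return gen_prefetching_ops.function_buffering_resource_get_next(
            function_buffer_resource=self._resource,
            output_types=output_types,
            name=name)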
Example No. 50
  def restore(self, restored_tensors, unused_restored_shapes):
    """Restores the associated tree ensemble from 'restored_tensors'.

    Args:
      restored_tensors: the tensors that were loaded from a checkpoint.
      unused_restored_shapes: the shapes this object should conform to after
        restore. Not meaningful for trees.

    Returns:
      The operation that restores the state of the tree ensemble variable.
    """
    with ops.control_dependencies([self._create_op]):
      return self.deserialize(
          stamp_token=restored_tensors[0],
          num_updates=restored_tensors[1],
          partition_ids=restored_tensors[2],
          feature_ids=restored_tensors[3],
          gradients=restored_tensors[4],
          hessians=restored_tensors[5])

  def resource(self):
    return self._resource_handle


# Conditionally load ops, they might already be statically linked in.
try:
  _stats_accumulator_ops = loader.load_op_library(
      resource_loader.get_path_to_datafile("_stats_accumulator_ops.so"))
except (errors.NotFoundError, IOError):
  print("Error loading _stats_accumulator_ops.so")
Example No. 51
import abc

from tensorflow.contrib.rnn.ops import gen_lstm_ops
from tensorflow.contrib.rnn.python.ops import fused_rnn_cell
from tensorflow.contrib.util import loader
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import resource_loader

_lstm_ops_so = loader.load_op_library(
    resource_loader.get_path_to_datafile("_lstm_ops.so"))


# pylint: disable=invalid-name
def _lstm_block_cell(x,
                     cs_prev,
                     h_prev,
                     w,
                     b,
                     wci=None,
                     wcf=None,
                     wco=None,
                     forget_bias=None,
                     cell_clip=None,
                     use_peephole=None,
                     name=None):
Example No. 52
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for the reduce slice operators."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.reduce_slice_ops.ops import gen_reduce_slice_ops
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader


_reduce_slice_ops = loader.load_op_library(
    resource_loader.get_path_to_datafile("_reduce_slice_ops.so"))


reduce_slice_sum = gen_reduce_slice_ops.reduce_slice_sum
reduce_slice_prod = gen_reduce_slice_ops.reduce_slice_prod
reduce_slice_max = gen_reduce_slice_ops.reduce_slice_max
reduce_slice_min = gen_reduce_slice_ops.reduce_slice_min
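A hedged usage sketch of `reduce_slice_sum`; the [begin, end) row-range convention for `indices` follows the op's documentation, and the values are illustrative:

import tensorflow as tf

data = tf.constant([[1, 2, 3],
                    [40, 50, 60],
                    [700, 800, 900]])
# Each row of `indices` is a [begin, end) range over axis 0 of `data`.
indices = tf.constant([[0, 1], [1, 1], [0, 2]])
result = reduce_slice_sum(data, indices, axis=0)
# Expected: [[1, 2, 3], [0, 0, 0], [41, 52, 63]]  (empty ranges sum to 0)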
Example No. 53
from tensorflow.contrib.util import loader
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.embedding_ops import embedding_lookup
from tensorflow.python.platform import resource_loader

_clustering_ops = loader.load_op_library(
    resource_loader.get_path_to_datafile('_clustering_ops.so'))

# Euclidean distance between vectors U and V is defined as ||U - V||_F, the
# square root of the sum of the absolute squares of the element-wise
# differences.
SQUARED_EUCLIDEAN_DISTANCE = 'squared_euclidean'
# Cosine distance between vectors U and V is defined as
# 1 - (U \dot V) / (||U||_F ||V||_F)
COSINE_DISTANCE = 'cosine'
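
# Quick numeric check (hedged illustration, not part of the original module):
#   import numpy as np
#   u, v = np.array([1.0, 0.0]), np.array([0.0, 1.0])
#   np.sum((u - v) ** 2)                  # squared Euclidean distance: 2.0
#   1.0 - np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))  # cosine: 1.0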

RANDOM_INIT = 'random'
KMEANS_PLUS_PLUS_INIT = 'kmeans_plus_plus'


class KMeans(object):
  """Creates the graph for k-means clustering."""
Example No. 54
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for memory statistics."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.memory_stats.ops import gen_memory_stats_ops
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader

_memory_stats_ops_so = loader.load_op_library(
    resource_loader.get_path_to_datafile("_memory_stats_ops.so"))


def BytesInUse():
  """Generates an op that computes the current memory of a device."""
  return gen_memory_stats_ops.bytes_in_use()


def BytesLimit():
  """Generates an op that measures the total memory (in bytes) of a device."""
  return gen_memory_stats_ops.bytes_limit()


def MaxBytesInUse():
  """Generates an op that computes the peak memory of a device."""
  return gen_memory_stats_ops.max_bytes_in_use()
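A usage sketch (hedged: assumes a GPU build and at least one GPU; the ops report statistics for the device they are placed on):

import tensorflow as tf

with tf.device('/gpu:0'):  # assumed device
  a = tf.random_normal([1024, 1024])
  b = tf.random_normal([1024, 1024])
  c = tf.matmul(a, b)
  stats = [MaxBytesInUse(), BytesInUse(), BytesLimit()]

with tf.Session() as sess:
  sess.run(c)
  peak, in_use, limit = sess.run(stats)
  print("peak=%d in_use=%d limit=%d" % (peak, in_use, limit))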