Example #1
def get_tf_libs(build_ext, lib_dirs, cpp_flags):
    last_err = None
    for tf_libs in [['tensorflow_framework'], []]:
        try:
            lib_file = test_compile(build_ext,
                                    'test_tensorflow_libs',
                                    library_dirs=lib_dirs,
                                    libraries=tf_libs,
                                    extra_compile_preargs=cpp_flags,
                                    code=textwrap.dedent('''\
                    void test() {
                    }
                    '''))

            from tensorflow.python.framework import load_library
            load_library.load_op_library(lib_file)

            return tf_libs
        except (CompileError, LinkError):
            last_err = 'Unable to determine -l link flags to use with TensorFlow (see error above).'
        except Exception:
            last_err = 'Unable to determine -l link flags to use with TensorFlow.  ' \
                       'Last error:\n\n%s' % traceback.format_exc()

    raise DistutilsPlatformError(last_err)
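A minimal sketch of a call site for the probe above, assuming a Horovod-style setup.py where build_ext is the distutils command instance and test_compile is the compile-and-link helper used in the example (the tf.sysconfig calls are real TensorFlow API; the wiring around them is hypothetical):

import tensorflow as tf

def probe_tf_link_flags(build_ext):
    # Directory containing libtensorflow_framework, plus the compile flags
    # TensorFlow recommends for building custom ops against this install.
    lib_dirs = [tf.sysconfig.get_lib()]
    cpp_flags = tf.sysconfig.get_compile_flags()
    return get_tf_libs(build_ext, lib_dirs, cpp_flags)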
Example #2
def get_tf_abi(build_ext, include_dirs, lib_dirs, libs, cpp_flags):
    last_err = None
    cxx11_abi_macro = '_GLIBCXX_USE_CXX11_ABI'
    for cxx11_abi in ['0', '1']:
        try:
            lib_file = test_compile(build_ext, 'test_tensorflow_abi',
                                    macros=[(cxx11_abi_macro, cxx11_abi)],
                                    include_dirs=include_dirs, library_dirs=lib_dirs,
                                    libraries=libs, extra_preargs=cpp_flags,
                                    code=textwrap.dedent('''\
                #include <string>
                #include "tensorflow/core/framework/op.h"
                #include "tensorflow/core/framework/op_kernel.h"
                #include "tensorflow/core/framework/shape_inference.h"
                void test() {
                    auto ignore = tensorflow::strings::StrCat("a", "b");
                }
                '''))

            from tensorflow.python.framework import load_library
            load_library.load_op_library(lib_file)

            return cxx11_abi_macro, cxx11_abi
        except (CompileError, LinkError):
            last_err = 'Unable to determine CXX11 ABI to use with TensorFlow (see error above).'
        except Exception:
            last_err = 'Unable to determine CXX11 ABI to use with TensorFlow.  ' \
                       'Last error:\n\n%s' % traceback.format_exc()

    raise DistutilsPlatformError(last_err)
Example #3
    def testBasic(self):
        library_filename = os.path.join(resource_loader.get_data_files_path(),
                                        'duplicate_op.so')
        load_library.load_op_library(library_filename)

        with self.cached_session():
            self.assertEqual(math_ops.add(1, 41).eval(), 42)
Example #4
def get_tf_abi(build_ext, include_dirs, lib_dirs, libs, cpp_flags):
    last_err = None
    cxx11_abi_macro = '_GLIBCXX_USE_CXX11_ABI'
    for cxx11_abi in ['0', '1']:
        try:
            lib_file = test_compile(build_ext, 'test_tensorflow_abi',
                                    macros=[(cxx11_abi_macro, cxx11_abi)],
                                    include_dirs=include_dirs, library_dirs=lib_dirs,
                                    libraries=libs, extra_compile_preargs=cpp_flags,
                                    code=textwrap.dedent('''\
                #include <string>
                #include "tensorflow/core/framework/op.h"
                #include "tensorflow/core/framework/op_kernel.h"
                #include "tensorflow/core/framework/shape_inference.h"
                void test() {
                    auto ignore = tensorflow::strings::StrCat("a", "b");
                }
                '''))

            from tensorflow.python.framework import load_library
            load_library.load_op_library(lib_file)

            return cxx11_abi_macro, cxx11_abi
        except (CompileError, LinkError):
            last_err = 'Unable to determine CXX11 ABI to use with TensorFlow (see error above).'
        except Exception:
            last_err = 'Unable to determine CXX11 ABI to use with TensorFlow.  ' \
                       'Last error:\n\n%s' % traceback.format_exc()

    raise DistutilsPlatformError(last_err)
Example #5
def _load_library(name, op_list=None):
    """Loads a .so file containing the specified operators.

    Args:
      name: The name of the .so file to load.
      op_list: A list of names of operators that the library should have. If
        None then the .so file's contents will not be verified.

    Raises:
      NameError: If one of the required ops is missing.
    """
    try:
        filename = resource_loader.get_path_to_datafile(name)
        library = load_library.load_op_library(filename)
        for expected_op in (op_list or []):
            for lib_op in library.OP_LIST.op:
                if lib_op.name == expected_op:
                    break
            else:
                raise NameError(
                    'Could not find operator %s in dynamic library %s' %
                    (expected_op, name))
        return library
    except errors.NotFoundError:
        logging.warning('%s file could not be loaded.', name)
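A hedged usage sketch of the loader above: passing op_list makes the call fail fast with NameError when the shared object does not register the expected op. The library and op names here are hypothetical:

gen_my_ops = _load_library('_my_ops.so', op_list=['MyCustomOp'])
if gen_my_ops is not None:  # None means a NotFoundError was logged above
    print(gen_my_ops.OP_LIST)  # registered ops, as verified against op_list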
Example #6
def load_op_library(path):
  """Loads a contrib op library from the given path.

  NOTE(mrry): On Windows, we currently assume that some contrib op
  libraries are statically linked into the main TensorFlow Python
  extension DLL - use dynamically linked ops if the .so is present.

  Args:
    path: An absolute path to a shared object file.

  Returns:
    A Python module containing the Python wrappers for Ops defined in the
    plugin.
  """
  if os.name == 'nt':
    # To avoid making every user_ops aware of Windows, rewrite
    # the file extension from .so to .dll.
    path = re.sub(r'\.so$', '.dll', path)

    # Currently we have only some user_ops as dlls on windows - don't try
    # to load them if the dll is not found.
    # TODO(mrry): Once we have all of them this check should be removed.
    if not os.path.exists(path):
      return None
  path = resource_loader.get_path_to_datafile(path)
  ret = load_library.load_op_library(path)
  assert ret, 'Could not load %s' % path
  return ret
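Callers of the Windows-aware variant above must tolerate a None result, since on Windows the ops may be statically linked rather than shipped as a DLL. A short sketch with a hypothetical library name:

my_ops = load_op_library('_my_contrib_ops.so')  # '.so' is rewritten to '.dll' on Windows
if my_ops is None:
    # Fall back to the statically linked kernels in the main extension DLL.
    pass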
Example #7
def _maybe_load_sdca_ops():
  with _sdca_ops_lock:
    global _sdca_ops
    if not _sdca_ops:
      _sdca_ops = load_op_library(os.path.join(
          resource_loader.get_data_files_path(), '_sdca_ops.so'))
      assert _sdca_ops, 'Could not load _sdca_ops.so'
Example #8
def load_op_library(path):
    """Loads a contrib op library from the given path.

    NOTE(mrry): On Windows, we currently assume that some contrib op
    libraries are statically linked into the main TensorFlow Python
    extension DLL - use dynamically linked ops if the .so is present.

    Args:
      path: An absolute path to a shared object file.

    Returns:
      A Python module containing the Python wrappers for Ops defined in the
      plugin.
    """
    if os.name == 'nt':
        # To avoid making every user_ops aware of Windows, rewrite
        # the file extension from .so to .dll.
        path = re.sub(r'\.so$', '.dll', path)

        # TODO: currently we have only some user_ops as .dll's on windows - don't try
        #   to load them if the dll is not found. Once we have all of them
        #   this check should be removed.
        if not os.path.exists(path):
            return None
    path = resource_loader.get_path_to_datafile(path)
    ret = load_library.load_op_library(path)
    assert ret, 'Could not load %s' % path
    return ret
Example #9
def load_trt_ops():
  """Load TF-TRT op libraries so if it hasn't been loaded already."""
  global _trt_ops_so

  if platform.system() == "Windows":
    raise RuntimeError("Windows platforms are not supported")

  with _module_lock:
    if _trt_ops_so:
      return

    # TODO(laigd): we should load TF-TRT kernels here as well after removing the
    # swig binding.
    try:
      # TODO(laigd): consider getting rid of these unused imports.
      # pylint: disable=unused-import,g-import-not-at-top,unused-variable
      from tensorflow.compiler.tf2tensorrt.ops.gen_trt_ops import get_serialized_resource_op
      from tensorflow.compiler.tf2tensorrt.ops.gen_trt_ops import trt_engine_op
      from tensorflow.python.framework import load_library
      from tensorflow.python.platform import resource_loader
      # pylint: enable=unused-import,g-import-not-at-top,unused-variable

      _trt_ops_so = load_library.load_op_library(
          resource_loader.get_path_to_datafile("_trt_ops.so"))
    except errors.NotFoundError as e:
      no_trt_message = (
          "**** Failed to initialize TensorRT. This is either because the "
          "TensorRT installation path is not in LD_LIBRARY_PATH, or because "
          "you do not have it installed. If not installed, please go to "
          "https://developer.nvidia.com/tensorrt to download and install "
          "TensorRT ****")
      print(no_trt_message)
      raise e
Example #10
def _load_library(name, op_list=None):
  """Loads a .so file containing the specified operators.

  Args:
    name: The name of the .so file to load.
    op_list: A list of names of operators that the library should have. If None
        then the .so file's contents will not be verified.

  Raises:
    NameError if one of the required ops is missing.
  """
  try:
    filename = resource_loader.get_path_to_datafile(name)
    library = load_library.load_op_library(filename)
    for expected_op in (op_list or []):
      for lib_op in library.OP_LIST.op:
        if lib_op.name == expected_op:
          break
      else:
        raise NameError(
          'Could not find operator %s in dynamic library %s' %
          (expected_op, name))
    return library
  except errors.NotFoundError:
    logging.warning('%s file could not be loaded.', name)
Example #11
  def testBasic(self):
    library_filename = os.path.join(resource_loader.get_data_files_path(),
                                    'ackermann_op.so')
    ackermann = load_library.load_op_library(library_filename)

    with self.cached_session():
      self.assertEqual(ackermann.ackermann().eval(), b'A(m, 0) == A(m-1, 1)')
Example #12
def _maybe_load_sdca_ops():
  with _sdca_ops_lock:
    global _sdca_ops
    if not _sdca_ops:
      _sdca_ops = load_op_library(os.path.join(
          resource_loader.get_data_files_path(), '_sdca_ops.so'))
      assert _sdca_ops, 'Could not load _sdca_ops.so'
Example #13
    def testBasic(self):
        library_filename = os.path.join(resource_loader.get_data_files_path(),
                                        'duplicate_op.so')
        duplicate = load_library.load_op_library(library_filename)

        self.assertEqual(len(duplicate.OP_LIST.op), 0)

        with self.test_session():
            self.assertEqual(math_ops.add(1, 41).eval(), 42)
Example #14
    def _load_platform_specific_library(self, lib_name):
        system = platform.system()
        if system == "Darwin":
            lib_file_name = lib_name + ".dylib"
        elif system == "Windows":
            lib_file_name = lib_name + ".dll"
        else:
            lib_file_name = lib_name + ".so"
        return load_library.load_op_library(lib_file_name)
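The same suffix selection can be written as a standalone helper when no owning class is available; a sketch under the same assumptions (the bare file name is resolved by the dynamic loader's search path):

import platform
from tensorflow.python.framework import load_library

def load_platform_specific_library(lib_name):
    # Map the host OS to its shared-library suffix; default to '.so'.
    suffix = {"Darwin": ".dylib", "Windows": ".dll"}.get(platform.system(), ".so")
    return load_library.load_op_library(lib_name + suffix)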
Example #15
def _load_library(name):
    """Loads a .so file containing the specified operators.
    Args:
      name: The name of the .so file to load.
    Raises:
      NotFoundError: If the .so file could not be loaded.
    """
    filename = resource_loader.get_path_to_datafile(name)
    library = load_library.load_op_library(filename)
    return library
Example #16
def Load():
    """Load training ops library and return the loaded module."""
    with _ops_lock:
        global _training_ops
        if not _training_ops:
            ops_path = resource_loader.get_path_to_datafile(TRAINING_OPS_FILE)
            logging.info('data path: %s', ops_path)
            _training_ops = load_library.load_op_library(ops_path)

            assert _training_ops, 'Could not load _training_ops.so'
    return _training_ops
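Because Load() holds _ops_lock and caches the module in a global, concurrent callers share one module and the .so is loaded at most once. A usage sketch against the definitions above:

training_ops = Load()
assert Load() is training_ops  # subsequent calls return the cached module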
Example #17
    def testBasic(self):
        library_filename = os.path.join(resource_loader.get_data_files_path(),
                                        'ackermann_op.so')
        ackermann = load_library.load_op_library(library_filename)

        self.assertEqual(len(ackermann.OP_LIST.op), 1)
        self.assertEqual(ackermann.OP_LIST.op[0].name, 'Ackermann')

        with self.test_session():
            self.assertEqual(ackermann.ackermann().eval(),
                             b'A(m, 0) == A(m-1, 1)')
Example #18
def Load():
  """Load the inference ops library and return the loaded module."""
  with _ops_lock:
    global _inference_ops
    if not _inference_ops:
      ops_path = resource_loader.get_path_to_datafile(INFERENCE_OPS_FILE)
      logging.info('data path: %s', ops_path)
      _inference_ops = load_library.load_op_library(ops_path)

      assert _inference_ops, 'Could not load inference_ops.so'
  return _inference_ops
Example #19
def tfr_gen_from_module(source, method_prefix=None, op_libraries=None):
  """Parse a python code and emit the TFR functions from a target class."""
  op_defs = OpDefCache()

  if op_libraries:
    for m in op_libraries:
      lib_dir = os.path.dirname(m.__file__)
      prefix_len = len('gen_')
      lib_name = os.path.basename(m.__file__)[prefix_len:].replace('.py', '.so')
      # Load the op library so the op is added to the op registry. This is
      # required when the op cc_library couldn't be statically linked in open
      # source.
      # This is a no op if the op shared library couldn't be found in the same
      # directory of the op Python API.
      load_library.load_op_library(os.path.join(lib_dir, lib_name))

  mlir_funcs = [
      tfr_gen(func, op_defs)
      for name, func in tf_inspect.getmembers(source, tf_inspect.isfunction)
      if not method_prefix or name.startswith(method_prefix)
  ]

  return '\n'.join(mlir_funcs + op_defs.mlir_external_funcs())
Example #20
def load_module(name):
    """Loads the module with the given name.

    First attempts to load the module as though it was embedded into the binary
    using Bazel. If that fails, then it attempts to load the module as though
    it was installed in site-packages via PIP.

    Args:
        name: The name of the module, e.g. "_tfq_simulate_ops.so"

    Returns:
        A python module containing the Python wrappers for the Ops.

    Raises:
        RuntimeError: If the library cannot be found.
    """
    try:
        path = resource_loader.get_path_to_datafile(name)
        return load_library.load_op_library(path)
    except Exception:
        path = os.path.join(get_python_lib(), "tensorflow_quantum/core/ops",
                            name)
        return load_library.load_op_library(path)
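The two-step lookup above means the same call works from a Bazel runfiles tree and from a PIP install; for example, loading the module named in the docstring:

simulate_ops = load_module("_tfq_simulate_ops.so")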
Example #21
def tfr_gen_from_module(source, method_prefix=None, op_libraries=None):
  """Parse the input source module and emit the TFR functions."""
  op_defs = OpDefCache()

  # Load the op library so the op is added to the op registry. This is
  # required when the op cc_library couldn't be statically linked in open
  # source.
  # This is a no op if the op shared library couldn't be found in the same
  # directory of the op Python API.
  # TODO(fengliuai): make the .so file path configurable.
  if op_libraries:
    prefix_len = len('gen_')
    for m in op_libraries:
      lib_dir = os.path.dirname(m.__file__)
      lib_name = os.path.basename(m.__file__)[prefix_len:].replace('.py', '.so')
      lib_path = os.path.join(lib_dir, lib_name)
      if os.path.exists(lib_path):
        logging.info('load file: ' + lib_path)
        load_library.load_op_library(lib_path)
  else:
    # The op library is generated from the source module, so we load all the
    # .so files in its directory.
    lib_dir = os.path.dirname(source.__file__)
    for lib_name in os.listdir(lib_dir):
      if lib_name.endswith('.so'):
        lib_path = os.path.join(lib_dir, lib_name)
        logging.info('load file: ' + lib_path)
        load_library.load_op_library(lib_path)

  mlir_funcs = [
      tfr_gen(func, op_defs)
      for name, func in tf_inspect.getmembers(source, tf_inspect.isfunction)
      if not method_prefix or name.startswith(method_prefix)
  ]

  return '\n'.join(mlir_funcs + op_defs.mlir_external_funcs())
Example #22
def zero_initializer(ref, use_locking=True, name="zero_initializer"):
    """Initialize 'ref' with all zeros, ref tensor should be uninitialized.
  If already initialized, you will get ValueError. This op is intended to
  save memory during initialization.
  Args:
    ref: ref of the tensor need to be zero initialized.
    name: optional name for this operation.
  Returns:
    ref that initialized.
  Raises:
    ValueError: If ref tensor is initialized.
  """
    _variable_ops = load_op_library(
        resource_loader.get_path_to_datafile("_variable_ops.so"))
    assert _variable_ops, "Could not load _variable_ops.so"
    return gen_variable_ops.zero_initializer(ref, name=name)
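A minimal TF1-style sketch of the op above: zero_initializer replaces the variable's regular initializer and avoids materializing an initial-value tensor. The variable name and shape are illustrative:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

v = tf.get_variable('embedding', shape=[1024, 1024], use_resource=False)
init_op = zero_initializer(v)  # run this instead of v.initializer
with tf.Session() as sess:
    sess.run(init_op)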
Example #23
def load_trt_ops():
    """Load TF-TRT op libraries so if it hasn't been loaded already."""
    global _tf_trt_so

    if not is_tensorrt_enabled():
        return

    if platform.system() == "Windows":
        raise RuntimeError("Windows platforms are not supported")

    with _module_lock:
        if _tf_trt_so:
            return

        try:
            # pylint: disable=g-import-not-at-top,unused-variable
            # This will call register_op_list() in
            # tensorflow/python/framework/op_def_registry.py, but it doesn't register
            # the op or the op kernel in C++ runtime.
            from tensorflow.compiler.tf2tensorrt.ops.gen_trt_ops import trt_engine_op
            # pylint: enable=g-import-not-at-top,unused-variable
        except ImportError as e:
            print(
                "**** Failed to import TF-TRT ops. This is because the binary was "
                "not built with CUDA or TensorRT enabled. ****")
            raise e

        try:
            # pylint: disable=g-import-not-at-top
            from tensorflow.python.framework import load_library
            from tensorflow.python.platform import resource_loader
            # pylint: enable=g-import-not-at-top

            # Loading the shared object will cause registration of the op and the op
            # kernel if we link TF-TRT dynamically.
            _tf_trt_so = load_library.load_op_library(
                resource_loader.get_path_to_datafile("libtftrt.so"))
        except errors.NotFoundError as e:
            no_trt_message = (
                "**** Failed to initialize TensorRT. This is either because the "
                "TensorRT installation path is not in LD_LIBRARY_PATH, or because "
                "you do not have it installed. If not installed, please go to "
                "https://developer.nvidia.com/tensorrt to download and install "
                "TensorRT ****")
            print(no_trt_message)
            raise e
Example #24
    def __init__(self, lib_path, func_name, output_dtype, output_shape):
        self.lib_path = lib_path
        self.func_name = func_name
        self.output_dtype = output_dtype

        # const(0) indicates an invalid dynamic shape
        self.dynamic_output_shape = tf.constant(0, tf.int64)
        self.static_output_shape = None
        self.has_static_output_shape = False  # extra flag is required

        if self._is_static_shape(output_shape):
            self.static_output_shape = output_shape
            self.has_static_output_shape = True
        elif output_shape is not None:
            self.dynamic_output_shape = self._pack_shape_tensor(output_shape)

        self.module = load_library.load_op_library('tvm_dso_op.so')
        self.tvm_dso_op = self.module.tvm_dso_op
Example #25
def load_trt_ops():
    """Load TF-TRT op libraries so if it hasn't been loaded already."""
    global _tf_trt_so

    if platform.system() == "Windows":
        raise RuntimeError("Windows platforms are not supported")

    with _module_lock:
        if _tf_trt_so:
            return

        try:
            # pylint: disable=g-import-not-at-top,unused-variable
            # This registers the TRT ops, it doesn't require loading TRT library.
            from tensorflow.compiler.tf2tensorrt.ops.gen_trt_ops import trt_engine_op
            # pylint: enable=g-import-not-at-top,unused-variable
        except ImportError as e:
            print(
                "**** Failed to import TF-TRT ops. This is because the binary was "
                "not built with CUDA or TensorRT enabled. ****")
            raise e

        # TODO(laigd): we should load TF-TRT kernels here as well after removing the
        # swig binding.
        try:
            # pylint: disable=g-import-not-at-top
            from tensorflow.python.framework import load_library
            from tensorflow.python.platform import resource_loader
            # pylint: enable=g-import-not-at-top

            _tf_trt_so = load_library.load_op_library(
                resource_loader.get_path_to_datafile("libtftrt.so"))
        except errors.NotFoundError as e:
            no_trt_message = (
                "**** Failed to initialize TensorRT. This is either because the "
                "TensorRT installation path is not in LD_LIBRARY_PATH, or because "
                "you do not have it installed. If not installed, please go to "
                "https://developer.nvidia.com/tensorrt to download and install "
                "TensorRT ****")
            print(no_trt_message)
            raise e
Example #26
    def __init__(self, lib_path, func_name, output_dtype, output_shape):
        self.lib_path = lib_path
        self.func_name = func_name
        self.output_dtype = output_dtype

        # const(0) indicates an invalid dynamic shape
        self.dynamic_output_shape = tf.constant(0, tf.int64)
        self.static_output_shape = None
        self.has_static_output_shape = False  # extra flag is required

        if self._is_static_shape(output_shape):
            self.static_output_shape = output_shape
            self.has_static_output_shape = True
        elif output_shape is not None:
            self.dynamic_output_shape = self._pack_shape_tensor(output_shape)

        # TODO: support non-xpu device
        # self.device = device
        # delay initialization until the first call, when the number of
        # input arguments is known
        self.tvm_dso_op = None
        self.module = load_library.load_op_library('tvm_dso_op.so')
Example #27
def load_op_library(path):
    """Loads a contrib op library from the given path.

    NOTE(mrry): On Windows, we currently assume that contrib op
    libraries are statically linked into the main TensorFlow Python
    extension DLL.

    Args:
      path: An absolute path to a shared object file.

    Returns:
      A Python module containing the Python wrappers for Ops defined in the
      plugin.
    """
    if os.name != 'nt':
        path = resource_loader.get_path_to_datafile(path)
        ret = load_library.load_op_library(path)
        assert ret, 'Could not load %s' % path
        return ret
    else:
        # NOTE(mrry): On Windows, contrib ops are statically linked into the
        # main extension DLL, so there is nothing to load here.
        return None
Example #28
# coding=utf-8
"""Tensorflow lowercasing operation for UTF8 strings."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_conversion_ops
from tensorflow.python.ops.ragged import ragged_tensor

from tensorflow.python.framework import load_library
from tensorflow.python.platform import resource_loader
gen_normalize_ops = load_library.load_op_library(
    resource_loader.get_path_to_datafile('_normalize_ops.so'))


# pylint: disable=redefined-builtin
def case_fold_utf8(input, name=None):
  """Applies case folding to every UTF-8 string in the input.

  The input is a `Tensor` or `RaggedTensor` of any shape, and the resulting
  output has the same shape as the input. Note that NFKC normalization is
  implicitly applied to the strings.

  For example:

  >>> case_fold_utf8(['The   Quick-Brown',
  ...                 'CAT jumped over',
  ...                 'the lazy dog  !!  '])
Example #29
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Range coding operations."""

from tensorflow.python.framework import load_library
from tensorflow.python.platform import resource_loader
from tensorflow_compression.python.ops import namespace_helper


ops = namespace_helper.get_ops(load_library.load_op_library(
    resource_loader.get_path_to_datafile(
        "../../cc/libtensorflow_compression.so")))

globals().update(ops)
__all__ = list(ops)
Example #30
from __future__ import print_function

import os
from absl import flags

import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow.compiler.mlir.tfr.examples.mnist import gen_mnist_ops
from tensorflow.compiler.mlir.tfr.examples.mnist import ops_defs  # pylint: disable=unused-import
from tensorflow.python.framework import load_library

flags.DEFINE_integer('train_steps', 20, 'Number of steps in training.')

_lib_dir = os.path.dirname(gen_mnist_ops.__file__)
_lib_name = os.path.basename(gen_mnist_ops.__file__)[4:].replace('.py', '.so')
load_library.load_op_library(os.path.join(_lib_dir, _lib_name))

# MNIST dataset parameters.
num_classes = 10  # total classes (0-9 digits).
num_features = 784  # data features (img shape: 28*28).
num_channels = 1

# Training parameters.
learning_rate = 0.001
display_step = 10
batch_size = 32

# Network parameters.
n_hidden_1 = 32  # 1st conv layer number of neurons.
n_hidden_2 = 64  # 2nd conv layer number of neurons.
n_hidden_3 = 64  # 1st fully connected layer of neurons.
Example #31
# We need to revisit this later. We can automate that using cmake configure
# command.
TF_INSTALLED_VER = TF_VERSION.split('.')
TF_NEEDED_VER = TF_VERSION_NEEDED.split('.')

ngraph_classic_loaded = True
ngraph_bridge_lib = None
if (TF_INSTALLED_VER[0] == TF_NEEDED_VER[0]) and \
   (TF_INSTALLED_VER[1] == TF_NEEDED_VER[1]) and \
   ((TF_INSTALLED_VER[2].split('-'))[0] == (TF_NEEDED_VER[2].split('-'))[0]):
    libpath = os.path.dirname(__file__)

    if "NGRAPH_TF_USE_DEVICE_MODE" not in os.environ:
        full_lib_path = os.path.join(libpath, 'libngraph_bridge.' + ext)
        _ = load_library.load_op_library(full_lib_path)
        ngraph_bridge_lib = ctypes.cdll.LoadLibrary(full_lib_path)
    else:
        full_lib_path = os.path.join(libpath, 'libngraph_bridge_device.' + ext)
        _ = load_library.load_op_library(full_lib_path)
        ngraph_bridge_device_lib = ctypes.cdll.LoadLibrary(full_lib_path)
        ngraph_classic_loaded = False
else:
    raise ValueError(
        "Error: Installed TensorFlow version {0}\nnGraph bridge built with: {1}"
        .format(TF_VERSION, TF_VERSION_NEEDED))


def requested():
    return ops.get_default_graph()._attr_scope({
        "_ngraph_requested":
Example #32
# ============================================================================
"""Use time_two ops in python."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import functools
import math
import numpy as np

import tensorflow as tf
from tensorflow.python.framework import load_library
from tensorflow.python.platform import resource_loader

small_d_ops = load_library.load_op_library(
    resource_loader.get_path_to_datafile('libpwa_op.so'))

import nvtx.plugins.tf as tf_nvtx


def small_d(beta, j):
    beta, d_id = tf_nvtx.ops.start(beta, "small_d_matrix")
    w = small_d_weight(j)
    a, b = small_d_ops.small_d(beta, w, j)
    a = tf_nvtx.ops.end(a, d_id)
    return a


@functools.lru_cache()
def small_d_weight(j):  # the prefactor in the d-function of β
    """
Example #33
# Copyright 2018 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Azure filesystem ops in python"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import load_library
from tensorflow.python.platform import resource_loader

azure_filesystem_ops = load_library.load_op_library(
    resource_loader.get_path_to_datafile('_azfs_ops.so'))
Example #34
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Double op is a user's defined op for testing purpose."""

from tensorflow.lite.python.testdata import double_op_wrapper
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import load_library
from tensorflow.python.platform import resource_loader

_double_op = load_library.load_op_library(
    resource_loader.get_path_to_datafile('_double_op.so'))


def double(input_tensor):
    """Double op applies element-wise double to input data."""
    if (input_tensor.dtype != dtypes.int32
            and input_tensor.dtype != dtypes.float32):
        raise ValueError('Double op only accepts int32 or float32 values.')
    return double_op_wrapper.double(input_tensor)
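A brief eager-mode usage sketch for the wrapper above, assuming the _double_op library loaded successfully (values illustrative):

import tensorflow as tf

x = tf.constant([1, 2, 3], dtype=tf.int32)
y = double(x)  # element-wise doubling -> [2, 4, 6]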