import numpy as np

# NOTE: _NP_TO_TF (the NumPy-to-TF dtype mapping) and _get_proto_dtype are
# assumed to be provided by the module under test; they are not defined here.


def test_tensorflow2_datatypes():
    # _NP_TO_TF contains all the mappings of NumPy types to TF types.
    try:
        from tensorflow.python import _pywrap_bfloat16

        # TF 2.x implements a custom NumPy datatype for the brain
        # floating-point type (bfloat16), which is currently only supported
        # on TPUs, so drop it from the mapping before checking the rest.
        _np_bfloat16 = _pywrap_bfloat16.TF_bfloat16_type()
        _NP_TO_TF.pop(_np_bfloat16)
    except (ModuleNotFoundError, ValueError, ImportError):
        pass

    for _type in _NP_TO_TF:
        try:
            _get_proto_dtype(np.dtype(_type))
        except Exception:
            assert False, f"{_type} not supported"
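
For context, a minimal sketch of the NumPy-to-TF dtype round trip the test above exercises, assuming only that TensorFlow 2.x and NumPy are installed (tf.as_dtype is TensorFlow's public conversion entry point):

import numpy as np
import tensorflow as tf

# tf.as_dtype maps a NumPy dtype to the corresponding tf.DType...
print(tf.as_dtype(np.dtype(np.float32)))  # <dtype: 'float32'>
print(tf.as_dtype(np.dtype(np.int64)))    # <dtype: 'int64'>

# ...and every tf.DType exposes the NumPy type it maps back to.
print(tf.float32.as_numpy_dtype)          # <class 'numpy.float32'>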
Example #2
from __future__ import division
from __future__ import print_function

import numpy as np
from six.moves import builtins

from tensorflow.core.framework import types_pb2
# We need to import pywrap_tensorflow prior to the bfloat wrapper to avoid
# protobuf errors where a file is defined twice on macOS.
# pylint: disable=invalid-import-order,g-bad-import-order
from tensorflow.python import pywrap_tensorflow  # pylint: disable=unused-import
from tensorflow.python import _pywrap_bfloat16
from tensorflow.python import _dtypes
from tensorflow.python.util.tf_export import tf_export

# The NumPy scalar type that TensorFlow registers for bfloat16, its 16-bit
# truncated floating-point format.
_np_bfloat16 = _pywrap_bfloat16.TF_bfloat16_type()


# pylint: disable=slots-on-old-class
@tf_export("dtypes.DType", "DType")
class DType(_dtypes.DType):
  """Represents the type of the elements in a `Tensor`.

  The following `DType` objects are defined:

  * `tf.float16`: 16-bit half-precision floating-point.
  * `tf.float32`: 32-bit single-precision floating-point.
  * `tf.float64`: 64-bit double-precision floating-point.
  * `tf.bfloat16`: 16-bit truncated floating-point.
  * `tf.complex64`: 64-bit single-precision complex.
  * `tf.complex128`: 128-bit double-precision complex.