def is_mkl_enabled(self):
    """Return True if this TensorFlow build was compiled with MKL support.

    TF 1.x exposes the check on ``tf.pywrap_tensorflow``; TF 2.x moved it
    into the private ``_pywrap_util_port`` module.
    """
    tf_major = int(tf.__version__.split(".")[0])
    if tf_major < 2:
        return tf.pywrap_tensorflow.IsMklEnabled()
    from tensorflow.python import _pywrap_util_port
    return _pywrap_util_port.IsMklEnabled()
def get_mkl_enabled_flag():
    """Check whether TensorFlow's Intel (MKL/oneDNN) optimizations are active.

    Returns:
        bool: True if the TF build reports MKL enabled, or (TF >= 2.5 only)
        the ``TF_ENABLE_ONEDNN_OPTS`` environment variable is set to 1.
    """
    mkl_enabled = False
    major_version = int(tf.__version__.split(".")[0])
    minor_version = int(tf.__version__.split(".")[1])
    if major_version >= 2:
        # Default to 0 so the check below is safe on TF 2.0-2.4, where the
        # env var is not consulted. (Fixes a NameError in the original:
        # onednn_enabled was only bound in the minor_version >= 5 branch.)
        onednn_enabled = 0
        if minor_version < 5:
            from tensorflow.python import _pywrap_util_port
        else:
            from tensorflow.python.util import _pywrap_util_port
            onednn_enabled = int(os.environ.get('TF_ENABLE_ONEDNN_OPTS', '0'))
        mkl_enabled = _pywrap_util_port.IsMklEnabled() or (onednn_enabled == 1)
    else:
        mkl_enabled = tf.pywrap_tensorflow.IsMklEnabled()
    return mkl_enabled
# Ejemplo n.º 3
# 0
def test_intel_tensorflow():
    """
    Check if Intel version of TensorFlow is installed
    """
    import tensorflow as tf

    print("We are using Tensorflow version {}".format(tf.__version__))

    # Resolve the MKL check from the location appropriate to this TF major
    # version, then report it from a single print site.
    tf_major = int(tf.__version__.split(".")[0])
    if tf_major < 2:
        dnnl_on = tf.pywrap_tensorflow.IsMklEnabled()
    else:
        from tensorflow.python import _pywrap_util_port
        dnnl_on = _pywrap_util_port.IsMklEnabled()
    print("Intel-optimizations (DNNL) enabled:", dnnl_on)
# Ejemplo n.º 4
# 0
def get_mkl_enabled_flag():
    """ Checks if the TensorFlow optimizations for Intel CPU architectures
    are enabled. The source for this sanity check comes from: 
    https://www.intel.com/content/www/us/en/developer/articles/guide/optimization-for-tensorflow-installation-guide.html


    Returns:
        bool: true if Intel optimizations are enabled
    """

    mkl_enabled = False
    major_version = int(tf.__version__.split(".")[0])
    minor_version = int(tf.__version__.split(".")[1])
    if major_version >= 2:
        # Default to 0 so the check below is safe on TF 2.0-2.4, where the
        # env var is not consulted. (Fixes a NameError in the original:
        # onednn_enabled was only bound in the minor_version >= 5 branch.)
        onednn_enabled = 0
        if minor_version < 5:
            from tensorflow.python import _pywrap_util_port
        else:
            from tensorflow.python.util import _pywrap_util_port
            onednn_enabled = int(os.environ.get('TF_ENABLE_ONEDNN_OPTS', '0'))
        mkl_enabled = _pywrap_util_port.IsMklEnabled() or (onednn_enabled == 1)
    else:
        mkl_enabled = tf.pywrap_tensorflow.IsMklEnabled()
    return mkl_enabled
# Ejemplo n.º 5
# 0
import os
import sys
import tensorflow as tf

# Request XLA device registration; must be set before TF initializes devices.
os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'

print("TensorFlow version: ", tf.__version__)
print("Num GPU Available: ",
      len(tf.config.experimental.list_physical_devices('GPU')))

# Private TF module that reports whether the build was compiled with MKL.
from tensorflow.python import _pywrap_util_port
print("MKL enabled:", _pywrap_util_port.IsMklEnabled())

tf.debugging.set_log_device_placement(False)

# TensorBoard writer used as the target for the graph trace below.
logdir = './logs/func2'
writer = tf.summary.create_file_writer(logdir)


@tf.function
def my_func(ax, bx):
    # Matrix product wrapped in tf.function so the call can be graph-traced.
    return tf.matmul(ax, bx)


A = tf.constant([[1, 2], [3, 4]], dtype=tf.float32, name='A')
x = tf.constant([[5, 6], [7, 8]], dtype=tf.float32, name='x')

# Bracket the function call with
# tf.summary.trace_on() and tf.summary.trace_export()
tf.summary.trace_on(graph=True, profiler=False)
z = my_func(A, x)
# NOTE(review): the matching tf.summary.trace_export() call is not visible in
# this chunk — presumably it follows below; without it the trace is never
# written to `logdir`.