def construct_lanczos_params(self, k):
    """Constructs TF ops for storing Lanczos matrices.

    Args:
      k: number of iterations and dimensionality of the tridiagonal matrix
    """
    # Using autograph to automatically handle
    # the control flow of lanczos_decomp
    self.lanczos_eigen_vec = autograph.to_graph(utils.lanczos_decomp)

    def _vector_prod_fn(x):
      return self.dual_object.get_psd_product(x)

    # First, get the eigenvalue corresponding to largest magnitude
    self.alpha, self.beta, self.W = self.lanczos_eigen_vec(_vector_prod_fn,
                                                           0,
                                                           self.dual_object.matrix_m_dimension,
                                                           MIN_LANCZOS_ITER)
    self.eig_max_placeholder = tf.placeholder(tf.float32, shape=[])

    # Shift the matrix by the largest magnitude eigenvalue to compute
    # the smallest real eigenvalue
    self.alpha_hat, self.beta_hat, self.W_hat = self.lanczos_eigen_vec(_vector_prod_fn,
                                                                       self.eig_max_placeholder,
                                                                       self.dual_object.matrix_m_dimension,
                                                                       k)
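
The comment above relies on a standard spectral-shift argument: once the largest-magnitude eigenvalue of the matrix is known, subtracting it from the diagonal maps the smallest real eigenvalue onto the largest-magnitude eigenvalue of the shifted matrix, which Lanczos (or power) iterations find easily. A minimal NumPy sketch of that trick (purely illustrative, not part of the code above):

import numpy as np

A = np.array([[1.0, 2.0], [2.0, 5.0]])            # small symmetric test matrix
lam_max = np.max(np.abs(np.linalg.eigvalsh(A)))    # largest-magnitude eigenvalue
shifted = A - lam_max * np.eye(2)                  # every eigenvalue is now <= 0
# The most negative eigenvalue of the shifted matrix, plus the shift,
# recovers the smallest eigenvalue of the original matrix.
lam_min = np.min(np.linalg.eigvalsh(shifted)) + lam_max
assert np.isclose(lam_min, np.min(np.linalg.eigvalsh(A)))
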
Example #2
  def test_tf_lanczos_smallest_eigval(self):
    tf_num_iter = tf.placeholder(dtype=tf.int32, shape=())
    tf_matrix = tf.placeholder(dtype=tf.float32)
    def _vector_prod_fn(x):
      return tf.matmul(tf_matrix, tf.reshape(x, [-1, 1]))

    min_eigen_fn = autograph.to_graph(utils.tf_lanczos_smallest_eigval)
    tf_eigval, tf_eigvec = min_eigen_fn(
        _vector_prod_fn, MATRIX_DIMENTION, tf_num_iter, dtype=tf.float32)

    with self.test_session() as sess:
      # run this test for a few random matrices
      for _ in range(NUM_RANDOM_MATRICES):
        matrix = np.random.random((MATRIX_DIMENTION, MATRIX_DIMENTION))
        matrix = matrix + matrix.T  # symmetrizing matrix
        eigval, eigvec = sess.run(
            [tf_eigval, tf_eigvec],
            feed_dict={tf_num_iter: NUM_LZS_ITERATIONS, tf_matrix: matrix})

        scipy_min_eigval, scipy_min_eigvec = eigs(
            matrix, k=1, which='SR')
        scipy_min_eigval = np.real(scipy_min_eigval)
        scipy_min_eigvec = np.real(scipy_min_eigvec)
        scipy_min_eigvec = scipy_min_eigvec / np.linalg.norm(scipy_min_eigvec)

        np.testing.assert_almost_equal(eigval, scipy_min_eigval, decimal=3)
        np.testing.assert_almost_equal(np.linalg.norm(eigvec), 1.0)
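        # Eigenvectors are defined only up to sign, so compare the two unit
        # vectors through the absolute value of their dot product.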
        abs_dot_prod = abs(np.dot(eigvec.flatten(), scipy_min_eigvec.flatten()))
        np.testing.assert_almost_equal(abs_dot_prod, 1.0)
Example #3

    def get_min_eig_vec_proxy(self, use_tf_eig=False):
        """Computes the min eigen value and corresponding vector of matrix M.

        Args:
          use_tf_eig: Whether to use tf's default full eigen decomposition

        Returns:
          eig_vec: Eigen vector corresponding to the minimum absolute eigen
            value of matrix M.
        """
        if use_tf_eig:
            # If smoothness parameter is too small, essentially no smoothing
            # Just output the eigen vector corresponding to min
            return tf.cond(self.smooth_placeholder < 1E-8, self.tf_min_eig_vec,
                           self.tf_smooth_eig_vec)

        # Using autograph to automatically handle
        # the control flow of minimum_eigen_vector
        min_eigen_tf = autograph.to_graph(utils.minimum_eigen_vector)

        def _vector_prod_fn(x):
            return self.dual_object.get_psd_product(x)

        estimated_eigen_vector = min_eigen_tf(
            x=self.eig_init_vec_placeholder,
            num_steps=self.eig_num_iter_placeholder,
            learning_rate=self.params['eig_learning_rate'],
            vector_prod_fn=_vector_prod_fn)
        return estimated_eigen_vector
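
The routine above hands utils.minimum_eigen_vector an initial vector, a step count, a learning rate and a matrix-vector product callback. As a rough illustration of what a routine with that signature might do, the sketch below performs gradient descent on the Rayleigh quotient in NumPy; it is an assumption-laden stand-in, not the actual cleverhans implementation:

import numpy as np

def minimum_eigen_vector_sketch(x, num_steps, learning_rate, vector_prod_fn):
    # Hypothetical stand-in: descend the Rayleigh quotient x'Ax / x'x on the
    # unit sphere, which converges to an eigenvector of the smallest
    # eigenvalue for a symmetric matrix.
    x = x / np.linalg.norm(x)
    for _ in range(num_steps):
        ax = vector_prod_fn(x)
        rayleigh = (x.T @ ax).item()
        grad = 2.0 * (ax - rayleigh * x)   # Rayleigh-quotient gradient at unit norm
        x = x - learning_rate * grad
        x = x / np.linalg.norm(x)
    return x

A = np.array([[1.0, 2.0], [2.0, 5.0]])
v = minimum_eigen_vector_sketch(np.array([[1.0], [-1.0]]), 1000, 0.1,
                                lambda z: A @ z)
# v approaches roughly [[0.92], [-0.38]] (up to sign), matching the
# minimum-eigenvector test further below.
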
Example #4
    def test_runtime_error_rewriting_nested(self):
        def test_fn(x):
            def g(y):
                return y**2 // 0

            s = 0
            for xi in x:
                s += g(xi)
            return s

        compiled_fn = ag.to_graph(test_fn)

        # TODO(b/111408261): Nested functions currently do not rewrite correctly,
        # when they do we should change this test to check for the same traceback
        # properties as the other tests.  This should throw a runtime error with a
        # frame with "g" as the function name but because we don't yet add
        # try/except blocks to inner functions the name is "tf__g".
        with self.assertRaises(ag.TfRuntimeError) as error:
            with self.test_session() as sess:
                x = compiled_fn(tf.constant([4, 8]))
                with ag.improved_errors(compiled_fn):
                    sess.run(x)
        expected = error.exception
        custom_traceback = expected.custom_traceback
        num_tf_g_frames = 0
        for frame in custom_traceback:
            _, _, fn_name, _ = frame
            self.assertNotEqual('g', fn_name)
            num_tf_g_frames += int('tf__g' == fn_name)
        self.assertEqual(num_tf_g_frames, 1)
Example #5
  def test_runtime_error_rewriting_nested(self):

    def test_fn(x):

      def g(y):
        return y**2 // 0

      s = 0
      for xi in x:
        s += g(xi)
      return s

    compiled_fn = ag.to_graph(test_fn)

    # TODO(b/111408261): Nested functions currently do not rewrite correctly,
    # when they do we should change this test to check for the same traceback
    # properties as the other tests.  This should throw a runtime error with a
    # frame with "g" as the function name but because we don't yet add
    # try/except blocks to inner functions the name is "tf__g".
    with self.assertRaises(ag.TfRuntimeError) as error:
      with self.cached_session() as sess:
        x = compiled_fn(tf.constant([4, 8]))
        with ag.improved_errors(compiled_fn):
          sess.run(x)
    expected = error.exception
    custom_traceback = expected.custom_traceback
    num_tf_g_frames = 0
    for frame in custom_traceback:
      _, _, fn_name, _ = frame
      self.assertNotEqual('g', fn_name)
      num_tf_g_frames += int('tf__g' == fn_name)
    self.assertEqual(num_tf_g_frames, 1)
Example #6
    def construct_lanczos_params(self):
        """Computes matrices T and V using the Lanczos algorithm.

        Args:
          k: number of iterations and dimensionality of the tridiagonal matrix
        Returns:
          eig_vec: eigen vector corresponding to min eigenvalue
        """
        # Using autograph to automatically handle
        # the control flow of minimum_eigen_vector
        self.min_eigen_vec = autograph.to_graph(
            utils.tf_lanczos_smallest_eigval)

        def _m_vector_prod_fn(x):
            return self.get_psd_product(x, dtype=self.lanczos_dtype)

        def _h_vector_prod_fn(x):
            return self.get_h_product(x, dtype=self.lanczos_dtype)

        # Construct nodes for computing eigenvalue of M
        self.m_min_vec_estimate = np.zeros(shape=(self.matrix_m_dimension, 1),
                                           dtype=np.float64)
        zeros_m = tf.zeros(shape=(self.matrix_m_dimension, 1),
                           dtype=tf.float64)
        self.m_min_vec_ph = tf.placeholder_with_default(
            input=zeros_m,
            shape=(self.matrix_m_dimension, 1),
            name="m_min_vec_ph")
        self.m_min_eig, self.m_min_vec = self.min_eigen_vec(
            _m_vector_prod_fn,
            self.matrix_m_dimension,
            self.m_min_vec_ph,
            self.lzs_params["max_iter"],
            dtype=self.lanczos_dtype,
        )
        self.m_min_eig = tf.cast(self.m_min_eig, self.nn_dtype)
        self.m_min_vec = tf.cast(self.m_min_vec, self.nn_dtype)

        self.h_min_vec_estimate = np.zeros(shape=(self.matrix_m_dimension - 1,
                                                  1),
                                           dtype=np.float64)
        zeros_h = tf.zeros(shape=(self.matrix_m_dimension - 1, 1),
                           dtype=tf.float64)
        self.h_min_vec_ph = tf.placeholder_with_default(
            input=zeros_h,
            shape=(self.matrix_m_dimension - 1, 1),
            name="h_min_vec_ph")
        self.h_min_eig, self.h_min_vec = self.min_eigen_vec(
            _h_vector_prod_fn,
            self.matrix_m_dimension - 1,
            self.h_min_vec_ph,
            self.lzs_params["max_iter"],
            dtype=self.lanczos_dtype,
        )
        self.h_min_eig = tf.cast(self.h_min_eig, self.nn_dtype)
        self.h_min_vec = tf.cast(self.h_min_vec, self.nn_dtype)
Example #7
    def test_minimum_eigen_vector(self):
        matrix = np.array([[1.0, 2.0], [2.0, 5.0]], dtype=np.float32)
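        # Eigenvalues of this matrix are 3 +/- 2*sqrt(2), i.e. roughly 5.83 and
        # 0.17; the test checks convergence to the unit eigenvector of the
        # smaller one.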
        initial_vec = np.array([[1.0], [-1.0]], dtype=np.float32)

        def _vector_prod_fn(x):
            return tf.matmul(matrix, x)

        min_eigen_fn = autograph.to_graph(utils.minimum_eigen_vector)
        x = tf.compat.v1.placeholder(tf.float32, shape=(2, 1))
        min_eig_vec = min_eigen_fn(x, 10, 0.1, _vector_prod_fn)
        with self.test_session() as sess:
            v = sess.run(min_eig_vec, feed_dict={x: initial_vec})
            if v.flatten()[0] < 0:
                v = -v
        np.testing.assert_almost_equal(v, [[0.9239], [-0.3827]], decimal=4)
Example #8
    def test_graph_construction_error_rewriting_class(self):
        class TestClass(object):
            def test_fn(self):
                return tf.random_normal((2, 3), mean=0.0, dtype=tf.int32)

            def inner_caller(self):
                return self.test_fn()

            def caller(self):
                return self.inner_caller()

        # Note we expect a TypeError here because the traceback will not be
        # rewritten for classes.
        with self.assertRaises(TypeError):
            graph = ag.to_graph(TestClass)
            graph().caller()
Example #9
  def test_graph_construction_error_rewriting_class(self):

    class TestClass(object):

      def test_fn(self):
        return tf.random_normal((2, 3), mean=0.0, dtype=tf.int32)

      def inner_caller(self):
        return self.test_fn()

      def caller(self):
        return self.inner_caller()

    # Note we expect a TypeError here because the traceback will not be
    # rewritten for classes.
    with self.assertRaises(TypeError):
      graph = ag.to_graph(TestClass)
      graph().caller()
Example #10
    def get_min_eig_vec_proxy(self, use_tf_eig=False):
        """Computes the min eigen value and corresponding vector of matrix M.

    Args:
      use_tf_eig: Whether to use tf's default full eigen decomposition

    Returns:
      eig_vec: Minimum absolute eigen value
      eig_val: Corresponding eigen vector
    """
        if use_tf_eig:
            # If smoothness parameter is too small, essentially no smoothing
            # Just output the eigen vector corresponding to min
            return tf.cond(self.smooth_placeholder < 1E-8, self.tf_min_eig_vec,
                           self.tf_smooth_eig_vec)

        # Using autograph to automatically handle the control flow of multi_steps()
        multi_steps_tf = autograph.to_graph(self.multi_steps)
        estimated_eigen_vector = multi_steps_tf(self)
        return estimated_eigen_vector
Example #11

  def construct_lanczos_params(self):
    """Computes matrices T and V using the Lanczos algorithm.

    The numbers of iterations come from self.lzs_params['min_iter'] and
    self.lzs_params['max_iter']; the resulting tridiagonal coefficients
    (alpha, beta) and Lanczos vectors Q are stored as attributes rather
    than returned.
    """
    # Using autograph to automatically handle
    # the control flow of lanczos_decomp
    self.min_eigen_vec = autograph.to_graph(utils.lanczos_decomp)

    def _m_vector_prod_fn(x):
      return self.get_psd_product(x)
    def _h_vector_prod_fn(x):
      return self.get_h_product(x)

    # Construct nodes for computing eigenvalue of M
    self.alpha_m, self.beta_m, self.Q_m = self.min_eigen_vec(_m_vector_prod_fn,
                                                             0,
                                                             self.matrix_m_dimension,
                                                             self.lzs_params['min_iter'])

    self.eig_max_placeholder = tf.placeholder(tf.float32, shape=[])
    self.alpha_m_hat, self.beta_m_hat, self.Q_m_hat = self.min_eigen_vec(_m_vector_prod_fn,
                                                                         self.eig_max_placeholder,
                                                                         self.matrix_m_dimension,
                                                                         self.lzs_params['max_iter'])

    # Construct nodes for computing eigenvalue of H
    self.alpha_h, self.beta_h, self.Q_h = self.min_eigen_vec(_h_vector_prod_fn,
                                                             0,
                                                             self.matrix_m_dimension-1,
                                                             self.lzs_params['max_iter'])

    self.alpha_h_hat, self.beta_h_hat, self.Q_h_hat = self.min_eigen_vec(_h_vector_prod_fn,
                                                                         self.eig_max_placeholder,
                                                                         self.matrix_m_dimension-1,
                                                                         self.lzs_params['max_iter'])
Example #12
  def test_runtime_error_rewriting(self):

    def g(x, s):
      while tf.reduce_sum(x) > s:
        x //= 0
      return x

    def test_fn(x):
      return g(x, 10)

    compiled_fn = ag.to_graph(test_fn)

    with self.assertRaises(ag.TfRuntimeError) as error:
      with self.cached_session() as sess:
        x = compiled_fn(tf.constant([4, 8]))
        with ag.improved_errors(compiled_fn):
          sess.run(x)
    expected = error.exception
    custom_traceback = expected.custom_traceback
    found_correct_filename = False
    num_test_fn_frames = 0
    num_g_frames = 0
    ag_output_filename = tf_inspect.getsourcefile(compiled_fn)
    for frame in custom_traceback:
      filename, _, fn_name, source_code = frame
      self.assertFalse(ag_output_filename in filename)
      self.assertFalse('control_flow_ops.py' in filename)
      self.assertFalse('ag__.' in fn_name)
      self.assertFalse('tf__g' in fn_name)
      self.assertFalse('tf__test_fn' in fn_name)
      found_correct_filename |= __file__ in filename
      num_test_fn_frames += int('test_fn' == fn_name and
                                'return g(x, 10)' in source_code)
      # This makes sure that the code is correctly rewritten from "x_1 //= 0" to
      # "x //= 0".
      num_g_frames += int('g' == fn_name and 'x //= 0' in source_code)
    self.assertTrue(found_correct_filename)
    self.assertEqual(num_test_fn_frames, 1)
    self.assertEqual(num_g_frames, 1)
Example #13
  def test_graph_construction_error_rewriting_call_tree(self):

    def innermost(x):
      if x > 0:
        return tf.random_normal((2, 3), mean=0.0, dtype=tf.int32)
      return tf.zeros((2, 3))

    def inner_caller():
      return innermost(1.0)

    def caller():
      return inner_caller()

    with self.assertRaises(ag.GraphConstructionError) as error:
      graph = ag.to_graph(caller)
      graph()
    expected = error.exception
    custom_traceback = expected.custom_traceback
    found_correct_filename = False
    num_innermost_names = 0
    num_inner_caller_names = 0
    num_caller_names = 0
    ag_output_filename = tf_inspect.getsourcefile(graph)
    for frame in custom_traceback:
      filename, _, fn_name, _ = frame
      self.assertFalse('control_flow_ops.py' in filename)
      self.assertFalse(ag_output_filename in filename)
      found_correct_filename |= __file__ in filename
      self.assertNotEqual('tf__test_fn', fn_name)
      num_innermost_names += int('innermost' == fn_name)
      self.assertNotEqual('tf__inner_caller', fn_name)
      num_inner_caller_names += int('inner_caller' == fn_name)
      self.assertNotEqual('tf__caller', fn_name)
      num_caller_names += int('caller' == fn_name)
    self.assertTrue(found_correct_filename)
    self.assertEqual(num_innermost_names, 1)
    self.assertEqual(num_inner_caller_names, 1)
    self.assertEqual(num_caller_names, 1)
Example #14
import tensorflow as tf
layers = tf.keras.layers
from tensorflow.contrib import autograph

import numpy as np

tf.enable_eager_execution()


def square_if_positive(x):
    if x > 0:
        x = x * x
    else:
        x = 0.0
    return x


print(autograph.to_code(square_if_positive))

tf_square_if_positive = autograph.to_graph(square_if_positive)

with tf.Graph().as_default():
    # The result works like a regular op: takes tensors in, returns tensors.
    # You can inspect the graph using tf.get_default_graph().as_graph_def()
    g_out1 = tf_square_if_positive(tf.constant(9.0))
    g_out2 = tf_square_if_positive(tf.constant(-9.0))
    with tf.Session() as sess:
        print('Graph results: %2.2f, %2.2f\n' %
              (sess.run(g_out1), sess.run(g_out2)))
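
The converted function above is conceptually what one would write by hand with tf.cond; the sketch below is an illustrative graph-mode equivalent (not the literal code that autograph generates):

import tensorflow as tf

def square_if_positive_handwritten(x):
    # Hand-written tf.cond version of the same branching logic.
    return tf.cond(x > 0, lambda: x * x, lambda: tf.constant(0.0))

with tf.Graph().as_default():
    out1 = square_if_positive_handwritten(tf.constant(9.0))
    out2 = square_if_positive_handwritten(tf.constant(-9.0))
    with tf.Session() as sess:
        print('Handwritten results: %2.2f, %2.2f' % (sess.run(out1),
                                                     sess.run(out2)))
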
Example #15
    def test_basic(self):
        converted = ag.to_graph(list_used_as_tuple)
        result = converted()

        with self.test_session() as sess:
            self.assertAllEqual(sess.run(result), [1, 2, 3])
Example #16

  def test_basic(self):
    converted = ag.to_graph(list_used_as_tuple)
    result = converted()

    with self.test_session() as sess:
      self.assertAllEqual(sess.run(result), [1, 2, 3])
Example #17

graph = tf.Graph()
with graph.as_default():
    print(tf.executing_eagerly()) # False
#####################
# Use defun to define graph-level functions
import tensorflow as tf
tf.enable_eager_execution()
@tf.contrib.eager.defun
def square_sum(x, y):
    return tf.square(x+y)
result = square_sum(2., 3.)
print(result.numpy()) # 25

#####################
# autograph
import tensorflow as tf
tf.enable_eager_execution()
from tensorflow.contrib import autograph

def fib(n):
    a, b = 0, 1
    for i in range(n - 1):
        a, b = b, a + b
    return b

fib(10)  # 55
graph_fib = autograph.to_graph(fib)
# graph_fib also works in graph mode
result = graph_fib(10)
print(result) # 55
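
In graph mode the converted Python loop is lowered to a tf.while_loop; the hand-written sketch below builds the same Fibonacci computation explicitly (an illustration, not the literal autograph output):

import tensorflow as tf

def fib_graph(n):
    # Hand-written tf.while_loop version of the loop in fib above.
    i0, a0, b0 = tf.constant(0), tf.constant(0), tf.constant(1)
    cond = lambda i, a, b: i < n - 1
    body = lambda i, a, b: (i + 1, b, a + b)
    _, _, b = tf.while_loop(cond, body, [i0, a0, b0])
    return b

with tf.Graph().as_default():
    result = fib_graph(tf.constant(10))
    with tf.Session() as sess:
        print(sess.run(result))  # 55
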
Example #18

def square_if_positive(x):
    if x > 0:
        x = x * x
    else:
        x = 0.0
    return x


print(autograph.to_code(square_if_positive))

print('Eager results: %2.2f, %2.2f' % (square_if_positive(
    tf.constant(9.0)), square_if_positive(tf.constant(-9.0))))

tf_square_if_positive = autograph.to_graph(square_if_positive)

with tf.Graph().as_default():
    # The result works like a regular op: takes tensors in, returns tensors.
    # You can inspect the graph using tf.get_default_graph().as_graph_def()
    g_out1 = tf_square_if_positive(tf.constant(9.0))
    g_out2 = tf_square_if_positive(tf.constant(-9.0))

    with tf.Session() as sess:
        print('Graph results: %2.2f, %2.2f\n' %
              (sess.run(g_out1), sess.run(g_out2)))


# Continue in a loop
def sum_even(items):
    s = 0