def test_conjugate_gradient(self):
   np.random.seed(1)
   a_np = np.random.uniform(
       low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
   # Make a_np self-adjoint and positive definite.
   a_np = np.dot(a_np.T, a_np)
   # Jacobi preconditioner.
   jacobi_np = np.zeros_like(a_np)
   jacobi_np[range(a_np.shape[0]), range(a_np.shape[1])] = (1.0 /
                                                            a_np.diagonal())
   rhs_np = np.random.uniform(
       low=-1.0, high=1.0, size=shape_[0]).astype(dtype_)
   x_np = np.zeros_like(rhs_np)
   tol = 1e-6 if dtype_ == np.float64 else 1e-3
   max_iter = 20
   with self.test_session() as sess:
     if use_static_shape_:
       a = constant_op.constant(a_np)
       rhs = constant_op.constant(rhs_np)
       x = constant_op.constant(x_np)
       jacobi = constant_op.constant(jacobi_np)
     else:
       a = array_ops.placeholder(dtype_)
       rhs = array_ops.placeholder(dtype_)
       x = array_ops.placeholder(dtype_)
       jacobi = array_ops.placeholder(dtype_)
     operator = util.create_operator(a)
     preconditioners = [None, util.identity_operator(a),
                        util.create_operator(jacobi)]
     cg_results = []
     for preconditioner in preconditioners:
       cg_graph = linear_equations.conjugate_gradient(
           operator, rhs, preconditioner=preconditioner,
           x=x, tol=tol, max_iter=max_iter)
       if use_static_shape_:
         cg_val = sess.run(cg_graph)
       else:
         cg_val = sess.run(cg_graph, feed_dict={a: a_np, rhs: rhs_np, x: x_np,
                                                jacobi: jacobi_np})
       norm_r0 = np.linalg.norm(rhs_np)
       norm_r = np.linalg.norm(cg_val.r)
       self.assertLessEqual(norm_r, tol * norm_r0)
       # Validate that we get an equally small residual norm with numpy
       # using the computed solution.
       r_np = rhs_np - np.dot(a_np, cg_val.x)
       norm_r_np = np.linalg.norm(r_np)
       self.assertLessEqual(norm_r_np, tol * norm_r0)
       cg_results.append(cg_val)
     # Validate that we get the same results with the identity preconditioner
     # and with None.
     self.assertEqual(cg_results[0].i, cg_results[1].i)
     self.assertAlmostEqual(cg_results[0].gamma, cg_results[1].gamma)
     self.assertAllClose(cg_results[0].r, cg_results[1].r, rtol=tol)
     self.assertAllClose(cg_results[0].x, cg_results[1].x, rtol=tol)
     self.assertAllClose(cg_results[0].p, cg_results[1].p, rtol=tol)
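
For reference, the preconditioned conjugate gradient recurrence that this test exercises can be written in a few lines of NumPy. The sketch below is illustrative only: the helper name is made up and it works on plain arrays rather than the operator objects used above. It tracks x, the residual r, and gamma = r.dot(z), which reduces to ||r||^2 when no preconditioner is supplied.

def pcg_reference(a, rhs, preconditioner=None, x=None, tol=1e-6, max_iter=20):
  """Plain NumPy preconditioned CG; assumes `a` is symmetric positive definite."""
  x = np.zeros_like(rhs) if x is None else np.array(x, dtype=rhs.dtype)
  r = rhs - a.dot(x)                                         # residual
  z = r if preconditioner is None else preconditioner.dot(r)
  p = z.copy()
  gamma = r.dot(z)
  for _ in range(max_iter):
    if np.linalg.norm(r) <= tol * np.linalg.norm(rhs):
      break
    ap = a.dot(p)
    alpha = gamma / p.dot(ap)
    x = x + alpha * p
    r = r - alpha * ap
    z = r if preconditioner is None else preconditioner.dot(r)
    gamma_new = r.dot(z)
    p = z + (gamma_new / gamma) * p
    gamma = gamma_new
  return x, r, gamma
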
Example #2
 def _testCreateOperator(self, use_static_shape_):
   for dtype in np.float32, np.float64:
     a_np = np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=dtype)
     x_np = np.array([[2.], [-3.]], dtype=dtype)
     y_np = np.array([[2.], [-3.], [5.]], dtype=dtype)
     with self.test_session() as sess:
       if use_static_shape_:
         a = constant_op.constant(a_np, dtype=dtype)
         x = constant_op.constant(x_np, dtype=dtype)
         y = constant_op.constant(y_np, dtype=dtype)
       else:
         a = array_ops.placeholder(dtype)
         x = array_ops.placeholder(dtype)
         y = array_ops.placeholder(dtype)
       op = util.create_operator(a)
       ax = op.apply(x)
       aty = op.apply_adjoint(y)
       op_shape = ops.convert_to_tensor(op.shape)
       if use_static_shape_:
         op_shape_val, ax_val, aty_val = sess.run([op_shape, ax, aty])
       else:
         op_shape_val, ax_val, aty_val = sess.run(
             [op_shape, ax, aty], feed_dict={a: a_np,
                                             x: x_np,
                                             y: y_np})
     self.assertAllEqual(op_shape_val, [3, 2])
     self.assertAllClose(ax_val, np.dot(a_np, x_np))
     self.assertAllClose(aty_val, np.dot(a_np.T, y_np))
 def test_cgls(self):
   np.random.seed(1)
   a_np = np.random.uniform(
       low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
   rhs_np = np.random.uniform(
       low=-1.0, high=1.0, size=shape_[0]).astype(dtype_)
   tol = 1e-12 if dtype_ == np.float64 else 1e-6
   max_iter = 20
   with self.test_session() as sess:
     if use_static_shape_:
       a = constant_op.constant(a_np)
       rhs = constant_op.constant(rhs_np)
     else:
       a = array_ops.placeholder(dtype_)
       rhs = array_ops.placeholder(dtype_)
     operator = util.create_operator(a)
     cgls_graph = least_squares.cgls(operator, rhs, tol=tol, max_iter=max_iter)
     if use_static_shape_:
       cgls_val = sess.run(cgls_graph)
     else:
       cgls_val = sess.run(cgls_graph, feed_dict={a: a_np, rhs: rhs_np})
     # Below we use s = A^* (rhs - A x), s0 = A^* rhs
     norm_s0 = np.linalg.norm(np.dot(a_np.T, rhs_np))
     norm_s = np.sqrt(cgls_val.gamma)
     self.assertLessEqual(norm_s, tol * norm_s0)
     # Validate that we get an equally small residual norm with numpy
     # using the computed solution.
     r_np = rhs_np - np.dot(a_np, cgls_val.x)
     norm_s_np = np.linalg.norm(np.dot(a_np.T, r_np))
     self.assertLessEqual(norm_s_np, tol * norm_s0)
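
The CGLS recurrence behind least_squares.cgls can be sketched in NumPy as well. The helper below is illustrative only (the name is made up); it minimizes ||a x - rhs||_2 and tracks gamma = ||A^T r||^2, matching the test's own comment that s = A^* (rhs - A x) and s0 = A^* rhs.

def cgls_reference(a, rhs, tol=1e-6, max_iter=20):
  """Plain NumPy CGLS for min ||a x - rhs||_2; illustrative only."""
  x = np.zeros(a.shape[1], dtype=rhs.dtype)
  r = rhs.copy()                # residual of the least-squares system
  s = a.T.dot(r)                # normal-equations residual A^T r
  p = s.copy()
  gamma = s.dot(s)
  norm_s0 = np.sqrt(gamma)
  for _ in range(max_iter):
    if np.sqrt(gamma) <= tol * norm_s0:
      break
    q = a.dot(p)
    alpha = gamma / q.dot(q)
    x = x + alpha * p
    r = r - alpha * q
    s = a.T.dot(r)
    gamma_new = s.dot(s)
    p = s + (gamma_new / gamma) * p
    gamma = gamma_new
  return x, r, gamma
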
 def test_cgls(self):
   np.random.seed(1)
   a_np = np.random.uniform(
       low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
   rhs_np = np.random.uniform(
       low=-1.0, high=1.0, size=shape_[0]).astype(dtype_)
   tol = 1e-12 if dtype_ == np.float64 else 1e-6
   max_iter = 20
   with self.test_session() as sess:
     if use_static_shape_:
       a = tf.constant(a_np)
       rhs = tf.constant(rhs_np)
     else:
       a = tf.placeholder(dtype_)
       rhs = tf.placeholder(dtype_)
     operator = util.create_operator(a)
     cgls_graph = least_squares.cgls(operator, rhs, tol=tol, max_iter=max_iter)
     if use_static_shape_:
       cgls_val = sess.run(cgls_graph)
     else:
       cgls_val = sess.run(cgls_graph, feed_dict={a: a_np, rhs: rhs_np})
     # Below we use s = A^* (rhs - A x), s0 = A^* rhs
     norm_s0 = np.linalg.norm(np.dot(a_np.T, rhs_np))
     norm_s = np.sqrt(cgls_val.gamma)
     self.assertLessEqual(norm_s, tol * norm_s0)
     # Validate that we get an equally small residual norm with numpy
     # using the computed solution.
     r_np = rhs_np - np.dot(a_np, cgls_val.x)
     norm_s_np = np.linalg.norm(np.dot(a_np.T, r_np))
     self.assertLessEqual(norm_s_np, tol * norm_s0)
 def test_conjugate_gradient(self):
   np.random.seed(1)
   a_np = np.random.uniform(
       low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
   # Make a_np self-adjoint and positive definite.
   a_np = np.dot(a_np.T, a_np)
   rhs_np = np.random.uniform(
       low=-1.0, high=1.0, size=shape_[0]).astype(dtype_)
   tol = 1e-6 if dtype_ == np.float64 else 1e-3
   max_iter = 20
   with self.test_session() as sess:
     if use_static_shape_:
       a = constant_op.constant(a_np)
       rhs = constant_op.constant(rhs_np)
     else:
       a = array_ops.placeholder(dtype_)
       rhs = array_ops.placeholder(dtype_)
     operator = util.create_operator(a)
     cg_graph = linear_equations.conjugate_gradient(
         operator, rhs, tol=tol, max_iter=max_iter)
     if use_static_shape_:
       cg_val = sess.run(cg_graph)
     else:
       cg_val = sess.run(cg_graph, feed_dict={a: a_np, rhs: rhs_np})
     norm_r0 = np.linalg.norm(rhs_np)
     norm_r = np.sqrt(cg_val.gamma)
     self.assertLessEqual(norm_r, tol * norm_r0)
     # Validate that we get an equally small residual norm with numpy
     # using the computed solution.
     r_np = rhs_np - np.dot(a_np, cg_val.x)
     norm_r_np = np.linalg.norm(r_np)
     self.assertLessEqual(norm_r_np, tol * norm_r0)
 def test_conjugate_gradient(self):
     np.random.seed(1)
     a_np = np.random.uniform(
         low=-1.0, high=1.0,
         size=np.prod(shape_)).reshape(shape_).astype(dtype_)
     # Make a_np self-adjoint and positive definite.
     a_np = np.dot(a_np.T, a_np)
     rhs_np = np.random.uniform(low=-1.0, high=1.0,
                                size=shape_[0]).astype(dtype_)
     tol = 1e-6 if dtype_ == np.float64 else 1e-3
     max_iter = 20
     with self.test_session() as sess:
         if use_static_shape_:
             a = tf.constant(a_np)
             rhs = tf.constant(rhs_np)
         else:
             a = tf.placeholder(dtype_)
             rhs = tf.placeholder(dtype_)
         operator = util.create_operator(a)
         cg_graph = linear_equations.conjugate_gradient(operator,
                                                        rhs,
                                                        tol=tol,
                                                        max_iter=max_iter)
         if use_static_shape_:
             cg_val = sess.run(cg_graph)
         else:
             cg_val = sess.run(cg_graph, feed_dict={a: a_np, rhs: rhs_np})
         norm_r0 = np.linalg.norm(rhs_np)
         norm_r = np.sqrt(cg_val.gamma)
         self.assertLessEqual(norm_r, tol * norm_r0)
         # Validate that we get an equally small residual norm with numpy
         # using the computed solution.
         r_np = rhs_np - np.dot(a_np, cg_val.x)
         norm_r_np = np.linalg.norm(r_np)
         self.assertLessEqual(norm_r_np, tol * norm_r0)
Example #8
  def test_lanczos_bidiag(self):
    np.random.seed(1)
    a_np = np.random.uniform(
        low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
    tol = 1e-12 if dtype_ == np.float64 else 1e-5

    with self.cached_session() as sess:
      if use_static_shape_:
        a = constant_op.constant(a_np)
      else:
        a = array_ops.placeholder(dtype_)
      operator = util.create_operator(a)
      lbd = lanczos.lanczos_bidiag(
          operator, steps_, orthogonalize=orthogonalize_)

      # The computed factorization should satisfy the equations
      #  A * V = U * B
      #  A' * U[:, :-1] = V * B[:-1, :]'
      av = math_ops.matmul(a, lbd.v)
      ub = lanczos.bidiag_matmul(lbd.u, lbd.alpha, lbd.beta, adjoint_b=False)
      atu = math_ops.matmul(a, lbd.u[:, :-1], adjoint_a=True)
      vbt = lanczos.bidiag_matmul(lbd.v, lbd.alpha, lbd.beta, adjoint_b=True)

      if use_static_shape_:
        av_val, ub_val, atu_val, vbt_val = sess.run([av, ub, atu, vbt])
      else:
        av_val, ub_val, atu_val, vbt_val = sess.run([av, ub, atu, vbt],
                                                    feed_dict={a: a_np})
      self.assertAllClose(av_val, ub_val, atol=tol, rtol=tol)
      self.assertAllClose(atu_val, vbt_val, atol=tol, rtol=tol)
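
The factorization identities in the comment above can also be checked directly in NumPy once the bidiagonal factor is assembled. The sketch below rests on an assumption about the returned layout (consistent with what bidiag_matmul appears to consume): that lbd.alpha and lbd.beta are length-steps_ vectors holding the diagonal and the first subdiagonal of the (steps_ + 1) x steps_ lower-bidiagonal matrix B.

def assemble_bidiag(alpha, beta):
  """Builds the (k + 1) x k lower-bidiagonal B from its diagonal and subdiagonal."""
  alpha = np.ravel(alpha)
  beta = np.ravel(beta)
  k = len(alpha)
  b = np.zeros((k + 1, k), dtype=alpha.dtype)
  b[range(k), range(k)] = alpha            # diagonal
  b[range(1, k + 1), range(k)] = beta      # first subdiagonal
  return b

# With u_val, v_val, alpha_val, and beta_val fetched from the session, the two
# identities from the comment become:
#   np.allclose(a_np.dot(v_val), u_val.dot(assemble_bidiag(alpha_val, beta_val)))
#   np.allclose(a_np.T.dot(u_val[:, :-1]),
#               v_val.dot(assemble_bidiag(alpha_val, beta_val)[:-1, :].T))
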
Example #10
 def _testCreateOperator(self, use_static_shape_):
   a_np = np.array([[1., 2.], [3., 4.], [5., 6.]])
   x = np.array([[2.], [-3.]], dtype=np.float32)
   y = np.array([[2.], [-3.], [5.]], dtype=np.float32)
   with self.test_session() as sess:
     if use_static_shape_:
       a = tf.constant(a_np, dtype=tf.float32)
     else:
       a = tf.placeholder(tf.float32)
     op = util.create_operator(a)
     ax = op.apply(x)
     aty = op.apply_adjoint(y)
     op_shape = tf.convert_to_tensor(op.shape)
     if use_static_shape_:
       op_shape_val, ax_val, aty_val = sess.run([op_shape, ax, aty])
     else:
       op_shape_val, ax_val, aty_val = sess.run([op_shape, ax, aty],
                                                feed_dict={a: a_np})
     self.assertAllEqual(op_shape_val, [3, 2])
     self.assertAllClose(ax_val, [[-4], [-6], [-8]])
     self.assertAllClose(aty_val, [[18], [22]])
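
The hard-coded expectations in the last two assertions follow directly from the 3 x 2 example matrix:

#   A x   = [[1*2 + 2*(-3)], [3*2 + 4*(-3)], [5*2 + 6*(-3)]] = [[-4], [-6], [-8]]
#   A^T y = [[1*2 + 3*(-3) + 5*5], [2*2 + 4*(-3) + 6*5]]     = [[18], [22]]
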
 def test_conjugate_gradient(self):
   np.random.seed(1)
   a_np = np.random.uniform(
       low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
   # Make a_np self-adjoint and positive definite.
   a_np = np.dot(a_np.T, a_np)
   # Jacobi preconditioner.
   jacobi_np = np.zeros_like(a_np)
   jacobi_np[range(a_np.shape[0]), range(a_np.shape[1])] = (
       1.0 / a_np.diagonal())
   rhs_np = np.random.uniform(
       low=-1.0, high=1.0, size=shape_[0]).astype(dtype_)
   x_np = np.zeros_like(rhs_np)
   tol = 1e-6 if dtype_ == np.float64 else 1e-3
   max_iter = 20
   with self.cached_session() as sess:
     if use_static_shape_:
       a = constant_op.constant(a_np)
       rhs = constant_op.constant(rhs_np)
       x = constant_op.constant(x_np)
       jacobi = constant_op.constant(jacobi_np)
     else:
       a = array_ops.placeholder(dtype_)
       rhs = array_ops.placeholder(dtype_)
       x = array_ops.placeholder(dtype_)
       jacobi = array_ops.placeholder(dtype_)
     operator = util.create_operator(a)
     preconditioners = [
         None, util.identity_operator(a),
         util.create_operator(jacobi)
     ]
     cg_results = []
     for preconditioner in preconditioners:
       cg_graph = linear_equations.conjugate_gradient(
           operator,
           rhs,
           preconditioner=preconditioner,
           x=x,
           tol=tol,
           max_iter=max_iter)
       if use_static_shape_:
         cg_val = sess.run(cg_graph)
       else:
         cg_val = sess.run(
             cg_graph,
             feed_dict={
                 a: a_np,
                 rhs: rhs_np,
                 x: x_np,
                 jacobi: jacobi_np
             })
       norm_r0 = np.linalg.norm(rhs_np)
       norm_r = np.linalg.norm(cg_val.r)
       self.assertLessEqual(norm_r, tol * norm_r0)
       # Validate that we get an equally small residual norm with numpy
       # using the computed solution.
       r_np = rhs_np - np.dot(a_np, cg_val.x)
       norm_r_np = np.linalg.norm(r_np)
       self.assertLessEqual(norm_r_np, tol * norm_r0)
       cg_results.append(cg_val)
     # Validate that we get the same results with the identity preconditioner
     # and with None.
     self.assertEqual(cg_results[0].i, cg_results[1].i)
     self.assertAlmostEqual(cg_results[0].gamma, cg_results[1].gamma)
     self.assertAllClose(cg_results[0].r, cg_results[1].r, rtol=tol)
     self.assertAllClose(cg_results[0].x, cg_results[1].x, rtol=tol)
     self.assertAllClose(cg_results[0].p, cg_results[1].p, rtol=tol)