Example No. 1
def cgls_step(i, state):
    # Apply the forward operator to the current search direction p.
    q = operator.apply(state.p)
    # Step length: gamma = ||A^* r||^2 divided by ||A p||^2.
    alpha = state.gamma / util.l2norm_squared(q)
    # Update the iterate and the residual of the least squares problem.
    x = state.x + alpha * state.p
    r = state.r - alpha * q
    # Residual of the normal equations, s = A^* r.
    s = operator.apply_adjoint(r)
    gamma = util.l2norm_squared(s)
    # New search direction, conjugate to the previous ones.
    beta = gamma / state.gamma
    p = s + beta * state.p
    return i + 1, cgls_state(i + 1, x, r, p, gamma)
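For reference, the same update written with plain NumPy for a dense real matrix `A`; this is only an illustrative restatement of the step above, with the `state` fields passed as loose arrays:

import numpy as np

def cgls_step_dense(A, x, r, p, gamma):
    """One CGLS step on a dense real matrix A; mirrors cgls_step above."""
    q = A @ p
    alpha = gamma / np.dot(q, q)   # l2norm_squared(q)
    x = x + alpha * p
    r = r - alpha * q
    s = A.T @ r                    # apply_adjoint for a real matrix
    gamma_new = np.dot(s, s)       # l2norm_squared(s)
    beta = gamma_new / gamma
    p = s + beta * p
    return x, r, p, gamma_new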
Example No. 2
def cg_step(i, state):
  # Apply the operator to the current search direction p.
  z = operator.apply(state.p)
  # Step length: gamma = ||r||^2 divided by the curvature p^* A p.
  alpha = state.gamma / util.dot(state.p, z)
  # Update the iterate and the residual.
  x = state.x + alpha * state.p
  r = state.r - alpha * z
  gamma = util.l2norm_squared(r)
  # New A-conjugate search direction.
  beta = gamma / state.gamma
  p = r + beta * state.p
  return i + 1, cg_state(i + 1, x, r, p, gamma)
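Writing \\(\gamma_k = ||r_k||_2^2\\), the step above is exactly the classical conjugate gradient recurrence:

\\(\alpha_k = \gamma_k / (p_k^* A p_k)\\),
\\(x_{k+1} = x_k + \alpha_k p_k\\), \\(r_{k+1} = r_k - \alpha_k A p_k\\),
\\(\gamma_{k+1} = ||r_{k+1}||_2^2\\), \\(\beta_k = \gamma_{k+1} / \gamma_k\\), \\(p_{k+1} = r_{k+1} + \beta_k p_k\\).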
Example No. 3
 def testL2Norm(self):
   with self.test_session():
     x_np = np.array([[2], [-3.], [5.]])
     x_norm_np = np.linalg.norm(x_np)
     x_normalized_np = x_np / x_norm_np
     x = constant_op.constant(x_np)
     l2norm = util.l2norm(x)
     l2norm_squared = util.l2norm_squared(x)
     x_normalized, x_norm = util.l2normalize(x)
     self.assertAllClose(l2norm.eval(), x_norm_np)
     self.assertAllClose(l2norm_squared.eval(), np.square(x_norm_np))
     self.assertAllClose(x_norm.eval(), x_norm_np)
     self.assertAllClose(x_normalized.eval(), x_normalized_np)
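The assertions pin down what the `util` helpers must compute. A minimal NumPy restatement of that contract (the real implementations may differ, e.g. in reduction axes or graph-mode details):

import numpy as np

def l2norm_squared(v):
  # Sum of squared entries over all elements.
  return np.sum(np.square(v))

def l2norm(v):
  return np.sqrt(l2norm_squared(v))

def l2normalize(v):
  # Returns the unit-norm vector together with the norm, as in the test.
  norm = l2norm(v)
  return v / norm, norm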
Example No. 4
def cgls(operator, rhs, tol=1e-6, max_iter=20, name="cgls"):
    r"""Conjugate gradient least squares solver.

  Solves a linear least squares problem \\(||A x - rhs||_2\\) for a single
  right-hand side, using an iterative, matrix-free algorithm where the action
  of the matrix A is represented by `operator`. The CGLS algorithm implicitly
  applies the symmetric conjugate gradient algorithm to the normal equations
  \\(A^* A x = A^* rhs\\). The iteration terminates when either the number of
  iterations exceeds `max_iter` or when the norm of the conjugate residual
  (residual of the normal equations) has been reduced to `tol` times its
  initial value, i.e.
  \\(||A^* (rhs - A x_k)|| <= tol ||A^* rhs||\\).

  Args:
    operator: An object representing a linear operator with attributes:
      - shape: Either a list of integers or a 1-D `Tensor` of type `int32` of
        length 2. `shape[0]` is the dimension of the co-domain of the operator
        and `shape[1]` is the dimension of its domain. In other words, if
        operator represents an M x N matrix A, `shape` must contain `[M, N]`.
      - dtype: The datatype of input to and output from `apply` and
        `apply_adjoint`.
      - apply: Callable object taking a vector `x` as input and returning a
        vector with the result of applying the operator to `x`, i.e. if
        `operator` represents matrix `A`, `apply` should return `A * x`.
      - apply_adjoint: Callable object taking a vector `x` as input and
        returning a vector with the result of applying the adjoint operator
        to `x`, i.e. if `operator` represents matrix `A`, `apply_adjoint` should
        return `conj(transpose(A)) * x`.

    rhs: A rank-1 `Tensor` of shape `[M]` containing the right-hand side vector.
    tol: A float scalar convergence tolerance.
    max_iter: An integer giving the maximum number of iterations.
    name: A name scope for the operation.


  Returns:
    output: A namedtuple representing the final state with fields:
      - i: A scalar `int32` `Tensor`. Number of iterations executed.
      - x: A rank-1 `Tensor` of shape `[N]` containing the computed solution.
      - r: A rank-1 `Tensor` of shape `[M]` containing the residual vector.
      - p: A rank-1 `Tensor` of shape `[N]`. The next descent direction.
      - gamma: \\(||A^* r||_2^2\\)
  """
    # ephemeral class holding CGLS state.
    cgls_state = collections.namedtuple("CGLSState",
                                        ["i", "x", "r", "p", "gamma"])

    def stopping_criterion(i, state):
        return math_ops.logical_and(i < max_iter, state.gamma > tol)

    # TODO(rmlarsen): add preconditioning
    def cgls_step(i, state):
        q = operator.apply(state.p)
        alpha = state.gamma / util.l2norm_squared(q)
        x = state.x + alpha * state.p
        r = state.r - alpha * q
        s = operator.apply_adjoint(r)
        gamma = util.l2norm_squared(s)
        beta = gamma / state.gamma
        p = s + beta * state.p
        return i + 1, cgls_state(i + 1, x, r, p, gamma)

    with ops.name_scope(name):
        n = operator.shape[1:]
        rhs = array_ops.expand_dims(rhs, -1)
        s0 = operator.apply_adjoint(rhs)
        gamma0 = util.l2norm_squared(s0)
        tol = tol * tol * gamma0
        x = array_ops.expand_dims(
            array_ops.zeros(n, dtype=rhs.dtype.base_dtype), -1)
        i = constant_op.constant(0, dtype=dtypes.int32)
        state = cgls_state(i=i, x=x, r=rhs, p=s0, gamma=gamma0)
        _, state = control_flow_ops.while_loop(stopping_criterion, cgls_step,
                                               [i, state])
        return cgls_state(state.i,
                          x=array_ops.squeeze(state.x),
                          r=array_ops.squeeze(state.r),
                          p=array_ops.squeeze(state.p),
                          gamma=state.gamma)
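A minimal usage sketch, assuming `cgls` as defined above is importable; the `DenseOperator` namedtuple and `dense_operator` helper below are hypothetical and only illustrate the operator interface the docstring describes:

import collections
import numpy as np
import tensorflow as tf

# Hypothetical wrapper exposing shape, dtype, apply and apply_adjoint.
DenseOperator = collections.namedtuple(
    "DenseOperator", ["shape", "dtype", "apply", "apply_adjoint"])

def dense_operator(matrix):
  a = tf.constant(matrix)
  return DenseOperator(
      shape=matrix.shape,
      dtype=a.dtype,
      apply=lambda x: tf.matmul(a, x),
      apply_adjoint=lambda x: tf.matmul(a, x, adjoint_a=True))

a_np = np.random.randn(6, 4)
rhs_np = np.random.randn(6)
solution = cgls(dense_operator(a_np), tf.constant(rhs_np), tol=1e-8, max_iter=50)

with tf.Session() as sess:
  # Least-squares solution of a_np @ x ~= rhs_np.
  print(sess.run(solution.x))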
Example No. 5
def cgls(operator, rhs, tol=1e-6, max_iter=20, name="cgls"):
  r"""Conjugate gradient least squares solver.

  Solves a linear least squares problem \\(||A x - rhs||_2\\) for a single
  right-hand side, using an iterative, matrix-free algorithm where the action
  of the matrix A is represented by `operator`. The CGLS algorithm implicitly
  applies the symmetric conjugate gradient algorithm to the normal equations
  \\(A^* A x = A^* rhs\\). The iteration terminates when either the number of
  iterations exceeds `max_iter` or when the norm of the conjugate residual
  (residual of the normal equations) has been reduced to `tol` times its
  initial value, i.e.
  \\(||A^* (rhs - A x_k)|| <= tol ||A^* rhs||\\).

  Args:
    operator: An object representing a linear operator with attributes:
      - shape: Either a list of integers or a 1-D `Tensor` of type `int32` of
        length 2. `shape[0]` is the dimension of the co-domain of the operator
        and `shape[1]` is the dimension of its domain. In other words, if
        operator represents an M x N matrix A, `shape` must contain `[M, N]`.
      - dtype: The datatype of input to and output from `apply` and
        `apply_adjoint`.
      - apply: Callable object taking a vector `x` as input and returning a
        vector with the result of applying the operator to `x`, i.e. if
        `operator` represents matrix `A`, `apply` should return `A * x`.
      - apply_adjoint: Callable object taking a vector `x` as input and
        returning a vector with the result of applying the adjoint operator
        to `x`, i.e. if `operator` represents matrix `A`, `apply_adjoint` should
        return `conj(transpose(A)) * x`.

    rhs: A rank-1 `Tensor` of shape `[M]` containing the right-hand side vector.
    tol: A float scalar convergence tolerance.
    max_iter: An integer giving the maximum number of iterations.
    name: A name scope for the operation.


  Returns:
    output: A namedtuple representing the final state with fields:
      - i: A scalar `int32` `Tensor`. Number of iterations executed.
      - x: A rank-1 `Tensor` of shape `[N]` containing the computed solution.
      - r: A rank-1 `Tensor` of shape `[M]` containing the residual vector.
      - p: A rank-1 `Tensor` of shape `[N]`. The next descent direction.
      - gamma: \\(||A^* r||_2^2\\)
  """
  # ephemeral class holding CGLS state.
  cgls_state = collections.namedtuple("CGLSState",
                                      ["i", "x", "r", "p", "gamma"])

  def stopping_criterion(i, state):
    return tf.logical_and(i < max_iter, state.gamma > tol)

  # TODO(rmlarsen): add preconditioning
  def cgls_step(i, state):
    q = operator.apply(state.p)
    alpha = state.gamma / util.l2norm_squared(q)
    x = state.x + alpha * state.p
    r = state.r - alpha * q
    s = operator.apply_adjoint(r)
    gamma = util.l2norm_squared(s)
    beta = gamma / state.gamma
    p = s + beta * state.p
    return i + 1, cgls_state(i + 1, x, r, p, gamma)

  with tf.name_scope(name):
    n = operator.shape[1:]
    rhs = tf.expand_dims(rhs, -1)
    s0 = operator.apply_adjoint(rhs)
    gamma0 = util.l2norm_squared(s0)
    tol = tol * tol * gamma0
    x = tf.expand_dims(tf.zeros(n, dtype=rhs.dtype.base_dtype), -1)
    i = tf.constant(0, dtype=tf.int32)
    state = cgls_state(i=i, x=x, r=rhs, p=s0, gamma=gamma0)
    _, state = tf.while_loop(stopping_criterion, cgls_step, [i, state])
    return cgls_state(
        state.i,
        x=tf.squeeze(state.x),
        r=tf.squeeze(state.r),
        p=tf.squeeze(state.p),
        gamma=state.gamma)
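Note how the snippet realizes the documented stopping rule: `gamma` tracks \\(||A^* r_k||_2^2\\) and `tol` is rescaled once to \\(tol^2 \, ||A^* rhs||_2^2\\), so the loop condition `state.gamma > tol` compares squared quantities and is equivalent to iterating while \\(||A^* (rhs - A x_k)|| > tol \, ||A^* rhs||\\).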
Example No. 6
def conjugate_gradient(operator,
                       rhs,
                       tol=1e-4,
                       max_iter=20,
                       name="conjugate_gradient"):
  r"""Conjugate gradient solver.

  Solves a linear system of equations `A*x = rhs` for a self-adjoint, positive
  definite matrix `A` and right-hand side vector `rhs`, using an iterative,
  matrix-free algorithm where the action of the matrix A is represented by
  `operator`. The iteration terminates when either the number of iterations
  exceeds `max_iter` or when the residual norm has been reduced to `tol`
  times its initial value, i.e. \\(||rhs - A x_k|| <= tol ||rhs||\\).

  Args:
    operator: An object representing a linear operator with attributes:
      - shape: Either a list of integers or a 1-D `Tensor` of type `int32` of
        length 2. `shape[0]` is the dimension of the co-domain of the operator
        and `shape[1]` is the dimension of its domain. In other words, if
        operator represents an N x N matrix A, `shape` must contain `[N, N]`.
      - dtype: The datatype of input to and output from `apply`.
      - apply: Callable object taking a vector `x` as input and returning a
        vector with the result of applying the operator to `x`, i.e. if
        `operator` represents matrix `A`, `apply` should return `A * x`.
    rhs: A rank-1 `Tensor` of shape `[N]` containing the right-hand side vector.
    tol: A float scalar convergence tolerance.
    max_iter: An integer giving the maximum number of iterations.
    name: A name scope for the operation.

  Returns:
    output: A namedtuple representing the final state with fields:
      - i: A scalar `int32` `Tensor`. Number of iterations executed.
      - x: A rank-1 `Tensor` of shape `[N]` containing the computed solution.
      - r: A rank-1 `Tensor` of shape `[N]` containing the residual vector.
      - p: A rank-1 `Tensor` of shape `[N]`. `A`-conjugate basis vector.
      - gamma: \\(||r||_2^2\\)
  """
  # ephemeral class holding CG state.
  cg_state = collections.namedtuple("CGState", ["i", "x", "r", "p", "gamma"])

  def stopping_criterion(i, state):
    return math_ops.logical_and(i < max_iter, state.gamma > tol)

  # TODO(rmlarsen): add preconditioning
  def cg_step(i, state):
    z = operator.apply(state.p)
    alpha = state.gamma / util.dot(state.p, z)
    x = state.x + alpha * state.p
    r = state.r - alpha * z
    gamma = util.l2norm_squared(r)
    beta = gamma / state.gamma
    p = r + beta * state.p
    return i + 1, cg_state(i + 1, x, r, p, gamma)

  with ops.name_scope(name):
    n = operator.shape[1:]
    rhs = array_ops.expand_dims(rhs, -1)
    gamma0 = util.l2norm_squared(rhs)
    tol = tol * tol * gamma0
    x = array_ops.expand_dims(
        array_ops.zeros(
            n, dtype=rhs.dtype.base_dtype), -1)
    i = constant_op.constant(0, dtype=dtypes.int32)
    state = cg_state(i=i, x=x, r=rhs, p=rhs, gamma=gamma0)
    _, state = control_flow_ops.while_loop(stopping_criterion, cg_step,
                                           [i, state])
    return cg_state(
        state.i,
        x=array_ops.squeeze(state.x),
        r=array_ops.squeeze(state.r),
        p=array_ops.squeeze(state.p),
        gamma=state.gamma)
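A similar hypothetical sketch for `conjugate_gradient`, building a self-adjoint positive definite operator from a dense matrix so that CG applies (again assuming the function above is importable; `SpdOperator` is illustrative only):

import collections
import numpy as np
import tensorflow as tf

# Hypothetical wrapper exposing only what conjugate_gradient needs.
SpdOperator = collections.namedtuple("SpdOperator", ["shape", "dtype", "apply"])

b = np.random.randn(5, 5)
spd_np = b.dot(b.T) + 5.0 * np.eye(5)   # Symmetric positive definite by construction.
spd_tf = tf.constant(spd_np)

operator = SpdOperator(
    shape=spd_np.shape,
    dtype=spd_tf.dtype,
    apply=lambda x: tf.matmul(spd_tf, x))

result = conjugate_gradient(operator, tf.constant(np.random.randn(5)),
                            tol=1e-8, max_iter=100)

with tf.Session() as sess:
  # Iteration count and approximate solution of spd_np @ x = rhs.
  print(sess.run([result.i, result.x]))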
Example No. 7
def conjugate_gradient(operator,
                       rhs,
                       tol=1e-4,
                       max_iter=20,
                       name="conjugate_gradient"):
    r"""Conjugate gradient solver.

  Solves a linear system of equations `A*x = rhs` for a self-adjoint, positive
  definite matrix `A` and right-hand side vector `rhs`, using an iterative,
  matrix-free algorithm where the action of the matrix A is represented by
  `operator`. The iteration terminates when either the number of iterations
  exceeds `max_iter` or when the residual norm has been reduced to `tol`
  times its initial value, i.e. \\(||rhs - A x_k|| <= tol ||rhs||\\).

  Args:
    operator: An object representing a linear operator with attributes:
      - shape: Either a list of integers or a 1-D `Tensor` of type `int32` of
        length 2. `shape[0]` is the dimension of the co-domain of the operator
        and `shape[1]` is the dimension of its domain. In other words, if
        operator represents an N x N matrix A, `shape` must contain `[N, N]`.
      - dtype: The datatype of input to and output from `apply`.
      - apply: Callable object taking a vector `x` as input and returning a
        vector with the result of applying the operator to `x`, i.e. if
        `operator` represents matrix `A`, `apply` should return `A * x`.
    rhs: A rank-1 `Tensor` of shape `[N]` containing the right-hand side vector.
    tol: A float scalar convergence tolerance.
    max_iter: An integer giving the maximum number of iterations.
    name: A name scope for the operation.

  Returns:
    output: A namedtuple representing the final state with fields:
      - i: A scalar `int32` `Tensor`. Number of iterations executed.
      - x: A rank-1 `Tensor` of shape `[N]` containing the computed solution.
      - r: A rank-1 `Tensor` of shape `[N]` containing the residual vector.
      - p: A rank-1 `Tensor` of shape `[N]`. `A`-conjugate basis vector.
      - gamma: \\(||r||_2^2\\)
  """
    # ephemeral class holding CG state.
    cg_state = collections.namedtuple("CGState", ["i", "x", "r", "p", "gamma"])

    def stopping_criterion(i, state):
        return math_ops.logical_and(i < max_iter, state.gamma > tol)

    # TODO (rmlarsen): add preconditioning id:1554 gh:1555
    def cg_step(i, state):
        z = operator.apply(state.p)
        alpha = state.gamma / util.dot(state.p, z)
        x = state.x + alpha * state.p
        r = state.r - alpha * z
        gamma = util.l2norm_squared(r)
        beta = gamma / state.gamma
        p = r + beta * state.p
        return i + 1, cg_state(i + 1, x, r, p, gamma)

    with ops.name_scope(name):
        n = operator.shape[1:]
        rhs = array_ops.expand_dims(rhs, -1)
        gamma0 = util.l2norm_squared(rhs)
        tol = tol * tol * gamma0
        x = array_ops.expand_dims(
            array_ops.zeros(n, dtype=rhs.dtype.base_dtype), -1)
        i = constant_op.constant(0, dtype=dtypes.int32)
        state = cg_state(i=i, x=x, r=rhs, p=rhs, gamma=gamma0)
        _, state = control_flow_ops.while_loop(stopping_criterion, cg_step,
                                               [i, state])
        return cg_state(state.i,
                        x=array_ops.squeeze(state.x),
                        r=array_ops.squeeze(state.r),
                        p=array_ops.squeeze(state.p),
                        gamma=state.gamma)
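The CG step relies on `util.dot` for the inner product \\(p^* A p\\). A minimal sketch of such a helper, assuming the convention of conjugating the first argument; the actual `util` module these snippets import may differ in details:

import tensorflow as tf

def dot(x, y):
  # Inner product <x, y>; conjugating the first argument keeps the result
  # consistent with the adjoint convention used by `apply_adjoint`.
  return tf.reduce_sum(tf.conj(x) * y)

def l2norm_squared(v):
  # Squared Euclidean norm, as exercised by testL2Norm above.
  return dot(v, v)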