Code Example #1
 def _to_dense(self):
   # Builds the dense Householder reflection I - 2 * v v^H from the normalized axis.
   normalized_axis = self.reflection_axis / linalg.norm(
       self.reflection_axis, axis=-1, keepdims=True)
   mat = normalized_axis[..., array_ops.newaxis]
   matrix = -2 * math_ops.matmul(mat, mat, adjoint_b=True)
   return array_ops.matrix_set_diag(
       matrix, 1. + array_ops.matrix_diag_part(matrix))
Code Example #2
 def _to_dense(self):
     reflection_axis = ops.convert_to_tensor_v2_with_dispatch(
         self.reflection_axis)
     normalized_axis = reflection_axis / linalg.norm(
         reflection_axis, axis=-1, keepdims=True)
     mat = normalized_axis[..., array_ops.newaxis]
     matrix = -2 * math_ops.matmul(mat, mat, adjoint_b=True)
     return array_ops.matrix_set_diag(
         matrix, 1. + array_ops.matrix_diag_part(matrix))
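Both `_to_dense` variants above build the dense Householder reflection H = I - 2 * v * v^H from the normalized reflection axis: the `-2 * matmul(mat, mat, adjoint_b=True)` term is -2 * v * v^H, and `matrix_set_diag` adds the identity back on the diagonal. A minimal sketch of the same matrix through the public TensorFlow 2.x API, assuming these methods belong to `tf.linalg.LinearOperatorHouseholder` (not stated on this page):

import tensorflow as tf

# Sketch only: the axis does not need to be unit length; it is normalized internally.
v = tf.constant([1.0, 2.0, 2.0])
op = tf.linalg.LinearOperatorHouseholder(reflection_axis=v)

# The same matrix built by hand from the normalized axis.
v_hat = v / tf.norm(v)
h_manual = tf.eye(3) - 2.0 * tf.matmul(v_hat[:, tf.newaxis],
                                       v_hat[:, tf.newaxis],
                                       adjoint_b=True)
print(op.to_dense().numpy())   # matches h_manual up to floating-point error
print(h_manual.numpy())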
Code Example #3
  def _matmul(self, x, adjoint=False, adjoint_arg=False):
    # Given a vector `v`, we would like to reflect `x` about the hyperplane
    # orthogonal to `v` going through the origin.  We first project `x` to `v`
    # to get v * dot(v, x) / dot(v, v).  After we project, we can reflect the
    # projection about the hyperplane by flipping sign to get
    # -v * dot(v, x) / dot(v, v).  Finally, we can add back the component
    # that is orthogonal to v. This is invariant under reflection, since the
    # whole hyperplane is invariant. This component is equal to x - v * dot(v,
    # x) / dot(v, v), giving the formula x - 2 * v * dot(v, x) / dot(v, v)
    # for the reflection.

    # Note that because this is a reflection, it lies in O(n) (for real vector
    # spaces) or U(n) (for complex vector spaces), and thus is its own adjoint.
    x = linalg.adjoint(x) if adjoint_arg else x
    normalized_axis = self.reflection_axis / linalg.norm(
        self.reflection_axis, axis=-1, keepdims=True)
    mat = normalized_axis[..., array_ops.newaxis]
    x_dot_normalized_v = math_ops.matmul(mat, x, adjoint_a=True)

    return x - 2 * mat * x_dot_normalized_v
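As the comments in the example explain, `_matmul` applies the reflection column by column as x - 2 * v * dot(v, x) / dot(v, v), which costs O(n) per column instead of the O(n^2) of a dense multiply. A small, hedged check (same `LinearOperatorHouseholder` assumption as above) that the shortcut agrees with the dense matrix:

import tensorflow as tf

v = tf.constant([0.0, 3.0, 4.0])
op = tf.linalg.LinearOperatorHouseholder(reflection_axis=v)

x = tf.constant([[1.0], [2.0], [3.0]])        # a single column vector
print(op.matmul(x).numpy())                   # O(n) formula from the snippet
print(tf.matmul(op.to_dense(), x).numpy())    # dense I - 2 * v * v^H, same result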
Code Example #4
File: conjugate_gradient.py  Project: MFChunga/poo
 def stopping_criterion(i, state):
     # Keep iterating while under max_iter and any batch member's residual
     # norm still exceeds the tolerance.
     return math_ops.logical_and(
         i < max_iter,
         math_ops.reduce_any(linalg.norm(state.r, axis=-1) > tol))
Code Example #5
File: conjugate_gradient.py  Project: MFChunga/poo
def conjugate_gradient(operator,
                       rhs,
                       preconditioner=None,
                       x=None,
                       tol=1e-5,
                       max_iter=20,
                       name='conjugate_gradient'):
    r"""Conjugate gradient solver.

  Solves a linear system of equations `A*x = rhs` for self-adjoint, positive
  definite matrix `A` and right-hand side vector `rhs`, using an iterative,
  matrix-free algorithm where the action of the matrix A is represented by
  `operator`. The iteration terminates when either the number of iterations
  exceeds `max_iter` or when the residual norm has been reduced to `tol`
  times its initial value, i.e. \\(||rhs - A x_k|| <= tol ||rhs||\\).

  Args:
    operator: A `LinearOperator` that is self-adjoint and positive definite.
    rhs: A possibly batched vector of shape `[..., N]` containing the right-hand
      side vector.
    preconditioner: A `LinearOperator` that approximates the inverse of `A`.
      An efficient preconditioner could dramatically improve the rate of
      convergence. If `preconditioner` represents matrix `M` (`M` approximates
      `A^{-1}`), the algorithm uses `preconditioner.apply(x)` to estimate
      `A^{-1}x`. For this to be useful, the cost of applying `M` should be
      much lower than computing `A^{-1}` directly.
    x: A possibly batched vector of shape `[..., N]` containing the initial
      guess for the solution.
    tol: A float scalar convergence tolerance.
    max_iter: An integer giving the maximum number of iterations.
    name: A name scope for the operation.

  Returns:
    output: A namedtuple representing the final state with fields:
      - i: A scalar `int32` `Tensor`. Number of iterations executed.
      - x: A rank-1 `Tensor` of shape `[..., N]` containing the computed
          solution.
      - r: A rank-1 `Tensor` of shape `[..., N]` containing the residual vector.
      - p: A rank-1 `Tensor` of shape `[..., N]`. `A`-conjugate basis vector.
      - gamma: \\(r \cdot M \cdot r\\), equivalent to \\(||r||_2^2\\) when
        `preconditioner=None`.
  """
    if not (operator.is_self_adjoint and operator.is_positive_definite):
        raise ValueError(
            'Expected a self-adjoint, positive definite operator.')

    cg_state = collections.namedtuple('CGState', ['i', 'x', 'r', 'p', 'gamma'])

    def stopping_criterion(i, state):
        return math_ops.logical_and(
            i < max_iter,
            math_ops.reduce_any(linalg.norm(state.r, axis=-1) > tol))

    def dot(x, y):
        return array_ops.squeeze(math_ops.matvec(x[..., array_ops.newaxis],
                                                 y,
                                                 adjoint_a=True),
                                 axis=-1)

    def cg_step(i, state):  # pylint: disable=missing-docstring
        z = math_ops.matvec(operator, state.p)
        alpha = state.gamma / dot(state.p, z)
        x = state.x + alpha[..., array_ops.newaxis] * state.p
        r = state.r - alpha[..., array_ops.newaxis] * z
        if preconditioner is None:
            q = r
        else:
            q = preconditioner.matvec(r)
        gamma = dot(r, q)
        beta = gamma / state.gamma
        p = q + beta[..., array_ops.newaxis] * state.p
        return i + 1, cg_state(i + 1, x, r, p, gamma)

    # We now broadcast initial shapes so that we have fixed shapes per iteration.

    with ops.name_scope(name):
        broadcast_shape = array_ops.broadcast_dynamic_shape(
            array_ops.shape(rhs)[:-1], operator.batch_shape_tensor())
        if preconditioner is not None:
            broadcast_shape = array_ops.broadcast_dynamic_shape(
                broadcast_shape, preconditioner.batch_shape_tensor())
        broadcast_rhs_shape = array_ops.concat(
            [broadcast_shape, [array_ops.shape(rhs)[-1]]], axis=-1)
        r0 = array_ops.broadcast_to(rhs, broadcast_rhs_shape)
        tol *= linalg.norm(r0, axis=-1)

        if x is None:
            x = array_ops.zeros(broadcast_rhs_shape,
                                dtype=rhs.dtype.base_dtype)
        else:
            r0 = rhs - math_ops.matvec(operator, x)
        if preconditioner is None:
            p0 = r0
        else:
            p0 = math_ops.matvec(preconditioner, r0)
        gamma0 = dot(r0, p0)
        i = constant_op.constant(0, dtype=dtypes.int32)
        state = cg_state(i=i, x=x, r=r0, p=p0, gamma=gamma0)
        _, state = control_flow_ops.while_loop(stopping_criterion, cg_step,
                                               [i, state])
        return cg_state(state.i,
                        x=state.x,
                        r=state.r,
                        p=state.p,
                        gamma=state.gamma)
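In recent TensorFlow releases this solver is exported as `tf.linalg.experimental.conjugate_gradient`. A minimal usage sketch; the 2x2 system below is illustrative and not part of the original source:

import tensorflow as tf

# A small symmetric positive definite system A x = rhs.
a = tf.constant([[4.0, 1.0],
                 [1.0, 3.0]])
operator = tf.linalg.LinearOperatorFullMatrix(
    a, is_self_adjoint=True, is_positive_definite=True)
rhs = tf.constant([1.0, 2.0])

result = tf.linalg.experimental.conjugate_gradient(operator, rhs, tol=1e-6)
print(result.i.numpy())   # number of iterations executed
print(result.x.numpy())   # approximate solution, close to [1/11, 7/11]

Note that the operator must be flagged `is_self_adjoint=True` and `is_positive_definite=True`, otherwise the `ValueError` at the top of the function is raised.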
Code Example #6
 def _diag_part(self):
     # Diagonal of I - 2 * v v^H: 1 - 2 * |v_i|^2 for the normalized axis v.
     normalized_axis = self.reflection_axis / linalg.norm(
         self.reflection_axis, axis=-1, keepdims=True)
     return 1. - 2 * normalized_axis * math_ops.conj(normalized_axis)
Code Example #7
 def _diag_part(self):
     reflection_axis = ops.convert_to_tensor(self.reflection_axis)
     normalized_axis = reflection_axis / linalg.norm(
         reflection_axis, axis=-1, keepdims=True)
     return 1. - 2 * normalized_axis * math_ops.conj(normalized_axis)
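Both `_diag_part` variants read the diagonal of H = I - 2 * v * v^H directly as 1 - 2 * |v_i|^2 for the normalized axis, without materializing the matrix. A short sketch of that identity through the public API (same `LinearOperatorHouseholder` assumption as above):

import tensorflow as tf

v = tf.constant([1.0, 2.0, 2.0])
op = tf.linalg.LinearOperatorHouseholder(reflection_axis=v)

v_hat = v / tf.norm(v)                               # [1/3, 2/3, 2/3]
print(op.diag_part().numpy())                        # roughly [0.778, 0.111, 0.111]
print((1.0 - 2.0 * v_hat * tf.math.conj(v_hat)).numpy())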