Example 1
def _reconstruct(self, observation, out):
    observation = self.observation_space.element(observation)
    out[:] = self.x0
    # Plain CG requires a self-adjoint operator; otherwise solve the
    # normal equations with CG.
    if self.op_is_symmetric:
        iterative.conjugate_gradient(self.op, out, observation,
                                     self.niter, self.callback)
    else:
        iterative.conjugate_gradient_normal(self.op, out, observation,
                                            self.niter, self.callback)
    return out
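
Examples 1 and 2 are methods of a reconstructor class: they rely on attributes set elsewhere (``self.op``, ``self.x0``, ``self.niter``, ``self.op_is_symmetric``, ``self.observation_space``, ``self.callback``) and on ODL's iterative solvers. The sketch below shows one way such a wrapper could be assembled; the class name, constructor signature, import path and the toy problem at the end are assumptions for illustration, not taken from the original code.

import odl
from odl.solvers.iterative import iterative


class CGReconstructor:
    """Hypothetical wrapper around the ``_reconstruct`` method above."""

    def __init__(self, op, x0, niter, callback=None, op_is_symmetric=False):
        self.op = op                            # forward operator, reco -> observation
        self.x0 = x0                            # initial guess in the reconstruction space
        self.niter = niter                      # number of CG iterations
        self.callback = callback                # optional per-iteration callable
        self.op_is_symmetric = op_is_symmetric  # True if op is self-adjoint
        self.reco_space = op.domain
        self.observation_space = op.range

    def reconstruct(self, observation):
        out = self.reco_space.zero()
        return self._reconstruct(observation, out)

    def _reconstruct(self, observation, out):
        # Body as in Example 1 above.
        observation = self.observation_space.element(observation)
        out[:] = self.x0
        if self.op_is_symmetric:
            iterative.conjugate_gradient(self.op, out, observation,
                                         self.niter, self.callback)
        else:
            iterative.conjugate_gradient_normal(self.op, out, observation,
                                                self.niter, self.callback)
        return out


# Possible use on a toy problem (operator and data chosen for illustration):
space = odl.rn(4)
op = odl.ScalingOperator(space, 2.0)   # self-adjoint toy forward operator
reconstructor = CGReconstructor(op, x0=space.zero(), niter=10,
                                op_is_symmetric=True)
print(reconstructor.reconstruct([2.0, 4.0, -2.0, 0.0]))  # approx. [1, 2, -1, 0]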
Example 2
def _reconstruct(self, observation, out):
    observation = self.observation_space.element(observation)
    # Work on a temporary element if ``out`` does not live in the
    # reconstruction space (e.g. a plain array was passed in).
    out_ = out
    if out not in self.reco_space:
        out_ = self.reco_space.zero()
    out_[:] = self.x0
    if self.op_is_symmetric:
        iterative.conjugate_gradient(self.op, out_, observation,
                                     self.niter, self.callback)
    else:
        iterative.conjugate_gradient_normal(self.op, out_, observation,
                                            self.niter, self.callback)
    if out not in self.reco_space:
        out[:] = out_
    return out
Example 3
def newtons_method(op, x, line_search, num_iter=10, cg_iter=None,
                   partial=None):
    """Newton's method for solving a system of equations.

    This is a general and optimized implementation of Newton's method
    for solving the problem::

        f(x) = 0

    that is, of finding a root of a function.

    The algorithm is well-known and there is a vast literature about it.
    Among others, the method is described in [BV2004]_, Sections 9.5
    and 10.2 (`book available online
    <http://stanford.edu/~boyd/cvxbook/bv_cvxbook.pdf>`_),
    [GNS2009]_, Section 2.7 for solving nonlinear equations and Section
    11.3 for its use in minimization, and Wikipedia on `Newton's method
    <https://en.wikipedia.org/wiki/Newton's_method>`_.

    Parameters
    ----------
    op : `Operator`
        Gradient of the objective function, ``x --> grad f(x)``
    x : element in the domain of ``op``
        Starting point of the iteration
    line_search : `LineSearch`
        Strategy to choose the step length
    num_iter : `int`, optional
        Number of iterations
    cg_iter : `int`, optional
        Number of iterations in the conjugate gradient solver,
        for computing the search direction.
    partial : `Partial`, optional
        Object executing code per iteration, e.g. plotting each iterate

    Notes
    -----
    The algorithm works by iteratively solving

        :math:`\partial f(x_k)p_k = -f(x_k)`

    and then updating as

        :math:`x_{k+1} = x_k + \\alpha p_k`,

    where :math:`\\alpha` is a suitable step length (see the
    references). In this implementation the system of equations is
    solved using the conjugate gradient method.
    """
    # TODO: update doc
    if cg_iter is None:
        # For a linear system Ax = b with x, b in R^n, CG needs at most n
        # iterations, so the space size is a safe default.
        cg_iter = op.domain.size

    # TODO: optimize by using lincomb and avoiding to create copies
    for _ in range(num_iter):

        # Initialize the search direction to 0
        search_direction = x.space.zero()

        # Compute hessian (as operator) and gradient in the current point
        hessian = op.derivative(x)
        deriv_in_point = op(x).copy()

        # Solving A*x = b for x, in this case f'(x)*p = -f(x)
        # TODO: Let the user provide/choose method for how to solve this?
        conjugate_gradient(hessian, search_direction,
                           -1 * deriv_in_point, cg_iter)

        # Computing step length
        dir_deriv = search_direction.inner(deriv_in_point)
        step_length = line_search(x, search_direction, dir_deriv)

        # Updating
        x += step_length * search_direction

        if partial is not None:
            partial(x)
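
The Notes above describe each Newton step as an inner linear solve, here done with the conjugate gradient method, followed by a damped update along the computed direction. The following self-contained NumPy sketch (independent of the library code; all names and the toy problem are chosen for illustration) runs the same scheme on a small nonlinear system with a constant step length:

import numpy as np


def cg_solve(A, b, x, niter):
    """Plain conjugate gradient for a symmetric positive definite matrix A."""
    r = b - A @ x
    p = r.copy()
    rs_old = r @ r
    for _ in range(niter):
        Ap = A @ p
        denom = p @ Ap
        if denom == 0.0:
            break
        alpha = rs_old / denom
        x += alpha * p
        r -= alpha * Ap
        rs_new = r @ r
        if np.sqrt(rs_new) < 1e-14:
            break
        p = r + (rs_new / rs_old) * p
        rs_old = rs_new
    return x


def toy_newtons_method(f, jac, x, num_iter=10, step=1.0):
    """Solve f(x) = 0: each step solves jac(x_k) p_k = -f(x_k), then x += step * p_k."""
    for _ in range(num_iter):
        p = cg_solve(jac(x), -f(x), np.zeros_like(x), niter=x.size)
        x = x + step * p
    return x


# Toy problem: root of f(x) = A x + x**3 - b (elementwise cube), A symmetric pos. def.
A = np.array([[3.0, 1.0],
              [1.0, 2.0]])
b = np.array([1.0, -2.0])
f = lambda x: A @ x + x ** 3 - b
jac = lambda x: A + np.diag(3.0 * x ** 2)   # Jacobian, symmetric positive definite

root = toy_newtons_method(f, jac, x=np.zeros(2))
print(np.allclose(f(root), 0.0))  # expected: True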
Example 4
def newtons_method(f, x, line_search=1.0, maxiter=1000, tol=1e-16,
                   cg_iter=None, callback=None):
    """Newton's method for minimizing a functional.

    Notes
    -----
    This is a general and optimized implementation of Newton's method
    for solving the problem:

    .. math::
        \min f(x)

    for a differentiable function
    :math:`f: \mathcal{X} \\to \mathbb{R}` on a Hilbert space
    :math:`\mathcal{X}`. It does so by finding a zero of the gradient

    .. math::
        \\nabla f: \mathcal{X} \\to \mathcal{X}.

    The algorithm is well-known and there is a vast literature about it.
    Among others, the method is described in [BV2004], Sections 9.5
    and 10.2 (`book available online
    <http://stanford.edu/~boyd/cvxbook/bv_cvxbook.pdf>`_),
    [GNS2009], Section 2.7 for solving nonlinear equations and Section
    11.3 for its use in minimization, and Wikipedia on `Newton's method
    <https://en.wikipedia.org/wiki/Newton's_method>`_.

    The algorithm works by iteratively solving

    .. math::
        \\nabla^2 f(x_k) p_k = -\\nabla f(x_k)

    and then updating as

    .. math::
        x_{k+1} = x_k + \\alpha p_k,

    where :math:`\\alpha` is a suitable step length (see the
    references). In this implementation the system of equations is
    solved using the conjugate gradient method.

    Parameters
    ----------
    f : `Functional`
        Goal functional. Needs to have ``f.gradient`` and
        ``f.gradient.derivative``.
    x : ``f.domain`` element
        Starting point of the iteration
    line_search : float or `LineSearch`, optional
        Strategy to choose the step length. If a float is given, uses it as a
        fixed step length.
    maxiter : int, optional
        Maximum number of iterations.
    tol : float, optional
        Tolerance that should be used for terminating the iteration.
    cg_iter : int, optional
        Number of iterations in the conjugate gradient solver,
        for computing the search direction.
    callback : callable, optional
        Object executing code per iteration, e.g. plotting each iterate

    References
    ----------
    [BV2004] Boyd, S, and Vandenberghe, L. *Convex optimization*.
    Cambridge University Press, 2004.

    [GNS2009] Griva, I, Nash, S G, and Sofer, A. *Linear and nonlinear
    optimization*. SIAM, 2009.
    """
    # TODO: update doc
    grad = f.gradient
    if x not in grad.domain:
        raise TypeError('`x` {!r} is not in the domain of `f` {!r}'
                        ''.format(x, grad.domain))

    if not callable(line_search):
        line_search = ConstantLineSearch(line_search)

    if cg_iter is None:
        # For a linear system Ax = b with x, b in R^n, CG needs at most n
        # iterations, so the space size is a safe default.
        cg_iter = grad.domain.size

    # TODO: optimize by using lincomb and avoiding to create copies
    for _ in range(maxiter):

        # Initialize the search direction to 0
        search_direction = x.space.zero()

        # Compute hessian (as operator) and gradient in the current point
        hessian = grad.derivative(x)
        deriv_in_point = grad(x)

        # Solving A*x = b for x, in this case f''(x)*p = -f'(x)
        # TODO: Let the user provide/choose method for how to solve this?
        try:
            hessian_inverse = hessian.inverse
        except NotImplementedError:
            conjugate_gradient(hessian, search_direction,
                               -deriv_in_point, cg_iter)
        else:
            hessian_inverse(-deriv_in_point, out=search_direction)

        # Computing step length
        dir_deriv = search_direction.inner(deriv_in_point)
        if np.abs(dir_deriv) <= tol:
            return

        step_length = line_search(x, search_direction, dir_deriv)

        # Updating
        x += step_length * search_direction

        if callback is not None:
            callback(x)
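
Since Example 4 first tries ``hessian.inverse`` and only falls back to CG when no inverse is implemented, a functional with an explicitly invertible second derivative exercises the fast path. A minimal usage sketch, assuming the module-level imports of the function above (``np``, ``conjugate_gradient``, ``ConstantLineSearch``) are in place as in ``odl.solvers``, and using ODL's ``L2NormSquared`` purely for illustration:

import odl

space = odl.rn(3)
# f(x) = ||x||_2^2 has gradient 2*x and constant Hessian 2*I, whose inverse
# is known, so the ``hessian.inverse`` branch above is taken.
f = odl.solvers.L2NormSquared(space)

x = space.element([1.0, -2.0, 0.5])
newtons_method(f, x, line_search=1.0, maxiter=10, tol=1e-12)
print(x)  # expected: the zero vector after a single full Newton step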
Example 5
def newtons_method(f, x, line_search=1.0, maxiter=1000, tol=1e-16,
                   cg_iter=None, callback=None):
    """Newton's method for minimizing a functional.

    Notes
    -----
    This is a general and optimized implementation of Newton's method
    for solving the problem:

        :math:`\min f(x)`

    for a differentiable function
    :math:`f: \mathcal{X} \\to \mathbb{R}` on a Hilbert space
    :math:`\mathcal{X}`. It does so by finding a zero of the gradient

        :math:`\\nabla f: \mathcal{X} \\to \mathcal{X}`.

    The algorithm is well-known and there is a vast literature about it.
    Among others, the method is described in [BV2004]_, Sections 9.5
    and 10.2 (`book available online
    <http://stanford.edu/~boyd/cvxbook/bv_cvxbook.pdf>`_),
    [GNS2009]_, Section 2.7 for solving nonlinear equations and Section
    11.3 for its use in minimization, and Wikipedia on `Newton's method
    <https://en.wikipedia.org/wiki/Newton's_method>`_.

    The algorithm works by iteratively solving

        :math:`\\nabla^2 f(x_k) p_k = -\\nabla f(x_k)`

    and then updating as

        :math:`x_{k+1} = x_k + \\alpha p_k`,

    where :math:`\\alpha` is a suitable step length (see the
    references). In this implementation the system of equations is
    solved using the conjugate gradient method.

    Parameters
    ----------
    f : `Functional`
        Goal functional. Needs to have ``f.gradient`` and
        ``f.gradient.derivative``.
    x : ``f.domain`` element
        Starting point of the iteration
    line_search : float or `LineSearch`, optional
        Strategy to choose the step length. If a float is given, uses it as a
        fixed step length.
    maxiter : int, optional
        Maximum number of iterations.
    tol : float, optional
        Tolerance that should be used for terminating the iteration.
    cg_iter : int, optional
        Number of iterations in the conjugate gradient solver,
        for computing the search direction.
    callback : callable, optional
        Object executing code per iteration, e.g. plotting each iterate
    """
    # TODO: update doc
    grad = f.gradient
    if x not in grad.domain:
        raise TypeError('`x` {!r} is not in the domain of `f` {!r}'
                        ''.format(x, grad.domain))

    if not callable(line_search):
        line_search = ConstantLineSearch(line_search)

    if cg_iter is None:
        # For a linear system Ax = b with x, b in R^n, CG needs at most n
        # iterations, so the space size is a safe default.
        cg_iter = grad.domain.size

    # TODO: optimize by using lincomb and avoiding to create copies
    for _ in range(maxiter):

        # Initialize the search direction to 0
        search_direction = x.space.zero()

        # Compute hessian (as operator) and gradient in the current point
        hessian = grad.derivative(x)
        deriv_in_point = grad(x)

        # Solving A*x = b for x, in this case f''(x)*p = -f'(x)
        # TODO: Let the user provide/choose method for how to solve this?
        conjugate_gradient(hessian, search_direction,
                           -deriv_in_point, cg_iter)

        # Computing step length
        dir_deriv = search_direction.inner(deriv_in_point)
        if np.abs(dir_deriv) <= tol:
            return

        step_length = line_search(x, search_direction, dir_deriv)

        # Updating
        x += step_length * search_direction

        if callback is not None:
            callback(x)
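
Example 5 always uses CG for the inner solve. Instead of a fixed step length, a line-search object can be passed; the sketch below assumes ODL's ``BacktrackingLineSearch``, the same illustrative functional as above, and that the module-level imports of the function (``np``, ``conjugate_gradient``, ``ConstantLineSearch``) are available as in ``odl.solvers``:

import odl

space = odl.rn(3)
f = odl.solvers.L2NormSquared(space)

x = space.element([5.0, -3.0, 1.0])
line_search = odl.solvers.BacktrackingLineSearch(f)
newtons_method(f, x, line_search=line_search, maxiter=20)
print(f(x))  # expected: (numerically) zero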
Example 6
def newtons_method(op,
                   x,
                   line_search,
                   num_iter=10,
                   cg_iter=None,
                   callback=None):
    """Newton's method for solving a system of equations.

    This is a general and optimized implementation of Newton's method
    for solving the problem::

        f(x) = 0

    that is, of finding a root of a function.

    The algorithm is well-known and there is a vast literature about it.
    Among others, the method is described in [BV2004]_, Sections 9.5
    and 10.2 (`book available online
    <http://stanford.edu/~boyd/cvxbook/bv_cvxbook.pdf>`_),
    [GNS2009]_, Section 2.7 for solving nonlinear equations and Section
    11.3 for its use in minimization, and Wikipedia on `Newton's method
    <https://en.wikipedia.org/wiki/Newton's_method>`_.

    Parameters
    ----------
    op : `Operator`
        Gradient of the objective function, ``x --> grad f(x)``
    x : element in the domain of ``op``
        Starting point of the iteration
    line_search : `LineSearch`
        Strategy to choose the step length
    num_iter : `int`, optional
        Number of iterations
    cg_iter : `int`, optional
        Number of iterations in the conjugate gradient solver,
        for computing the search direction.
    callback : `callable`, optional
        Object executing code per iteration, e.g. plotting each iterate

    Notes
    -----
    The algorithm works by iteratively solving

        :math:`\partial f(x_k)p_k = -f(x_k)`

    and then updating as

        :math:`x_{k+1} = x_k + \\alpha p_k`,

    where :math:`\\alpha` is a suitable step length (see the
    references). In this implementation the system of equations is
    solved using the conjugate gradient method.
    """
    # TODO: update doc
    if cg_iter is None:
        # For a linear system Ax = b with x, b in R^n, CG needs at most n
        # iterations, so the space size is a safe default.
        cg_iter = op.domain.size

    # TODO: optimize by using lincomb and avoiding to create copies
    for _ in range(num_iter):

        # Initialize the search direction to 0
        search_direction = x.space.zero()

        # Compute hessian (as operator) and gradient in the current point
        hessian = op.derivative(x)
        deriv_in_point = op(x).copy()

        # Solving A*x = b for x, in this case f'(x)*p = -f(x)
        # TODO: Let the user provide/choose method for how to solve this?
        conjugate_gradient(hessian, search_direction, -1 * deriv_in_point,
                           cg_iter)

        # Computing step length
        dir_deriv = search_direction.inner(deriv_in_point)
        step_length = line_search(x, search_direction, dir_deriv)

        # Updating
        x += step_length * search_direction

        if callback is not None:
            callback(x)
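
Example 6 is the same root-finding variant as Example 3, with ``partial`` replaced by a generic ``callback``. Any callable taking the current iterate can be passed; the sketch below assumes ODL's ``CallbackPrintIteration`` and ``ConstantLineSearch``, a toy operator (the gradient of ``||x||_2^2``) whose root is the zero vector, and that the function's module-level ``conjugate_gradient`` import is in place as in ``odl.solvers``:

import odl

space = odl.rn(3)
op = odl.ScalingOperator(space, 2.0)   # op(x) = 2*x, i.e. the gradient of ||x||_2^2

x = space.element([2.0, 0.0, -1.0])
newtons_method(op, x,
               line_search=odl.solvers.ConstantLineSearch(1.0),
               num_iter=3,
               callback=odl.solvers.CallbackPrintIteration())
print(x)  # expected: the zero vector (the root), found in the first iteration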