Example #1
def leastsq(func,
            x0,
            args=(),
            Dfun=None,
            full_output=0,
            col_deriv=0,
            ftol=1.49012e-8,
            xtol=1.49012e-8,
            gtol=0.0,
            maxfev=0,
            epsfcn=0.0,
            factor=100,
            diag=None,
            warning=True):
    """Minimize the sum of squares of a set of equations.

  Description:

    Return the point which minimizes the sum of squares of M
    (non-linear) equations in N unknowns given a starting estimate, x0,
    using a modification of the Levenberg-Marquardt algorithm.

                    x = arg min(sum(func(y)**2,axis=0))
                             y

  Inputs:

    func -- A Python function or method which takes at least one
            (possibly length N vector) argument and returns M
            floating point numbers.
    x0 -- The starting estimate for the minimization.
    args -- Any extra arguments to func are placed in this tuple.
    Dfun -- A function or method to compute the Jacobian of func with
            derivatives across the rows. If this is None, the
            Jacobian will be estimated.
    full_output -- non-zero to return all optional outputs.
    col_deriv -- non-zero to specify that the Jacobian function
                 computes derivatives down the columns (faster, because
                 there is no transpose operation).
    warning -- True to print a warning message when the call is
               unsuccessful; False to suppress the warning message.

  Outputs: (x, {cov_x, infodict, mesg}, ier)

    x -- the solution (or the result of the last iteration for an
         unsuccessful call).

    cov_x -- uses the fjac and ipvt optional outputs to construct an
             estimate of the covariance matrix of the solution.
             None if a singular matrix is encountered (indicates
             infinite covariance in some direction).
    infodict -- a dictionary of optional outputs with the keys:
                'nfev' : the number of function calls
                'fvec' : the function evaluated at the output
                'fjac' : A permutation of the R matrix of a QR
                         factorization of the final approximate
                         Jacobian matrix, stored column wise.
                         Together with ipvt, the covariance of the
                         estimate can be approximated.
                'ipvt' : an integer array of length N which defines
                         a permutation matrix, p, such that
                         fjac*p = q*r, where r is upper triangular
                         with diagonal elements of nonincreasing
                         magnitude. Column j of p is column ipvt(j)
                         of the identity matrix.
                'qtf'  : the vector (transpose(q) * fvec).
    mesg -- a string message giving information about the cause of failure.
    ier -- an integer flag.  If it is equal to 1, 2, 3 or 4, the
           solution was found.  Otherwise, the solution was not
           found. In either case, the optional output variable 'mesg'
           gives more information.


  Extended Inputs:

   ftol -- Relative error desired in the sum of squares.
   xtol -- Relative error desired in the approximate solution.
   gtol -- Orthogonality desired between the function vector
           and the columns of the Jacobian.
   maxfev -- The maximum number of calls to the function. If zero,
             then 100*(N+1) is the maximum where N is the number
             of elements in x0.
   epsfcn -- A suitable step length for the forward-difference
             approximation of the Jacobian (for Dfun=None). If
             epsfcn is less than the machine precision, it is assumed
             that the relative errors in the functions are of
             the order of the machine precision.
   factor -- A parameter determining the initial step bound
             (factor * || diag * x||). Should be in interval (0.1,100).
   diag -- A sequence of N positive entries that serve as
           scale factors for the variables.

  Remarks:

    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.

  See also:

      scikits.openopt, which offers a unified syntax to call this and other solvers

      fmin, fmin_powell, fmin_cg,
             fmin_bfgs, fmin_ncg -- multivariate local optimizers

      fmin_l_bfgs_b, fmin_tnc,
             fmin_cobyla -- constrained multivariate optimizers

      anneal, brute -- global optimizers

      fminbound, brent, golden, bracket -- local scalar minimizers

      fsolve -- n-dimensional root-finding

      brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding

      fixed_point -- scalar and vector fixed-point finder

    """
    x0 = array(x0, ndmin=1)
    n = len(x0)
    if type(args) != type(()): args = (args, )
    m = check_func(func, x0, args, n)[0]
    if Dfun is None:
        if (maxfev == 0):
            maxfev = 200 * (n + 1)
        retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol, gtol,
                                 maxfev, epsfcn, factor, diag)
    else:
        if col_deriv:
            check_func(Dfun, x0, args, n, (n, m))
        else:
            check_func(Dfun, x0, args, n, (m, n))
        if (maxfev == 0):
            maxfev = 100 * (n + 1)
        retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv,
                                 ftol, xtol, gtol, maxfev, factor, diag)

    errors = {
        0: ["Improper input parameters.", TypeError],
        1: [
            "Both actual and predicted relative reductions in the sum of squares\n  are at most %f"
            % ftol, None
        ],
        2: [
            "The relative error between two consecutive iterates is at most %f"
            % xtol, None
        ],
        3: [
            "Both actual and predicted relative reductions in the sum of squares\n  are at most %f and the relative error between two consecutive iterates is at \n  most %f"
            % (ftol, xtol), None
        ],
        4: [
            "The cosine of the angle between func(x) and any column of the\n  Jacobian is at most %f in absolute value"
            % gtol, None
        ],
        5: [
            "Number of calls to function has reached maxfev = %d." % maxfev,
            ValueError
        ],
        6: [
            "ftol=%f is too small, no further reduction in the sum of squares\n  is possible."
            "" % ftol, ValueError
        ],
        7: [
            "xtol=%f is too small, no further improvement in the approximate\n  solution is possible."
            % xtol, ValueError
        ],
        8: [
            "gtol=%f is too small, func(x) is orthogonal to the columns of\n  the Jacobian to machine precision."
            % gtol, ValueError
        ],
        'unknown': ["Unknown error.", TypeError]
    }

    info = retval[-1]  # The FORTRAN return value

    if (info not in [1, 2, 3, 4] and not full_output):
        if info in [5, 6, 7, 8]:
            if warning: print("Warning: " + errors[info][0])
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                raise errors['unknown'][1](errors['unknown'][0])

    if n == 1:
        retval = (retval[0][0], ) + retval[1:]

    mesg = errors[info][0]
    if full_output:
        from numpy.dual import inv
        from numpy.linalg import LinAlgError
        perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
        r = triu(transpose(retval[1]['fjac'])[:n, :])
        R = dot(r, perm)
        try:
            cov_x = inv(dot(transpose(R), R))
        except LinAlgError:
            cov_x = None
        return (retval[0], cov_x) + retval[1:-1] + (mesg, info)
    else:
        return (retval[0], info)
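
The docstrings above describe the call signature but never show an actual call. The following is a minimal usage sketch, not taken from the implementation above, against the public scipy.optimize.leastsq entry point; the exponential-decay model, the data, and all variable names are illustrative assumptions. The residual function receives the parameter vector first and any extra data through args, and with the defaults the call returns the solution array plus the integer status flag ier.

# Usage sketch (illustrative, not part of the implementation above).
import numpy as np
from scipy.optimize import leastsq

def residuals(params, t, y):
    # Residuals of an exponential-decay model y = a * exp(-b * t).
    a, b = params
    return y - a * np.exp(-b * t)

t = np.linspace(0.0, 4.0, 50)
y = 2.5 * np.exp(-1.3 * t) + 0.05 * np.random.randn(t.size)

popt, ier = leastsq(residuals, [1.0, 1.0], args=(t, y))
print(popt, ier)   # ier in {1, 2, 3, 4} indicates success
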
Example #2
def leastsq(func, x0, args=(), Dfun=None, full_output=0,
            col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
            gtol=0.0, maxfev=0, epsfcn=0.0, factor=100, diag=None):
    """
    Minimize the sum of squares of a set of equations.

    ::

        x = arg min(sum(func(y)**2,axis=0))
                 y

    Parameters
    ----------
    func : callable
        should take at least one (possibly length N vector) argument and
        returns M floating point numbers.
    x0 : ndarray
        The starting estimate for the minimization.
    args : tuple
        Any extra arguments to func are placed in this tuple.
    Dfun : callable
        A function or method to compute the Jacobian of func with derivatives
        across the rows. If this is None, the Jacobian will be estimated.
    full_output : bool
        non-zero to return all optional outputs.
    col_deriv : bool
        non-zero to specify that the Jacobian function computes derivatives
        down the columns (faster, because there is no transpose operation).
    ftol : float
        Relative error desired in the sum of squares.
    xtol : float
        Relative error desired in the approximate solution.
    gtol : float
        Orthogonality desired between the function vector and the columns of
        the Jacobian.
    maxfev : int
        The maximum number of calls to the function. If zero, then 100*(N+1) is
        the maximum where N is the number of elements in x0.
    epsfcn : float
        A suitable step length for the forward-difference approximation of the
        Jacobian (for Dfun=None). If epsfcn is less than the machine precision,
        it is assumed that the relative errors in the functions are of the
        order of the machine precision.
    factor : float
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
    diag : sequence
        N positive entries that serve as scale factors for the variables.

    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for an unsuccessful
        call).
    cov_x : ndarray
        Uses the fjac and ipvt optional outputs to construct an
        estimate of the jacobian around the solution.  ``None`` if a
        singular matrix is encountered (indicates very flat curvature in
        some direction).  This matrix must be multiplied by the
        residual standard deviation to get the covariance of the
        parameter estimates -- see curve_fit.
    infodict : dict
        a dictionary of optional outputs with the keys::

            - 'nfev' : the number of function calls
            - 'fvec' : the function evaluated at the output
            - 'fjac' : A permutation of the R matrix of a QR
                     factorization of the final approximate
                     Jacobian matrix, stored column wise.
                     Together with ipvt, the covariance of the
                     estimate can be approximated.
            - 'ipvt' : an integer array of length N which defines
                     a permutation matrix, p, such that
                     fjac*p = q*r, where r is upper triangular
                     with diagonal elements of nonincreasing
                     magnitude. Column j of p is column ipvt(j)
                     of the identity matrix.
            - 'qtf'  : the vector (transpose(q) * fvec).

    mesg : str
        A string message giving information about the cause of failure.
    ier : int
        An integer flag.  If it is equal to 1, 2, 3 or 4, the solution was
        found.  Otherwise, the solution was not found. In either case, the
        optional output variable 'mesg' gives more information.

    Notes
    -----
    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.

    cov_x is a Jacobian approximation to the Hessian of the least squares
    objective function.
    This approximation assumes that the objective function is based on the
    difference between some observed target data (ydata) and a (non-linear)
    function of the parameters `f(xdata, params)` ::

           func(params) = ydata - f(xdata, params)

    so that the objective function is ::

           min   sum((ydata - f(xdata, params))**2, axis=0)
         params

    """
    x0 = array(x0, ndmin=1)
    n = len(x0)
    if type(args) != type(()):
        args = (args,)
    m = _check_func('leastsq', 'func', func, x0, args, n)[0]
    if n > m:
        raise TypeError('Improper input: N=%s must not exceed M=%s' % (n,m))
    if Dfun is None:
        if (maxfev == 0):
            maxfev = 200*(n + 1)
        retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
                gtol, maxfev, epsfcn, factor, diag)
    else:
        if col_deriv:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n,m))
        else:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m,n))
        if (maxfev == 0):
            maxfev = 100*(n + 1)
        retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv,
                ftol, xtol, gtol, maxfev, factor, diag)

    errors = {0:["Improper input parameters.", TypeError],
              1:["Both actual and predicted relative reductions "
                 "in the sum of squares\n  are at most %f" % ftol, None],
              2:["The relative error between two consecutive "
                 "iterates is at most %f" % xtol, None],
              3:["Both actual and predicted relative reductions in "
                 "the sum of squares\n  are at most %f and the "
                 "relative error between two consecutive "
                 "iterates is at \n  most %f" % (ftol,xtol), None],
              4:["The cosine of the angle between func(x) and any "
                 "column of the\n  Jacobian is at most %f in "
                 "absolute value" % gtol, None],
              5:["Number of calls to function has reached "
                 "maxfev = %d." % maxfev, ValueError],
              6:["ftol=%f is too small, no further reduction "
                 "in the sum of squares\n  is possible.""" % ftol, ValueError],
              7:["xtol=%f is too small, no further improvement in "
                 "the approximate\n  solution is possible." % xtol, ValueError],
              8:["gtol=%f is too small, func(x) is orthogonal to the "
                 "columns of\n  the Jacobian to machine "
                 "precision." % gtol, ValueError],
              'unknown':["Unknown error.", TypeError]}

    info = retval[-1]    # The FORTRAN return value

    if (info not in [1,2,3,4] and not full_output):
        if info in [5,6,7,8]:
            warnings.warn(errors[info][0], RuntimeWarning)
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                raise errors['unknown'][1](errors['unknown'][0])

    mesg = errors[info][0]
    if full_output:
        cov_x = None
        if info in [1,2,3,4]:
            from numpy.dual import inv
            from numpy.linalg import LinAlgError
            perm = take(eye(n),retval[1]['ipvt']-1,0)
            r = triu(transpose(retval[1]['fjac'])[:n,:])
            R = dot(r, perm)
            try:
                cov_x = inv(dot(transpose(R),R))
            except LinAlgError:
                pass
        return (retval[0], cov_x) + retval[1:-1] + (mesg, info)
    else:
        return (retval[0], info)
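
The Notes in this version explain that cov_x must be multiplied by the residual variance to become a covariance of the parameter estimates, which is what curve_fit does. A short sketch of that scaling, assuming the public scipy.optimize.leastsq and an illustrative straight-line model; the names below are not from the source above.

import numpy as np
from scipy.optimize import leastsq

def residuals(params, t, y):
    a, b = params
    return y - (a * t + b)

t = np.arange(10.0)
y = 3.0 * t + 1.0 + 0.1 * np.random.randn(t.size)

popt, cov_x, infodict, mesg, ier = leastsq(residuals, [1.0, 0.0],
                                           args=(t, y), full_output=1)
if ier in (1, 2, 3, 4) and cov_x is not None:
    # Scale by the residual variance s^2 = sum(fvec**2) / (M - N).
    s_sq = (infodict['fvec'] ** 2).sum() / (len(y) - len(popt))
    pcov = cov_x * s_sq                  # approximate covariance of popt
    perr = np.sqrt(np.diag(pcov))        # one-sigma parameter uncertainties
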
Example #3
def leastsq(func,
            x0,
            args=(),
            Dfun=None,
            full_output=0,
            col_deriv=0,
            ftol=1.49012e-8,
            xtol=1.49012e-8,
            gtol=0.0,
            maxfev=0,
            epsfcn=None,
            factor=100,
            diag=None):

    x0 = np.asarray(x0).flatten()
    n = len(x0)
    if not isinstance(args, tuple):
        args = (args, )
    shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
    m = shape[0]
    if n > m:
        raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
    if epsfcn is None:
        epsfcn = np.finfo(dtype).eps
    if Dfun is None:
        if maxfev == 0:
            maxfev = 200 * (n + 1)
        with _MINPACK_LOCK:
            retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
                                     gtol, maxfev, epsfcn, factor, diag)
    else:
        if col_deriv:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
        else:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
        if maxfev == 0:
            maxfev = 100 * (n + 1)
        with _MINPACK_LOCK:
            retval = _minpack._lmder(func, Dfun, x0, args, full_output,
                                     col_deriv, ftol, xtol, gtol, maxfev,
                                     factor, diag)

    errors = {
        0: ["Improper input parameters.", TypeError],
        1: [
            "Both actual and predicted relative reductions "
            "in the sum of squares\n  are at most %f" % ftol, None
        ],
        2: [
            "The relative error between two consecutive "
            "iterates is at most %f" % xtol, None
        ],
        3: [
            "Both actual and predicted relative reductions in "
            "the sum of squares\n  are at most %f and the "
            "relative error between two consecutive "
            "iterates is at \n  most %f" % (ftol, xtol), None
        ],
        4: [
            "The cosine of the angle between func(x) and any "
            "column of the\n  Jacobian is at most %f in "
            "absolute value" % gtol, None
        ],
        5: [
            "Number of calls to function has reached "
            "maxfev = %d." % maxfev, ValueError
        ],
        6: [
            "ftol=%f is too small, no further reduction "
            "in the sum of squares\n  is possible."
            "" % ftol, ValueError
        ],
        7: [
            "xtol=%f is too small, no further improvement in "
            "the approximate\n  solution is possible." % xtol, ValueError
        ],
        8: [
            "gtol=%f is too small, func(x) is orthogonal to the "
            "columns of\n  the Jacobian to machine "
            "precision." % gtol, ValueError
        ],
        'unknown': ["Unknown error.", TypeError]
    }

    info = retval[-1]  # The FORTRAN return value

    if info not in [1, 2, 3, 4] and not full_output:
        try:
            raise errors[info][1](errors[info][0])
        except KeyError:
            raise errors['unknown'][1](errors['unknown'][0])

    mesg = errors[info][0]
    if full_output:
        cov_x = None
        if info in [1, 2, 3, 4]:
            from numpy.dual import inv
            perm = np.take(np.eye(n), retval[1]['ipvt'] - 1, 0)
            r = np.triu(np.transpose(retval[1]['fjac'])[:n, :])
            R = np.dot(r, perm)
            try:
                cov_x = inv(np.dot(np.transpose(R), R))
            except np.linalg.LinAlgError:
                pass
        return (retval[0], cov_x) + retval[1:-1] + (mesg, info)
    else:
        return (retval[0], info)
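
Example #3 validates Dfun against shape (m, n) by default and (n, m) when col_deriv is set. Below is a sketch of supplying an analytic Jacobian, assuming the public scipy.optimize.leastsq and an illustrative linear model: with the default col_deriv=0 the Jacobian of the residuals has one row per data point and one column per parameter.

import numpy as np
from scipy.optimize import leastsq

def residuals(params, t, y):
    a, b = params
    return y - (a * t + b)

def jac(params, t, y):
    # d(residual)/da = -t, d(residual)/db = -1; shape (M, N) for col_deriv=0.
    return np.column_stack((-t, -np.ones_like(t)))

t = np.arange(20.0)
y = 2.0 * t - 1.0
popt, ier = leastsq(residuals, [0.0, 0.0], args=(t, y), Dfun=jac)
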
Example #4
def leastsq(func,
            x0,
            args=(),
            Dfun=None,
            full_output=0,
            col_deriv=0,
            ftol=1.49012e-8,
            xtol=1.49012e-8,
            gtol=0.0,
            maxfev=0,
            epsfcn=0.0,
            factor=100,
            diag=None,
            warning=True):
    """Minimize the sum of squares of a set of equations.

    ::

        x = arg min(sum(func(y)**2,axis=0))
                 y

    Parameters
    ----------
    func : callable
        should take at least one (possibly length N vector) argument and
        returns M floating point numbers.
    x0 : ndarray
        The starting estimate for the minimization.
    args : tuple
        Any extra arguments to func are placed in this tuple.
    Dfun : callable
        A function or method to compute the Jacobian of func with derivatives
        across the rows. If this is None, the Jacobian will be estimated.
    full_output : bool
        non-zero to return all optional outputs.
    col_deriv : bool
        non-zero to specify that the Jacobian function computes derivatives
        down the columns (faster, because there is no transpose operation).
    ftol : float
        Relative error desired in the sum of squares.
    xtol : float
        Relative error desired in the approximate solution.
    gtol : float
        Orthogonality desired between the function vector and the columns of
        the Jacobian.
    maxfev : int
        The maximum number of calls to the function. If zero, then 100*(N+1) is
        the maximum where N is the number of elements in x0.
    epsfcn : float
        A suitable step length for the forward-difference approximation of the
        Jacobian (for Dfun=None). If epsfcn is less than the machine precision,
        it is assumed that the relative errors in the functions are of the
        order of the machine precision.
    factor : float
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
    diag : sequence
        N positive entries that serve as scale factors for the variables.
    warning : bool
        Whether to print a warning message when the call is unsuccessful.
        Deprecated, use the warnings module instead.

    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for an unsuccessful
        call).
    cov_x : ndarray
        Uses the fjac and ipvt optional outputs to construct an
        estimate of the jacobian around the solution.  ``None`` if a
        singular matrix is encountered (indicates very flat curvature in
        some direction).  This matrix must be multiplied by the
        residual standard deviation to get the covariance of the
        parameter estimates -- see curve_fit.
    infodict : dict
        a dictionary of optional outputs with the keys::

            - 'nfev' : the number of function calls
            - 'fvec' : the function evaluated at the output
            - 'fjac' : A permutation of the R matrix of a QR
                     factorization of the final approximate
                     Jacobian matrix, stored column wise.
                     Together with ipvt, the covariance of the
                     estimate can be approximated.
            - 'ipvt' : an integer array of length N which defines
                     a permutation matrix, p, such that
                     fjac*p = q*r, where r is upper triangular
                     with diagonal elements of nonincreasing
                     magnitude. Column j of p is column ipvt(j)
                     of the identity matrix.
            - 'qtf'  : the vector (transpose(q) * fvec).
    mesg : str
        A string message giving information about the cause of failure.
    ier : int
        An integer flag.  If it is equal to 1, 2, 3 or 4, the solution was
        found.  Otherwise, the solution was not found. In either case, the
        optional output variable 'mesg' gives more information.

    Notes
    -----
    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.

    From scipy 0.8.0 `leastsq` returns an array of size one instead of a scalar
    when solving for a single parameter.

    """
    if not warning:
        msg = "The warning keyword is deprecated. Use the warnings module."
        warnings.warn(msg, DeprecationWarning)
    x0 = array(x0, ndmin=1)
    n = len(x0)
    if type(args) != type(()): args = (args, )
    m = check_func(func, x0, args, n)[0]
    if n > m:
        raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
    if Dfun is None:
        if (maxfev == 0):
            maxfev = 200 * (n + 1)
        retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol, gtol,
                                 maxfev, epsfcn, factor, diag)
    else:
        if col_deriv:
            check_func(Dfun, x0, args, n, (n, m))
        else:
            check_func(Dfun, x0, args, n, (m, n))
        if (maxfev == 0):
            maxfev = 100 * (n + 1)
        retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv,
                                 ftol, xtol, gtol, maxfev, factor, diag)

    errors = {
        0: ["Improper input parameters.", TypeError],
        1: [
            "Both actual and predicted relative reductions "
            "in the sum of squares\n  are at most %f" % ftol, None
        ],
        2: [
            "The relative error between two consecutive "
            "iterates is at most %f" % xtol, None
        ],
        3: [
            "Both actual and predicted relative reductions in "
            "the sum of squares\n  are at most %f and the "
            "relative error between two consecutive "
            "iterates is at \n  most %f" % (ftol, xtol), None
        ],
        4: [
            "The cosine of the angle between func(x) and any "
            "column of the\n  Jacobian is at most %f in "
            "absolute value" % gtol, None
        ],
        5: [
            "Number of calls to function has reached "
            "maxfev = %d." % maxfev, ValueError
        ],
        6: [
            "ftol=%f is too small, no further reduction "
            "in the sum of squares\n  is possible."
            "" % ftol, ValueError
        ],
        7: [
            "xtol=%f is too small, no further improvement in "
            "the approximate\n  solution is possible." % xtol, ValueError
        ],
        8: [
            "gtol=%f is too small, func(x) is orthogonal to the "
            "columns of\n  the Jacobian to machine "
            "precision." % gtol, ValueError
        ],
        'unknown': ["Unknown error.", TypeError]
    }

    info = retval[-1]  # The FORTRAN return value

    if (info not in [1, 2, 3, 4] and not full_output):
        if info in [5, 6, 7, 8]:
            warnings.warn(errors[info][0], RuntimeWarning)
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                raise errors['unknown'][1](errors['unknown'][0])

    mesg = errors[info][0]
    if full_output:
        cov_x = None
        if info in [1, 2, 3, 4]:
            from numpy.dual import inv
            from numpy.linalg import LinAlgError
            perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
            r = triu(transpose(retval[1]['fjac'])[:n, :])
            R = dot(r, perm)
            try:
                cov_x = inv(dot(transpose(R), R))
            except LinAlgError:
                pass
        return (retval[0], cov_x) + retval[1:-1] + (mesg, info)
    else:
        return (retval[0], info)
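
In this version (and in Example #2), a failed run with full_output=0 and status 5-8 is reported through warnings.warn rather than by raising, so a caller who needs to detect, say, maxfev exhaustion can catch the RuntimeWarning or inspect ier. A small sketch under those assumptions, with an intentionally tiny maxfev; model and names are illustrative.

import warnings
import numpy as np
from scipy.optimize import leastsq

def residuals(p, t, y):
    return y - p[0] * np.exp(-p[1] * t)

t = np.linspace(0.0, 4.0, 50)
y = 2.5 * np.exp(-1.3 * t)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    popt, ier = leastsq(residuals, [1.0, 1.0], args=(t, y), maxfev=3)
if ier == 5:
    print("stopped early:", caught[0].message if caught else "maxfev reached")
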
Example #5
def leastsq(func,x0,args=(),Dfun=None,full_output=0,col_deriv=0,ftol=1.49012e-8,xtol=1.49012e-8,gtol=0.0,maxfev=0,epsfcn=0.0,factor=100,diag=None,warning=True):
    """Minimize the sum of squares of a set of equations.

  Description:

    Return the point which minimizes the sum of squares of M
    (non-linear) equations in N unknowns given a starting estimate, x0,
    using a modification of the Levenberg-Marquardt algorithm.

                    x = arg min(sum(func(y)**2,axis=0))
                             y

  Inputs:

    func -- A Python function or method which takes at least one
            (possibly length N vector) argument and returns M
            floating point numbers.
    x0 -- The starting estimate for the minimization.
    args -- Any extra arguments to func are placed in this tuple.
    Dfun -- A function or method to compute the Jacobian of func with
            derivatives across the rows. If this is None, the
            Jacobian will be estimated.
    full_output -- non-zero to return all optional outputs.
    col_deriv -- non-zero to specify that the Jacobian function
                 computes derivatives down the columns (faster, because
                 there is no transpose operation).
    warning -- True to print a warning message when the call is
               unsuccessful; False to suppress the warning message.

  Outputs: (x, {cov_x, infodict, mesg}, ier)

    x -- the solution (or the result of the last iteration for an
         unsuccessful call).

    cov_x -- uses the fjac and ipvt optional outputs to construct an
             estimate of the jacobian around the solution.
             None if a singular matrix is encountered (indicates
             very flat curvature in some direction).  This
             matrix must be multiplied by the residual standard
             deviation to get the covariance of the parameter 
             estimates --- see curve_fit.
    infodict -- a dictionary of optional outputs with the keys:
                'nfev' : the number of function calls
                'fvec' : the function evaluated at the output
                'fjac' : A permutation of the R matrix of a QR
                         factorization of the final approximate
                         Jacobian matrix, stored column wise.
                         Together with ipvt, the covariance of the
                         estimate can be approximated.
                'ipvt' : an integer array of length N which defines
                         a permutation matrix, p, such that
                         fjac*p = q*r, where r is upper triangular
                         with diagonal elements of nonincreasing
                         magnitude. Column j of p is column ipvt(j)
                         of the identity matrix.
                'qtf'  : the vector (transpose(q) * fvec).
    mesg -- a string message giving information about the cause of failure.
    ier -- an integer flag.  If it is equal to 1, 2, 3 or 4, the
           solution was found.  Otherwise, the solution was not
           found. In either case, the optional output variable 'mesg'
           gives more information.


  Extended Inputs:

   ftol -- Relative error desired in the sum of squares.
   xtol -- Relative error desired in the approximate solution.
   gtol -- Orthogonality desired between the function vector
           and the columns of the Jacobian.
   maxfev -- The maximum number of calls to the function. If zero,
             then 100*(N+1) is the maximum where N is the number
             of elements in x0.
   epsfcn -- A suitable step length for the forward-difference
             approximation of the Jacobian (for Dfun=None). If
             epsfcn is less than the machine precision, it is assumed
             that the relative errors in the functions are of
             the order of the machine precision.
   factor -- A parameter determining the initial step bound
             (factor * || diag * x||). Should be in interval (0.1,100).
   diag -- A sequence of N positive entries that serve as
           scale factors for the variables.

  Remarks:

    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.

  See also:

      scikits.openopt, which offers a unified syntax to call this and other solvers

      fmin, fmin_powell, fmin_cg,
             fmin_bfgs, fmin_ncg -- multivariate local optimizers

      fmin_l_bfgs_b, fmin_tnc,
             fmin_cobyla -- constrained multivariate optimizers

      anneal, brute -- global optimizers

      fminbound, brent, golden, bracket -- local scalar minimizers

      fsolve -- n-dimensional root-finding

      brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding

      fixed_point -- scalar and vector fixed-point finder

      curve_fit -- find parameters for a curve-fitting problem. 

    """
    x0 = array(x0,ndmin=1)
    n = len(x0)
    if type(args) != type(()): args = (args,)
    m = check_func(func,x0,args,n)[0]
    if Dfun is None:
        if (maxfev == 0):
            maxfev = 200*(n+1)
        retval = _minpack._lmdif(func,x0,args,full_output,ftol,xtol,gtol,maxfev,epsfcn,factor,diag)
    else:
        if col_deriv:
            check_func(Dfun,x0,args,n,(n,m))
        else:
            check_func(Dfun,x0,args,n,(m,n))
        if (maxfev == 0):
            maxfev = 100*(n+1)
        retval = _minpack._lmder(func,Dfun,x0,args,full_output,col_deriv,ftol,xtol,gtol,maxfev,factor,diag)

    errors = {0:["Improper input parameters.", TypeError],
              1:["Both actual and predicted relative reductions in the sum of squares\n  are at most %f" % ftol, None],
              2:["The relative error between two consecutive iterates is at most %f" % xtol, None],
              3:["Both actual and predicted relative reductions in the sum of squares\n  are at most %f and the relative error between two consecutive iterates is at \n  most %f" % (ftol,xtol), None],
              4:["The cosine of the angle between func(x) and any column of the\n  Jacobian is at most %f in absolute value" % gtol, None],
              5:["Number of calls to function has reached maxfev = %d." % maxfev, ValueError],
              6:["ftol=%f is too small, no further reduction in the sum of squares\n  is possible.""" % ftol, ValueError],
              7:["xtol=%f is too small, no further improvement in the approximate\n  solution is possible." % xtol, ValueError],
              8:["gtol=%f is too small, func(x) is orthogonal to the columns of\n  the Jacobian to machine precision." % gtol, ValueError],
              'unknown':["Unknown error.", TypeError]}

    info = retval[-1]    # The FORTRAN return value

    if (info not in [1,2,3,4] and not full_output):
        if info in [5,6,7,8]:
            if warning: print("Warning: " + errors[info][0])
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                raise errors['unknown'][1](errors['unknown'][0])

    if n == 1:
        retval = (retval[0][0],) + retval[1:]

    mesg = errors[info][0]
    if full_output:
        from numpy.dual import inv
        from numpy.linalg import LinAlgError
        perm = take(eye(n),retval[1]['ipvt']-1,0)
        r = triu(transpose(retval[1]['fjac'])[:n,:])
        R = dot(r, perm)
        try:
            cov_x = inv(dot(transpose(R),R))
        except LinAlgError:
            cov_x = None
        return (retval[0], cov_x) + retval[1:-1] + (mesg,info)
    else:
        return (retval[0], info)
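
Examples #1 and #5 squeeze the result to a scalar when there is a single unknown (the n == 1 branch), while the note in Example #4 says that from scipy 0.8.0 onward a length-1 array is returned instead. A tiny sketch of the single-parameter case on a current scipy, with an illustrative one-parameter model.

import numpy as np
from scipy.optimize import leastsq

def residuals(p, t, y):
    return y - p[0] * t      # one unknown slope

t = np.arange(5.0)
y = 4.0 * t
popt, ier = leastsq(residuals, [1.0], args=(t, y))
print(popt)                  # a length-1 array on scipy >= 0.8.0, e.g. array([4.])
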