Example #1
def calcN(classKernels, trainLabels):
    N = zeros((len(trainLabels), len(trainLabels)))
    for i, l in enumerate(unique(trainLabels)):
        numExamplesWithLabel = len(where(trainLabels == l)[0])
        Idiff = identity(numExamplesWithLabel, float64) - (1.0 / numExamplesWithLabel) * ones(numExamplesWithLabel, float64)
        firstDot = dot(classKernels[i], Idiff)
        labelTerm = dot(firstDot, transpose(classKernels[i]))
        N += labelTerm
    N = nan_to_num(N)
    #make N more numerically stable
    #if I had more time, I would train this parameter, but I don't
    additionToN = ((mean(diag(N)) + 1) / 100.0) * identity(N.shape[0], float64)
    N += additionToN
            
    #make sure N is invertible
    for i in range(1000):
        try:
            inv(N)
        except LinAlgError:
            #doing this to make sure the matrix is invertible
            #large value supported by section titled
            #"numerical issues and regularization" in the paper
            N += additionToN
        else:
            break

    return N
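The ridge term above is a standard fix for an ill-conditioned matrix. The same idea as a minimal standalone sketch, assuming plain NumPy (the names regularize_until_invertible, K, and eps are illustrative, not from the original project):

import numpy as np

def regularize_until_invertible(K, eps=None, max_tries=1000):
    """Add a scaled identity to K until np.linalg.inv succeeds."""
    K = np.asarray(K, dtype=np.float64)
    if eps is None:
        #same spirit as calcN: a fraction of the mean diagonal magnitude
        eps = (np.mean(np.diag(K)) + 1.0) / 100.0
    ridge = eps * np.identity(K.shape[0])
    for _ in range(max_tries):
        try:
            np.linalg.inv(K)
        except np.linalg.LinAlgError:
            K = K + ridge
        else:
            break
    return K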
Example #2
def fishersLinearDiscriminent(trainData, trainLabels, testData, testLabels):
    numClasses = max(trainLabels) + 1
    N = [0] * numClasses
    m = [0] * numClasses
    for x,t in zip(trainData,trainLabels):
        m[t] += x
        N[t] += 1
    for i in range(numClasses):
        m[i] /= N[i]
    Sw = zeros((trainData.shape[1], trainData.shape[1]))
    for x,t in zip(trainData, trainLabels):
        Sw += outer(x-m[t], x-m[t])
    try:
        inv(Sw)
    except LinAlgError:
        #Sw is singular: add a small ridge so the inverse below exists
        Sw += 0.1 * identity(Sw.shape[0], float64)
    
    w = dot(inv(Sw),(m[0] - m[1]))
    meanVect = (N[0]*m[0] + N[1]*m[1]) / sum(N)
    
    numCorrect = 0
    for x,t in zip(testData, testLabels):
        if dot(w, (x-meanVect)) > 0:
            if t == 1:
                numCorrect += 1
        else:
            if t == 0:
                numCorrect += 1
    return float(numCorrect) / float(len(testLabels))
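The discriminant direction w = Sw^-1 (m0 - m1) is easy to sanity-check on toy data; here is a minimal sketch assuming plain NumPy and two well-separated Gaussian classes (all names are illustrative):

import numpy as np

rng = np.random.default_rng(0)
class0 = rng.normal(loc=[0.0, 0.0], scale=1.0, size=(50, 2))
class1 = rng.normal(loc=[4.0, 4.0], scale=1.0, size=(50, 2))

m0, m1 = class0.mean(axis=0), class1.mean(axis=0)
#within-class scatter matrix
Sw = sum(np.outer(x - m0, x - m0) for x in class0) + \
     sum(np.outer(x - m1, x - m1) for x in class1)
#solve() is numerically preferable to forming inv(Sw) explicitly
w = np.linalg.solve(Sw, m0 - m1)
print("discriminant direction:", w)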
Example #3
def estimateGaussian(trainData, trainLabels):
    numClasses = max(trainLabels) + 1
    N = [0]* numClasses
    mu = [0.0] * numClasses
    #use a list comprehension so each class gets its own matrix;
    #[zeros(...)] * numClasses would alias one array numClasses times
    Slist = [zeros((trainData.shape[1], trainData.shape[1])) for _ in range(numClasses)]
    pList = [0.0] * numClasses
    
    #calculate N, and sum x's for mu
    for x,t in zip(trainData, trainLabels):
        N[t] += 1
        mu[t] += x
    #normalize mu
    for i in range(numClasses):
        mu[i] = mu[i] / float(N[i])
    #calculate the class probabilities
    for i in range(numClasses):
        pList[i] = float(N[i]) / sum(N)
    #calculate S0 and S1
    for x,t in zip(trainData, trainLabels):
        Slist[t] += outer(x - mu[t], x - mu[t])
    #check invertibility once per class after accumulation,
    #rather than re-inverting after every sample
    for i in range(numClasses):
        try:
            inv(Slist[i])
        except LinAlgError:
            Slist[i] += 0.1 * identity(Slist[i].shape[0], float64)
        
    return (numClasses, N, mu, Slist, pList)
Example #4
    def _cov2wt(self, cov):
        """ Convert covariance matrix(-ices) to weights.
        """

        from numpy.linalg import inv  # numpy.dual is deprecated

        if len(cov.shape) == 2:
            return inv(cov)
        else:
            weights = numpy.zeros(cov.shape, float)

            for i in range(cov.shape[-1]):  # n
                weights[:,:,i] = inv(cov[:,:,i])

            return weights
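On recent NumPy, np.linalg.inv broadcasts over leading dimensions, so a whole stack of covariance matrices can be inverted in one call. Note that _cov2wt stacks along the last axis, so the stacking axis has to be moved first; a minimal sketch:

import numpy as np

cov = np.stack([np.diag([1.0, 2.0]), np.diag([3.0, 4.0])], axis=-1)  #shape (2, 2, n)
#move the stacking axis to the front: (n, 2, 2), invert, then move it back
weights = np.moveaxis(np.linalg.inv(np.moveaxis(cov, -1, 0)), 0, -1)
assert np.allclose(weights[:, :, 0], np.linalg.inv(cov[:, :, 0]))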
Example #5

def factor_target_pose(world_homography, target_homography):
    """
    Find the rigid transformation that maps target coordinates to world coordinates,
    given homographies that map world coordinates and target coordinates to a common
    coordinate system (i.e., the camera).
    """
    return extract_transformation(np.dot(inv(world_homography), target_homography))
Example #6
def gaussian_prob(x, m, C, use_log=False):
    """
    % GAUSSIAN_PROB Evaluate a multivariate Gaussian density.
    % p = gaussian_prob(X, m, C)
    % p(i) = N(X(:,i), m, C) where C = covariance matrix and each COLUMN of x is a datavector
    
    % p = gaussian_prob(X, m, C, 1) returns log N(X(:,i), m, C) (to prevent underflow).
    %
    % If X has size dxN, then p has size Nx1, where N = number of examples
    """

    m = np.asmatrix(m)
    if m.shape[0] == 1:
        m = m.T

    d, N = x.shape

    M = np.tile(m, (1, N))  # replicate the mean across columns
    denom = (2 * pi) ** (0.5 * d) * np.sqrt(np.abs(det(C)))
    mahal = np.sum(np.multiply(np.dot((x - M).T, inv(C)), (x - M).T), 1)  # Chris Bregler's trick
    if np.any(mahal < 0):
        logging.warning("mahal < 0 => C is not psd")
    if use_log:
        p = -0.5 * mahal - np.log(denom)
    else:
        p = np.exp(-0.5 * mahal) / (denom + EPS)

    return np.asarray(p)[:, 0]
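A quick cross-check of gaussian_prob against SciPy's reference implementation (assuming scipy is installed; the data here are illustrative):

import numpy as np
from scipy.stats import multivariate_normal

d, N = 3, 5
rng = np.random.default_rng(1)
x = rng.normal(size=(d, N))  #each COLUMN is a data vector
m = np.zeros(d)
C = np.eye(d)

expected = multivariate_normal(mean=m, cov=C).logpdf(x.T)
#should agree with gaussian_prob(x, m, C, use_log=True) up to rounding error
print(expected)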
Example #7
def doubleU(phi, l, tVector):
    #can't call lambda by its name, because that's a reserved word in Python,
    #so I'm calling it l
    lIdentity = l*identity(phi.shape[1])
    phiDotPhi = dot(phi.transpose(), phi)
    firstTerm = inv(lIdentity + phiDotPhi)
    phiDotT = dot(phi.transpose(), tVector)
    return squeeze(dot(firstTerm, phiDotT))
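doubleU computes the regularized least-squares (ridge) weights w = (lambda*I + Phi^T Phi)^-1 Phi^T t. Numerically it is usually better to solve the linear system than to form the inverse; a minimal equivalent sketch assuming NumPy:

import numpy as np

def double_u_solve(phi, l, t_vector):
    """Ridge solution via solve() instead of an explicit inverse."""
    A = l * np.identity(phi.shape[1]) + phi.T @ phi
    b = phi.T @ t_vector
    return np.squeeze(np.linalg.solve(A, b))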
Example #8
def trainKFD(trainKernel, trainLabels):
    classKernels = getClassKernels(trainKernel, trainLabels)
    M = calcM(classKernels, trainLabels)
    N = calcN(classKernels, trainLabels)
    '''
    print "train kernel:",trainKernel
    print "Class kernels:", classKernels
    print "M",M
    print "N",N
    '''
    try:
        solutionMatrix = dot(inv(N), M)
    except LinAlgError:
        #if we get a singular matrix here, there isn't much we can do about it
        #just skip this configuration
        solutionMatrix = identity(N.shape[0], float64)
        
    solutionMatrix = nan_to_num(solutionMatrix)
    
    eVals, eVects = eig(solutionMatrix)
    #find the 'leading' term i.e. find the eigenvector with the highest eigenvalue
    alphaVect = eVects[:, absolute(eVals).argmax()].real.astype(float64)
    trainProjections = dot(trainKernel, alphaVect)
    '''
    print 'alpha = ', alphaVect
    print 'train kernel = ', trainKernel
    print 'train projection = ', trainProjections
    '''     
    #train sigmoid based on evaluation accuracy
    #accuracyError = lambda x: 100.0 - evaluations(trainLabels, classifyKFDValues(trainProjections, *list(x)))[0]
    accuracyError = lambda x: 100.0 - evaluations(trainLabels, classifyKFDValues(trainProjections, *x))[0]
    #get an initial guess by brute force
    #ranges = ((-100, 100, 1), (-100, 100, 1))
    #x0 = brute(accuracyError, ranges)
    
    #popt = minimize(accuracyError, x0.tolist(), method="Powell").x

    rc = LSFAIL
    niter = 0
    i = 0
    while rc in (LSFAIL, INFEASIBLE, CONSTANT, NOPROGRESS, USERABORT, MAXFUN) or niter <= 1:
        if i == 10:
            break
        #get a 'smarter' x0
        #ranges = ((-1000, 1000, 100), (-1000, 1000, 100))
        ranges = ((-10**(i + 1), 10**(i + 1), 10**i),) * 2
        x0 = brute(accuracyError, ranges)
        (popt, niter, rc) = fmin_tnc(accuracyError, x0, approx_grad=True)
        #popt = fmin_tnc(accuracyError, x0.tolist(), approx_grad=True)[0]
        i += 1
    
    return (alphaVect, popt)
Example #9
def generativeSharedCov(trainData, trainLabels, testData, testLabels):
    (numClasses, N, mu, Slist, pList) = estimateGaussian(trainData, trainLabels)
    #i.e. calculate everything we need for the model
    #normalize the S's, and calculate the final S
    S = zeros(Slist[0].shape)
    for i in range(numClasses):
        Slist[i] = Slist[i] / float(N[i])
        S += pList[i] * Slist[i]
    
    Sinv = inv(S)
    w = dot(Sinv, (mu[1] - mu[0]))
    w0 = -0.5 * dot(dot(mu[1], Sinv), mu[1]) + 0.5 * dot(dot(mu[0], Sinv), mu[0]) + log(pList[1]/pList[0])
    
    numCorrect = 0
    for x,t in zip(testData, testLabels):
        probClass1 = sigmoid(dot(w, x) + w0)
        if probClass1 >= 0.5:
            if t == 1:
                numCorrect += 1
        else:
            if t == 0:
                numCorrect += 1
    return float(numCorrect) / float(len(testLabels))
Example #10
def logisticRegression(trainData, trainLabels, testData, testLabels):
    #adjust the data, adding the 'free parameter' to the train data
    trainDataWithFreeParam = hstack((trainData.copy(), ones(trainData.shape[0])[:,newaxis]))
    testDataWithFreeParam = hstack((testData.copy(), ones(testData.shape[0])[:,newaxis]))
    
    alpha = 10
    oldW = zeros(trainDataWithFreeParam.shape[1])
    newW = ones(trainDataWithFreeParam.shape[1])
    iteration = 0
    
    trainDataWithFreeParamTranspose = transpose(trainDataWithFreeParam)
    alphaI = alpha * identity(oldW.shape[0])
    
    while not array_equal(oldW, newW):
        if iteration == 100:
            break
        oldW = newW.copy()
        
        yVect = yVector(oldW, trainDataWithFreeParam)
        r = R(yVect)

        firstTerm = inv(alphaI + dot(dot(trainDataWithFreeParamTranspose, r), trainDataWithFreeParam))
        secondTerm = dot(trainDataWithFreeParamTranspose, (yVect-trainLabels)) + alpha * oldW
        newW = oldW - dot(firstTerm, secondTerm)
        iteration += 1
                              
        
    #see how well we did
    numCorrect  = 0
    for x,t in zip(testDataWithFreeParam, testLabels):
        
        if yScalar(newW, x) >= 0.5:
            if t == 1:
                numCorrect += 1
        else:
            if t == 0:
                numCorrect += 1
    return float(numCorrect) / float(len(testLabels))
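The snippet assumes the helpers yVector, R, and yScalar from its original module. For the standard IRLS formulation of logistic regression they would look roughly like this (a reconstruction, not the original code):

import numpy as np

def sigmoid(a):
    return 1.0 / (1.0 + np.exp(-a))

def yScalar(w, x):
    #predicted probability of class 1 for a single example
    return sigmoid(np.dot(w, x))

def yVector(w, data):
    #predicted probabilities for every row of the design matrix
    return sigmoid(data @ w)

def R(yVect):
    #diagonal IRLS weighting matrix with entries y_n * (1 - y_n)
    return np.diag(yVect * (1.0 - yVect))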
Example #11
def leastsq(func,
            x0,
            args=(),
            Dfun=None,
            full_output=0,
            col_deriv=0,
            ftol=1.49012e-8,
            xtol=1.49012e-8,
            gtol=0.0,
            maxfev=0,
            epsfcn=0.0,
            factor=100,
            diag=None,
            warning=True):
    """Minimize the sum of squares of a set of equations.

  Description:

    Return the point which minimizes the sum of squares of M
    (non-linear) equations in N unknowns given a starting estimate, x0,
    using a modification of the Levenberg-Marquardt algorithm.

                    x = arg min(sum(func(y)**2,axis=0))
                             y

  Inputs:

    func -- A Python function or method which takes at least one
            (possibly length N vector) argument and returns M
            floating point numbers.
    x0 -- The starting estimate for the minimization.
    args -- Any extra arguments to func are placed in this tuple.
    Dfun -- A function or method to compute the Jacobian of func with
            derivatives across the rows. If this is None, the
            Jacobian will be estimated.
    full_output -- non-zero to return all optional outputs.
    col_deriv -- non-zero to specify that the Jacobian function
                 computes derivatives down the columns (faster, because
                 there is no transpose operation).
    warning -- True to print a warning message when the call is
               unsuccessful; False to suppress the warning message.

  Outputs: (x, {cov_x, infodict, mesg}, ier)

    x -- the solution (or the result of the last iteration for an
         unsuccessful call).

    cov_x -- uses the fjac and ipvt optional outputs to construct an
             estimate of the jacobian around the solution.
             None if a singular matrix encountered (indicates
             very flat curvature in some direction).  This
             matrix must be multiplied by the residual variance
             to get the covariance of the parameter
             estimates --- see curve_fit.
    infodict -- a dictionary of optional outputs with the keys:
                'nfev' : the number of function calls
                'fvec' : the function evaluated at the output
                'fjac' : A permutation of the R matrix of a QR
                         factorization of the final approximate
                         Jacobian matrix, stored column wise.
                         Together with ipvt, the covariance of the
                         estimate can be approximated.
                'ipvt' : an integer array of length N which defines
                         a permutation matrix, p, such that
                         fjac*p = q*r, where r is upper triangular
                         with diagonal elements of nonincreasing
                         magnitude. Column j of p is column ipvt(j)
                         of the identity matrix.
                'qtf'  : the vector (transpose(q) * fvec).
    mesg -- a string message giving information about the cause of failure.
    ier -- an integer flag.  If it is equal to 1, 2, 3 or 4, the
           solution was found.  Otherwise, the solution was not
           found. In either case, the optional output variable 'mesg'
           gives more information.


  Extended Inputs:

   ftol -- Relative error desired in the sum of squares.
   xtol -- Relative error desired in the approximate solution.
   gtol -- Orthogonality desired between the function vector
           and the columns of the Jacobian.
   maxfev -- The maximum number of calls to the function. If zero,
             then 100*(N+1) is the maximum where N is the number
             of elements in x0.
   epsfcn -- A suitable step length for the forward-difference
             approximation of the Jacobian (for Dfun=None). If
             epsfcn is less than the machine precision, it is assumed
             that the relative errors in the functions are of
             the order of the machine precision.
   factor -- A parameter determining the initial step bound
             (factor * || diag * x||). Should be in interval (0.1,100).
   diag -- A sequence of N positive entries that serve as
           scale factors for the variables.

  Remarks:

    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.

  See also:

      scikits.openopt, which offers a unified syntax to call this and other solvers

      fmin, fmin_powell, fmin_cg,
             fmin_bfgs, fmin_ncg -- multivariate local optimizers

      fmin_l_bfgs_b, fmin_tnc,
             fmin_cobyla -- constrained multivariate optimizers

      anneal, brute -- global optimizers

      fminbound, brent, golden, bracket -- local scalar minimizers

      fsolve -- n-dimensional root-finding

      brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding

      fixed_point -- scalar and vector fixed-point finder

      curve_fit -- find parameters for a curve-fitting problem. 

    """
    x0 = array(x0, ndmin=1)
    n = len(x0)
    if type(args) != type(()): args = (args, )
    m = check_func(func, x0, args, n)[0]
    if Dfun is None:
        if (maxfev == 0):
            maxfev = 200 * (n + 1)
        retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol, gtol,
                                 maxfev, epsfcn, factor, diag)
    else:
        if col_deriv:
            check_func(Dfun, x0, args, n, (n, m))
        else:
            check_func(Dfun, x0, args, n, (m, n))
        if (maxfev == 0):
            maxfev = 100 * (n + 1)
        retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv,
                                 ftol, xtol, gtol, maxfev, factor, diag)

    errors = {
        0: ["Improper input parameters.", TypeError],
        1: [
            "Both actual and predicted relative reductions in the sum of squares\n  are at most %f"
            % ftol, None
        ],
        2: [
            "The relative error between two consecutive iterates is at most %f"
            % xtol, None
        ],
        3: [
            "Both actual and predicted relative reductions in the sum of squares\n  are at most %f and the relative error between two consecutive iterates is at \n  most %f"
            % (ftol, xtol), None
        ],
        4: [
            "The cosine of the angle between func(x) and any column of the\n  Jacobian is at most %f in absolute value"
            % gtol, None
        ],
        5: [
            "Number of calls to function has reached maxfev = %d." % maxfev,
            ValueError
        ],
        6: [
            "ftol=%f is too small, no further reduction in the sum of squares\n  is possible."
            "" % ftol, ValueError
        ],
        7: [
            "xtol=%f is too small, no further improvement in the approximate\n  solution is possible."
            % xtol, ValueError
        ],
        8: [
            "gtol=%f is too small, func(x) is orthogonal to the columns of\n  the Jacobian to machine precision."
            % gtol, ValueError
        ],
        'unknown': ["Unknown error.", TypeError]
    }

    info = retval[-1]  # The FORTRAN return value

    if (info not in [1, 2, 3, 4] and not full_output):
        if info in [5, 6, 7, 8]:
            if warning: print("Warning: " + errors[info][0])
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                raise errors['unknown'][1](errors['unknown'][0])

    if n == 1:
        retval = (retval[0][0], ) + retval[1:]

    mesg = errors[info][0]
    if full_output:
        from numpy.linalg import inv, LinAlgError  # numpy.dual is deprecated
        perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
        r = triu(transpose(retval[1]['fjac'])[:n, :])
        R = dot(r, perm)
        try:
            cov_x = inv(dot(transpose(R), R))
        except LinAlgError:
            cov_x = None
        return (retval[0], cov_x) + retval[1:-1] + (mesg, info)
    else:
        return (retval[0], info)
Example #12
def approx_covar(hess, red_chi2):
    #covariance estimate from the Hessian of chi-squared: cov ~ red_chi2 * inv(H / 2)
    return red_chi2 * inv(hess / 2.)
Example #13
    def leastsq(self, params=None, **kws):
        """
        Use Levenberg-Marquardt minimization to perform a fit.
        This assumes that ModelParameters have been stored, and a function to
        minimize has been properly set up.

        This wraps scipy.optimize.leastsq.

        When possible, this calculates the estimated uncertainties and
        variable correlations from the covariance matrix.

        Writes outputs to many internal attributes.

        Parameters
        ----------
        params : Parameters, optional
           Parameters to use as starting points.
        kws : dict, optional
            Minimizer options to pass to scipy.optimize.leastsq.

        Returns
        -------
        success : bool
            True if fit was successful, False if not.
        """
        result = self.prepare_fit(params=params)
        vars = result.init_vals
        nvars = len(vars)
        lskws = dict(full_output=1, xtol=1.e-7, ftol=1.e-7, col_deriv=False,
                     gtol=1.e-7, maxfev=2000*(nvars+1), Dfun=None)

        lskws.update(self.kws)
        lskws.update(kws)

        self.col_deriv = False
        if lskws['Dfun'] is not None:
            self.jacfcn = lskws['Dfun']
            self.col_deriv = lskws['col_deriv']
            lskws['Dfun'] = self.__jacobian

        # suppress runtime warnings during fit and error analysis
        orig_warn_settings = np.geterr()
        np.seterr(all='ignore')

        lsout = scipy_leastsq(self.__residual, vars, **lskws)
        _best, _cov, infodict, errmsg, ier = lsout
        result.aborted = self._abort
        self._abort = False

        result.residual = resid = infodict['fvec']
        result.ier = ier
        result.lmdif_message = errmsg
        result.message = 'Fit succeeded.'
        result.success = ier in [1, 2, 3, 4]
        if result.aborted:
            result.message = 'Fit aborted by user callback.'
            result.success = False
        elif ier == 0:
            result.message = 'Invalid Input Parameters.'
        elif ier == 5:
            result.message = self.err_maxfev % lskws['maxfev']
        else:
            result.message = 'Tolerance seems to be too small.'

        result.ndata = len(resid)

        result.chisqr = (resid**2).sum()
        result.nfree = (result.ndata - nvars)
        result.redchi = result.chisqr / result.nfree
        _log_likelihood = result.ndata * np.log(result.redchi)
        result.aic = _log_likelihood + 2 * nvars
        result.bic = _log_likelihood + np.log(result.ndata) * nvars

        params = result.params

        # need to map _best values to params, then calculate the
        # grad for the variable parameters
        grad = ones_like(_best)
        vbest = ones_like(_best)

        # ensure that _best, vbest, and grad are not
        # broken 1-element ndarrays.
        if len(np.shape(_best)) == 0:
            _best = np.array([_best])
        if len(np.shape(vbest)) == 0:
            vbest = np.array([vbest])
        if len(np.shape(grad)) == 0:
            grad = np.array([grad])

        for ivar, name in enumerate(result.var_names):
            grad[ivar] = params[name].scale_gradient(_best[ivar])
            vbest[ivar] = params[name].value

        # modified from JJ Helmus' leastsqbound.py
        infodict['fjac'] = transpose(transpose(infodict['fjac']) /
                                     take(grad, infodict['ipvt'] - 1))
        rvec = dot(triu(transpose(infodict['fjac'])[:nvars, :]),
                   take(eye(nvars), infodict['ipvt'] - 1, 0))
        try:
            result.covar = inv(dot(transpose(rvec), rvec))
        except (LinAlgError, ValueError):
            result.covar = None

        has_expr = False
        for par in params.values():
            par.stderr, par.correl = 0, None
            has_expr = has_expr or par.expr is not None

        # self.errorbars = error bars were successfully estimated
        result.errorbars = (result.covar is not None)
        if result.aborted:
            result.errorbars = False
        if result.errorbars:
            if self.scale_covar:
                result.covar *= result.redchi
            for ivar, name in enumerate(result.var_names):
                par = params[name]
                par.stderr = sqrt(result.covar[ivar, ivar])
                par.correl = {}
                try:
                    result.errorbars = result.errorbars and (par.stderr > 0.0)
                    for jvar, varn2 in enumerate(result.var_names):
                        if jvar != ivar:
                            par.correl[varn2] = (result.covar[ivar, jvar] /
                                 (par.stderr * sqrt(result.covar[jvar, jvar])))
                except Exception:
                    result.errorbars = False

            if has_expr:
                # uncertainties on constrained parameters:
                #   get values with uncertainties (including correlations),
                #   temporarily set Parameter values to these,
                #   re-evaluate constrained parameters to extract stderr
                #   and then set Parameters back to best-fit value
                try:
                    uvars = uncertainties.correlated_values(vbest, result.covar)
                except (LinAlgError, ValueError):
                    uvars = None
                if uvars is not None:
                    for par in params.values():
                        eval_stderr(par, uvars, result.var_names, params)
                    # restore nominal values
                    for v, nam in zip(uvars, result.var_names):
                        params[nam].value = v.nominal_value

        if not result.errorbars:
            result.message = '%s. Could not estimate error-bars' % result.message

        np.seterr(**orig_warn_settings)
        return result
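Typical usage of this lmfit-style leastsq goes through the minimize interface; a hedged sketch assuming the lmfit package (the model and data are illustrative):

import numpy as np
import lmfit

x = np.linspace(0, 10, 100)
data = 3.0 * np.exp(-x / 2.0) + 0.02 * np.random.default_rng(3).normal(size=x.size)

def residual(params, x, data):
    model = params['amp'] * np.exp(-x / params['tau'])
    return data - model

params = lmfit.Parameters()
params.add('amp', value=1.0)
params.add('tau', value=1.0, min=0.1)  #bounded parameter

result = lmfit.minimize(residual, params, args=(x, data), method='leastsq')
print(result.params['amp'].value, result.params['amp'].stderr)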
Example #14

def back_project(homography, points):
    return transform(inv(homography), points)
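back_project relies on a transform helper from its module. A plausible sketch of applying a 3x3 homography to 2-D points in homogeneous coordinates (a reconstruction, not the original code):

import numpy as np

def transform(homography, points):
    """Apply a 3x3 homography to an (N, 2) array of points."""
    pts_h = np.hstack([points, np.ones((len(points), 1))])  #to homogeneous
    mapped = pts_h @ homography.T
    return mapped[:, :2] / mapped[:, 2:3]  #back to Cartesian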
Example #15
def leastsq(func,
            x0,
            args=(),
            Dfun=None,
            full_output=0,
            col_deriv=0,
            ftol=1.49012e-8,
            xtol=1.49012e-8,
            gtol=0.0,
            maxfev=0,
            epsfcn=None,
            factor=100,
            diag=None):
    """
    Minimize the sum of squares of a set of equations.

    ::

        x = arg min(sum(func(y)**2,axis=0))
                 y

    Parameters
    ----------
    func : callable
        should take at least one (possibly length N vector) argument and
        returns M floating point numbers. It must not return NaNs or
        fitting might fail.
    x0 : ndarray
        The starting estimate for the minimization.
    args : tuple, optional
        Any extra arguments to func are placed in this tuple.
    Dfun : callable, optional
        A function or method to compute the Jacobian of func with derivatives
        across the rows. If this is None, the Jacobian will be estimated.
    full_output : bool, optional
        non-zero to return all optional outputs.
    col_deriv : bool, optional
        non-zero to specify that the Jacobian function computes derivatives
        down the columns (faster, because there is no transpose operation).
    ftol : float, optional
        Relative error desired in the sum of squares.
    xtol : float, optional
        Relative error desired in the approximate solution.
    gtol : float, optional
        Orthogonality desired between the function vector and the columns of
        the Jacobian.
    maxfev : int, optional
        The maximum number of calls to the function. If `Dfun` is provided
        then the default `maxfev` is 100*(N+1) where N is the number of elements
        in x0, otherwise the default `maxfev` is 200*(N+1).
    epsfcn : float, optional
        A variable used in determining a suitable step length for the forward-
        difference approximation of the Jacobian (for Dfun=None).
        Normally the actual step length will be sqrt(epsfcn)*x
        If epsfcn is less than the machine precision, it is assumed that the
        relative errors are of the order of the machine precision.
    factor : float, optional
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
    diag : sequence, optional
        N positive entries that serve as a scale factors for the variables.

    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for an unsuccessful
        call).
    cov_x : ndarray
        Uses the fjac and ipvt optional outputs to construct an
        estimate of the jacobian around the solution. None if a
        singular matrix encountered (indicates very flat curvature in
        some direction).  This matrix must be multiplied by the
        residual variance to get the covariance of the
        parameter estimates -- see curve_fit.
    infodict : dict
        a dictionary of optional outputs with the keys:

        ``nfev``
            The number of function calls
        ``fvec``
            The function evaluated at the output
        ``fjac``
            A permutation of the R matrix of a QR
            factorization of the final approximate
            Jacobian matrix, stored column wise.
            Together with ipvt, the covariance of the
            estimate can be approximated.
        ``ipvt``
            An integer array of length N which defines
            a permutation matrix, p, such that
            fjac*p = q*r, where r is upper triangular
            with diagonal elements of nonincreasing
            magnitude. Column j of p is column ipvt(j)
            of the identity matrix.
        ``qtf``
            The vector (transpose(q) * fvec).

    mesg : str
        A string message giving information about the cause of failure.
    ier : int
        An integer flag.  If it is equal to 1, 2, 3 or 4, the solution was
        found.  Otherwise, the solution was not found. In either case, the
        optional output variable 'mesg' gives more information.

    Notes
    -----
    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.

    cov_x is a Jacobian approximation to the Hessian of the least squares
    objective function.
    This approximation assumes that the objective function is based on the
    difference between some observed target data (ydata) and a (non-linear)
    function of the parameters `f(xdata, params)` ::

           func(params) = ydata - f(xdata, params)

    so that the objective function is ::

           min   sum((ydata - f(xdata, params))**2, axis=0)
         params

    """
    x0 = asarray(x0).flatten()
    n = len(x0)
    if not isinstance(args, tuple):
        args = (args, )
    shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
    m = shape[0]
    if n > m:
        raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
    if epsfcn is None:
        epsfcn = finfo(dtype).eps
    if Dfun is None:
        if maxfev == 0:
            maxfev = 200 * (n + 1)
        retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol, gtol,
                                 maxfev, epsfcn, factor, diag)
    else:
        if col_deriv:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
        else:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
        if maxfev == 0:
            maxfev = 100 * (n + 1)
        retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv,
                                 ftol, xtol, gtol, maxfev, factor, diag)

    errors = {
        0: ["Improper input parameters.", TypeError],
        1: [
            "Both actual and predicted relative reductions "
            "in the sum of squares\n  are at most %f" % ftol, None
        ],
        2: [
            "The relative error between two consecutive "
            "iterates is at most %f" % xtol, None
        ],
        3: [
            "Both actual and predicted relative reductions in "
            "the sum of squares\n  are at most %f and the "
            "relative error between two consecutive "
            "iterates is at \n  most %f" % (ftol, xtol), None
        ],
        4: [
            "The cosine of the angle between func(x) and any "
            "column of the\n  Jacobian is at most %f in "
            "absolute value" % gtol, None
        ],
        5: [
            "Number of calls to function has reached "
            "maxfev = %d." % maxfev, ValueError
        ],
        6: [
            "ftol=%f is too small, no further reduction "
            "in the sum of squares\n  is possible."
            "" % ftol, ValueError
        ],
        7: [
            "xtol=%f is too small, no further improvement in "
            "the approximate\n  solution is possible." % xtol, ValueError
        ],
        8: [
            "gtol=%f is too small, func(x) is orthogonal to the "
            "columns of\n  the Jacobian to machine "
            "precision." % gtol, ValueError
        ],
        'unknown': ["Unknown error.", TypeError]
    }

    info = retval[-1]  # The FORTRAN return value

    if info not in [1, 2, 3, 4] and not full_output:
        if info in [5, 6, 7, 8]:
            warnings.warn(errors[info][0], RuntimeWarning)
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                raise errors['unknown'][1](errors['unknown'][0])

    mesg = errors[info][0]
    if full_output:
        cov_x = None
        if info in [1, 2, 3, 4]:
            from numpy.linalg import inv, LinAlgError  # numpy.dual is deprecated
            perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
            r = triu(transpose(retval[1]['fjac'])[:n, :])
            R = dot(r, perm)
            try:
                cov_x = inv(dot(transpose(R), R))
            except (LinAlgError, ValueError):
                pass
        return (retval[0], cov_x) + retval[1:-1] + (mesg, info)
    else:
        return (retval[0], info)
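As the docstring notes, cov_x must be scaled by the residual variance to become a parameter covariance. A minimal usage sketch (assuming scipy.optimize.leastsq; the linear model and data are illustrative):

import numpy as np
from scipy.optimize import leastsq

xdata = np.linspace(0, 1, 20)
ydata = 2.0 * xdata + 1.0 + 0.05 * np.random.default_rng(2).normal(size=20)

def residuals(p):
    return ydata - (p[0] * xdata + p[1])

popt, cov_x, infodict, mesg, ier = leastsq(residuals, [1.0, 0.0], full_output=1)
#residual variance: s^2 = sum(r^2) / (M - N)
s_sq = (infodict['fvec'] ** 2).sum() / (len(xdata) - len(popt))
pcov = cov_x * s_sq
print(popt, np.sqrt(np.diag(pcov)))  #estimates and their standard errors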
Example #16
    def leastsq(self, **kws):
        """
        Use Levenberg-Marquardt minimization to perform a fit.
        This assumes that ModelParameters have been stored, and a function to
        minimize has been properly set up.

        This wraps scipy.optimize.leastsq.

        When possible, this calculates the estimated uncertainties and
        variable correlations from the covariance matrix.

        Writes outputs to many internal attributes.

        Parameters
        ----------
        kws : dict, optional
            Minimizer options to pass to scipy.optimize.leastsq.

        Returns
        -------
        success : bool
            True if fit was successful, False if not.
        """
        self.prepare_fit()
        lskws = dict(full_output=1,
                     xtol=1.e-7,
                     ftol=1.e-7,
                     gtol=1.e-7,
                     maxfev=2000 * (self.nvarys + 1),
                     Dfun=None)

        lskws.update(self.kws)
        lskws.update(kws)

        if lskws['Dfun'] is not None:
            self.jacfcn = lskws['Dfun']
            lskws['Dfun'] = self.__jacobian

        # suppress runtime warnings during fit and error analysis
        orig_warn_settings = np.geterr()
        np.seterr(all='ignore')
        lsout = scipy_leastsq(self.__residual, self.vars, **lskws)
        _best, _cov, infodict, errmsg, ier = lsout

        self.residual = resid = infodict['fvec']
        self.ier = ier
        self.lmdif_message = errmsg
        self.message = 'Fit succeeded.'
        self.success = ier in [1, 2, 3, 4]

        if ier == 0:
            self.message = 'Invalid Input Parameters.'
        elif ier == 5:
            self.message = self.err_maxfev % lskws['maxfev']
        else:
            self.message = 'Tolerance seems to be too small.'

        self.nfev = infodict['nfev']
        self.ndata = len(resid)

        sum_sqr = (resid**2).sum()
        self.chisqr = sum_sqr
        self.nfree = (self.ndata - self.nvarys)
        self.redchi = sum_sqr / self.nfree

        # need to map _best values to params, then calculate the
        # grad for the variable parameters
        grad = ones_like(_best)
        vbest = ones_like(_best)

        # ensure that _best, vbest, and grad are not
        # broken 1-element ndarrays.
        if len(np.shape(_best)) == 0:
            _best = np.array([_best])
        if len(np.shape(vbest)) == 0:
            vbest = np.array([vbest])
        if len(np.shape(grad)) == 0:
            grad = np.array([grad])

        for ivar, varname in enumerate(self.var_map):
            par = self.params[varname]
            grad[ivar] = par.scale_gradient(_best[ivar])
            vbest[ivar] = par.value

        # modified from JJ Helmus' leastsqbound.py
        infodict['fjac'] = transpose(
            transpose(infodict['fjac']) / take(grad, infodict['ipvt'] - 1))
        rvec = dot(triu(transpose(infodict['fjac'])[:self.nvarys, :]),
                   take(eye(self.nvarys), infodict['ipvt'] - 1, 0))
        try:
            self.covar = inv(dot(transpose(rvec), rvec))
        except (LinAlgError, ValueError):
            self.covar = None

        has_expr = False
        for par in self.params.values():
            par.stderr, par.correl = 0, None
            has_expr = has_expr or par.expr is not None

        # self.errorbars = error bars were successfully estimated
        self.errorbars = self.covar is not None

        if self.covar is not None:
            if self.scale_covar:
                self.covar = self.covar * sum_sqr / self.nfree

            for ivar, varname in enumerate(self.var_map):
                par = self.params[varname]
                par.stderr = sqrt(self.covar[ivar, ivar])
                par.correl = {}
                try:
                    self.errorbars = self.errorbars and (par.stderr > 0.0)
                    for jvar, varn2 in enumerate(self.var_map):
                        if jvar != ivar:
                            par.correl[varn2] = (
                                self.covar[ivar, jvar] /
                                (par.stderr * sqrt(self.covar[jvar, jvar])))
                except Exception:
                    self.errorbars = False

            uvars = None
            if has_expr:
                # uncertainties on constrained parameters:
                #   get values with uncertainties (including correlations),
                #   temporarily set Parameter values to these,
                #   re-evaluate constrained parameters to extract stderr
                #   and then set Parameters back to best-fit value
                try:
                    uvars = uncertainties.correlated_values(vbest, self.covar)
                except (LinAlgError, ValueError):
                    uvars = None

                if uvars is not None:
                    for par in self.params.values():
                        eval_stderr(par, uvars, self.var_map, self.params,
                                    self.asteval)
                    # restore nominal values
                    for v, nam in zip(uvars, self.var_map):
                        self.asteval.symtable[nam] = v.nominal_value

        if not self.errorbars:
            self.message = '%s. Could not estimate error-bars' % self.message

        np.seterr(**orig_warn_settings)
        self.unprepare_fit()
        return self.success
Example #17
File: util.py Project: puwe/DAMASK
def leastsqBound(func,
                 x0,
                 args=(),
                 bounds=None,
                 Dfun=None,
                 full_output=0,
                 col_deriv=0,
                 ftol=1.49012e-8,
                 xtol=1.49012e-8,
                 gtol=0.0,
                 maxfev=0,
                 epsfcn=None,
                 factor=100,
                 diag=None):
    from scipy.optimize import _minpack
    """
  Non-linear least square fitting (Levenberg-Marquardt method) with 
  bounded parameters.
  the codes of transformation between int <-> ext refers to the work of
  Jonathan J. Helmus: https://github.com/jjhelmus/leastsqbound-scipy
  other codes refers to the source code of minpack.py:
  ..\Lib\site-packages\scipy\optimize\minpack.py

  An internal parameter list is used to enforce contraints on the fitting 
  parameters. The transfomation is based on that of MINUIT package. 
  please see: F. James and M. Winkler. MINUIT User's Guide, 2004.

  bounds : list
    (min, max) pairs for each parameter, use None for 'min' or 'max' 
    when there is no bound in that direction. 
    For example: if there are two parameters needed to be fitting, then
    bounds is [(min1,max1), (min2,max2)]
    
  This function is based on 'leastsq' of minpack.py, the annotation of
  other parameters can be found in 'leastsq'.
  ..\Lib\site-packages\scipy\optimize\minpack.py
  """
    def _check_func(checker,
                    argname,
                    thefunc,
                    x0,
                    args,
                    numinputs,
                    output_shape=None):
        """The same as that of minpack.py"""
        res = np.atleast_1d(thefunc(*((x0[:numinputs], ) + args)))
        if (output_shape is not None) and (shape(res) != output_shape):
            if (output_shape[0] != 1):
                if len(output_shape) > 1:
                    if output_shape[1] == 1:
                        return shape(res)
                msg = "%s: there is a mismatch between the input and output " \
                      "shape of the '%s' argument" % (checker, argname)
                func_name = getattr(thefunc, '__name__', None)
                if func_name:
                    msg += " '%s'." % func_name
                else:
                    msg += "."
                raise TypeError(msg)
        if np.issubdtype(res.dtype, np.inexact):
            dt = res.dtype
        else:
            dt = dtype(float)
        return shape(res), dt

    def _int2extGrad(p_int, bounds):
        """Calculate the gradients of transforming the internal (unconstrained) to external (constrained) parameter."""
        grad = np.empty_like(p_int)
        for i, (x, bound) in enumerate(zip(p_int, bounds)):
            lower, upper = bound
            if lower is None and upper is None:  # No constraints
                grad[i] = 1.0
            elif upper is None:  # only lower bound
                grad[i] = x / np.sqrt(x * x + 1.0)
            elif lower is None:  # only upper bound
                grad[i] = -x / np.sqrt(x * x + 1.0)
            else:  # lower and upper bounds
                grad[i] = (upper - lower) * np.cos(x) / 2.0
        return grad

    def _int2extFunc(bounds):
        """transform internal parameters into external parameters."""
        local = [_int2extLocal(b) for b in bounds]

        def _transform_i2e(p_int):
            p_ext = np.empty_like(p_int)
            p_ext[:] = [i(j) for i, j in zip(local, p_int)]
            return p_ext

        return _transform_i2e

    def _ext2intFunc(bounds):
        """transform external parameters into internal parameters."""
        local = [_ext2intLocal(b) for b in bounds]

        def _transform_e2i(p_ext):
            p_int = np.empty_like(p_ext)
            p_int[:] = [i(j) for i, j in zip(local, p_ext)]
            return p_int

        return _transform_e2i

    def _int2extLocal(bound):
        """transform a single internal parameter to an external parameter."""
        lower, upper = bound
        if lower is None and upper is None:  # no constraints
            return lambda x: x
        elif upper is None:  # only lower bound
            return lambda x: lower - 1.0 + np.sqrt(x * x + 1.0)
        elif lower is None:  # only upper bound
            return lambda x: upper + 1.0 - np.sqrt(x * x + 1.0)
        else:
            return lambda x: lower + (
                (upper - lower) / 2.0) * (np.sin(x) + 1.0)

    def _ext2intLocal(bound):
        """transform a single external parameter to an internal parameter."""
        lower, upper = bound
        if lower is None and upper is None:  # no constraints
            return lambda x: x
        elif upper is None:  # only lower bound
            return lambda x: np.sqrt((x - lower + 1.0)**2 - 1.0)
        elif lower is None:  # only upper bound
            return lambda x: np.sqrt((x - upper - 1.0)**2 - 1.0)
        else:
            return lambda x: np.arcsin((2.0 * (x - lower) /
                                        (upper - lower)) - 1.0)

    i2e = _int2extFunc(bounds)
    e2i = _ext2intFunc(bounds)

    x0 = np.asarray(x0).flatten()
    n = len(x0)

    if len(bounds) != n:
        raise ValueError(
            'the length of bounds is inconsistent with the number of parameters '
        )

    if not isinstance(args, tuple):
        args = (args, )

    shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
    m = shape[0]

    if n > m:
        raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
    if epsfcn is None:
        epsfcn = np.finfo(dtype).eps

    def funcWarp(x, *args):
        return func(i2e(x), *args)

    xi0 = e2i(x0)

    if Dfun is None:
        if maxfev == 0:
            maxfev = 200 * (n + 1)
        retval = _minpack._lmdif(funcWarp, xi0, args, full_output, ftol, xtol,
                                 gtol, maxfev, epsfcn, factor, diag)
    else:
        if col_deriv:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
        else:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
        if maxfev == 0:
            maxfev = 100 * (n + 1)

        def DfunWarp(x, *args):
            return Dfun(i2e(x), *args)

        retval = _minpack._lmder(funcWarp, DfunWarp, xi0, args, full_output,
                                 col_deriv, ftol, xtol, gtol, maxfev, factor,
                                 diag)

    errors = {
        0: ["Improper input parameters.", TypeError],
        1: [
            "Both actual and predicted relative reductions "
            "in the sum of squares\n  are at most %f" % ftol, None
        ],
        2: [
            "The relative error between two consecutive "
            "iterates is at most %f" % xtol, None
        ],
        3: [
            "Both actual and predicted relative reductions in "
            "the sum of squares\n  are at most %f and the "
            "relative error between two consecutive "
            "iterates is at \n  most %f" % (ftol, xtol), None
        ],
        4: [
            "The cosine of the angle between func(x) and any "
            "column of the\n  Jacobian is at most %f in "
            "absolute value" % gtol, None
        ],
        5: [
            "Number of calls to function has reached "
            "maxfev = %d." % maxfev, ValueError
        ],
        6: [
            "ftol=%f is too small, no further reduction "
            "in the sum of squares\n  is possible."
            "" % ftol, ValueError
        ],
        7: [
            "xtol=%f is too small, no further improvement in "
            "the approximate\n  solution is possible." % xtol, ValueError
        ],
        8: [
            "gtol=%f is too small, func(x) is orthogonal to the "
            "columns of\n  the Jacobian to machine "
            "precision." % gtol, ValueError
        ],
        'unknown': ["Unknown error.", TypeError]
    }

    info = retval[-1]  # The FORTRAN return value

    if info not in [1, 2, 3, 4] and not full_output:
        if info in [5, 6, 7, 8]:
            import warnings
            warnings.warn(errors[info][0], RuntimeWarning)
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                raise errors['unknown'][1](errors['unknown'][0])

    mesg = errors[info][0]
    x = i2e(retval[0])

    if full_output:
        grad = _int2extGrad(retval[0], bounds)
        retval[1]['fjac'] = (retval[1]['fjac'].T /
                             np.take(grad, retval[1]['ipvt'] - 1)).T
        cov_x = None
        if info in [1, 2, 3, 4]:
            from numpy.linalg import inv, LinAlgError  # numpy.dual is deprecated
            perm = np.take(np.eye(n), retval[1]['ipvt'] - 1, 0)
            r = np.triu(np.transpose(retval[1]['fjac'])[:n, :])
            R = np.dot(r, perm)
            try:
                cov_x = inv(np.dot(np.transpose(R), R))
            except LinAlgError as inverror:
                print(inverror)
                pass
        return (x, cov_x) + retval[1:-1] + (mesg, info)
    else:
        return (x, info)
Example #18
def leastsqbound(func,
                 x0,
                 args=(),
                 bounds=None,
                 Dfun=None,
                 full_output=0,
                 col_deriv=0,
                 ftol=1.49012e-8,
                 xtol=1.49012e-8,
                 gtol=0.0,
                 maxfev=0,
                 epsfcn=None,
                 factor=100,
                 diag=None):
    """
    Bounded minimization of the sum of squares of a set of equations.

    ::

        x = arg min(sum(func(y)**2,axis=0))
                 y

    Parameters
    ----------
    func : callable
        should take at least one (possibly length N vector) argument and
        returns M floating point numbers.
    x0 : ndarray
        The starting estimate for the minimization.
    args : tuple
        Any extra arguments to func are placed in this tuple.
    bounds : list
        ``(min, max)`` pairs for each element in ``x``, defining
        the bounds on that parameter. Use None for one of ``min`` or
        ``max`` when there is no bound in that direction.
    Dfun : callable
        A function or method to compute the Jacobian of func with derivatives
        across the rows. If this is None, the Jacobian will be estimated.
    full_output : bool
        non-zero to return all optional outputs.
    col_deriv : bool
        non-zero to specify that the Jacobian function computes derivatives
        down the columns (faster, because there is no transpose operation).
    ftol : float
        Relative error desired in the sum of squares.
    xtol : float
        Relative error desired in the approximate solution.
    gtol : float
        Orthogonality desired between the function vector and the columns of
        the Jacobian.
    maxfev : int
        The maximum number of calls to the function. If zero, then 100*(N+1) is
        the maximum where N is the number of elements in x0.
    epsfcn : float
        A suitable step length for the forward-difference approximation of the
        Jacobian (for Dfun=None). If epsfcn is less than the machine precision,
        it is assumed that the relative errors in the functions are of the
        order of the machine precision.
    factor : float
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
    diag : sequence
        N positive entries that serve as a scale factors for the variables.

    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for an unsuccessful
        call).
    cov_x : ndarray
        Uses the fjac and ipvt optional outputs to construct an
        estimate of the jacobian around the solution.  ``None`` if a
        singular matrix encountered (indicates very flat curvature in
        some direction).  This matrix must be multiplied by the
        residual variance to get the covariance of the
        parameter estimates -- see curve_fit.
    infodict : dict
        a dictionary of optional outputs with the keys::

            - 'nfev' : the number of function calls
            - 'fvec' : the function evaluated at the output
            - 'fjac' : A permutation of the R matrix of a QR
                     factorization of the final approximate
                     Jacobian matrix, stored column wise.
                     Together with ipvt, the covariance of the
                     estimate can be approximated.
            - 'ipvt' : an integer array of length N which defines
                     a permutation matrix, p, such that
                     fjac*p = q*r, where r is upper triangular
                     with diagonal elements of nonincreasing
                     magnitude. Column j of p is column ipvt(j)
                     of the identity matrix.
            - 'qtf'  : the vector (transpose(q) * fvec).

    mesg : str
        A string message giving information about the cause of failure.
    ier : int
        An integer flag.  If it is equal to 1, 2, 3 or 4, the solution was
        found.  Otherwise, the solution was not found. In either case, the
        optional output variable 'mesg' gives more information.

    Notes
    -----
    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.

    cov_x is a Jacobian approximation to the Hessian of the least squares
    objective function.
    This approximation assumes that the objective function is based on the
    difference between some observed target data (ydata) and a (non-linear)
    function of the parameters `f(xdata, params)` ::

           func(params) = ydata - f(xdata, params)

    so that the objective function is ::

           min   sum((ydata - f(xdata, params))**2, axis=0)
         params

    Constraints on the parameters are enforced using an internal parameter list
    with appropriate transformations such that these internal parameters can be
    optimized without constraints. The transformation between a given internal
    parameter, p_i, and an external parameter, p_e, is as follows:

    With ``min`` and ``max`` bounds defined ::

        p_i = arcsin((2 * (p_e - min) / (max - min)) - 1.)
        p_e = min + ((max - min) / 2.) * (sin(p_i) + 1.)

    With only ``max`` defined ::

        p_i = sqrt((max - p_e + 1.)**2 - 1.)
        p_e = max + 1. - sqrt(p_i**2 + 1.)

    With only ``min`` defined ::

        p_i = sqrt((p_e - min + 1.)**2 - 1.)
        p_e = min - 1. + sqrt(p_i**2 + 1.)

    These transformations are used in the MINUIT package, and described in
    detail in section 1.3.1 of the MINUIT User's Guide.

    To Do
    -----
    Currently the ``factor`` and ``diag`` parameters scale the
    internal parameter list, but should scale the external parameter list.

    The `qtf` vector in the infodict dictionary reflects the internal parameter
    list; it should be corrected to reflect the external parameter list.

    References
    ----------
    * F. James and M. Winkler. MINUIT User's Guide, July 16, 2004.

    """
    # use leastsq if no bounds are present
    if bounds is None:
        return leastsq(func, x0, args, Dfun, full_output, col_deriv, ftol,
                       xtol, gtol, maxfev, epsfcn, factor, diag)

    # create function which convert between internal and external parameters
    i2e = _internal2external_func(bounds)
    e2i = _external2internal_func(bounds)

    x0 = asarray(x0).flatten()
    i0 = e2i(x0)
    n = len(x0)
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')
    if not isinstance(args, tuple):
        args = (args, )
    shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
    m = shape[0]
    if n > m:
        raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
    if epsfcn is None:
        epsfcn = finfo(dtype).eps

    # define a wrapped func which accept internal parameters, converts them
    # to external parameters and calls func
    def wfunc(x, *args):
        return func(i2e(x), *args)

    if Dfun is None:
        if (maxfev == 0):
            maxfev = 200 * (n + 1)
        retval = _minpack._lmdif(wfunc, i0, args, full_output, ftol, xtol,
                                 gtol, maxfev, epsfcn, factor, diag)
    else:
        if col_deriv:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
        else:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
        if (maxfev == 0):
            maxfev = 100 * (n + 1)

        def wDfun(x, *args):  # wrapped Dfun
            return Dfun(i2e(x), *args)

        retval = _minpack._lmder(wfunc, wDfun, i0, args, full_output,
                                 col_deriv, ftol, xtol, gtol, maxfev, factor,
                                 diag)

    errors = {
        0: ["Improper input parameters.", TypeError],
        1: [
            "Both actual and predicted relative reductions "
            "in the sum of squares\n  are at most %f" % ftol, None
        ],
        2: [
            "The relative error between two consecutive "
            "iterates is at most %f" % xtol, None
        ],
        3: [
            "Both actual and predicted relative reductions in "
            "the sum of squares\n  are at most %f and the "
            "relative error between two consecutive "
            "iterates is at \n  most %f" % (ftol, xtol), None
        ],
        4: [
            "The cosine of the angle between func(x) and any "
            "column of the\n  Jacobian is at most %f in "
            "absolute value" % gtol, None
        ],
        5: [
            "Number of calls to function has reached "
            "maxfev = %d." % maxfev, ValueError
        ],
        6: [
            "ftol=%f is too small, no further reduction "
            "in the sum of squares\n  is possible."
            "" % ftol, ValueError
        ],
        7: [
            "xtol=%f is too small, no further improvement in "
            "the approximate\n  solution is possible." % xtol, ValueError
        ],
        8: [
            "gtol=%f is too small, func(x) is orthogonal to the "
            "columns of\n  the Jacobian to machine "
            "precision." % gtol, ValueError
        ],
        'unknown': ["Unknown error.", TypeError]
    }

    info = retval[-1]  # The FORTRAN return value

    if (info not in [1, 2, 3, 4] and not full_output):
        if info in [5, 6, 7, 8]:
            warnings.warn(errors[info][0], RuntimeWarning)
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                raise errors['unknown'][1](errors['unknown'][0])

    mesg = errors[info][0]
    x = i2e(retval[0])  # internal params to external params

    if full_output:
        # convert fjac from internal params to external
        grad = _internal2external_grad(retval[0], bounds)
        retval[1]['fjac'] = (retval[1]['fjac'].T /
                             take(grad, retval[1]['ipvt'] - 1)).T
        cov_x = None
        if info in [1, 2, 3, 4]:
            from numpy.linalg import inv, LinAlgError  # numpy.dual is deprecated
            perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
            r = triu(transpose(retval[1]['fjac'])[:n, :])
            R = dot(r, perm)
            try:
                cov_x = inv(dot(transpose(R), R))
            except LinAlgError:
                pass
        return (x, cov_x) + retval[1:-1] + (mesg, info)
    else:
        return (x, info)
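The MINUIT-style transformations in the Notes are easy to verify directly; a minimal sketch assuming NumPy (the bounds and values are illustrative):

import numpy as np

lower, upper = 0.0, 10.0  #example (min, max) bounds

def e2i(p_e):
    #external (bounded) -> internal (unconstrained)
    return np.arcsin((2.0 * (p_e - lower) / (upper - lower)) - 1.0)

def i2e(p_i):
    #internal -> external; always lands inside [lower, upper]
    return lower + ((upper - lower) / 2.0) * (np.sin(p_i) + 1.0)

p = np.array([1.0, 5.0, 9.9])
assert np.allclose(i2e(e2i(p)), p)  #round trip recovers the parameters
print(i2e(np.array([-100.0, 0.0, 100.0])))  #any internal value maps into bounds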
Example #19
def leastsqbound(func,
                 x0,
                 args=(),
                 bounds=None,
                 Dfun=None,
                 full_output=0,
                 col_deriv=0,
                 ftol=1.49012e-8,
                 xtol=1.49012e-8,
                 gtol=0.0,
                 maxfev=0,
                 epsfcn=None,
                 factor=100,
                 diag=None):
    # use leastsq if no bounds are present
    if bounds is None:
        return leastsq(func, x0, args, Dfun, full_output, col_deriv, ftol,
                       xtol, gtol, maxfev, epsfcn, factor, diag)

    # create functions that convert between internal and external parameters
    i2e = _internal2external_func(bounds)
    e2i = _external2internal_func(bounds)

    x0 = asarray(x0).flatten()
    i0 = e2i(x0)
    n = len(x0)
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')
    if not isinstance(args, tuple):
        args = (args, )
    shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
    m = shape[0]
    if n > m:
        raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
    if epsfcn is None:
        epsfcn = finfo(dtype).eps

    # define a wrapped func which accepts internal parameters, converts them
    # to external parameters and calls func
    def wfunc(x, *args):
        return func(i2e(x), *args)

    if Dfun is None:
        if (maxfev == 0):
            maxfev = 200 * (n + 1)
        retval = _minpack._lmdif(wfunc, i0, args, full_output, ftol, xtol,
                                 gtol, maxfev, epsfcn, factor, diag)
    else:
        if col_deriv:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
        else:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
        if (maxfev == 0):
            maxfev = 100 * (n + 1)

        def wDfun(x, *args):  # wrapped Dfun
            return Dfun(i2e(x), *args)

        retval = _minpack._lmder(wfunc, wDfun, i0, args, full_output,
                                 col_deriv, ftol, xtol, gtol, maxfev, factor,
                                 diag)

    errors = {
        0: ["Improper input parameters.", TypeError],
        1: [
            "Both actual and predicted relative reductions "
            "in the sum of squares\n  are at most %f" % ftol, None
        ],
        2: [
            "The relative error between two consecutive "
            "iterates is at most %f" % xtol, None
        ],
        3: [
            "Both actual and predicted relative reductions in "
            "the sum of squares\n  are at most %f and the "
            "relative error between two consecutive "
            "iterates is at \n  most %f" % (ftol, xtol), None
        ],
        4: [
            "The cosine of the angle between func(x) and any "
            "column of the\n  Jacobian is at most %f in "
            "absolute value" % gtol, None
        ],
        5: [
            "Number of calls to function has reached "
            "maxfev = %d." % maxfev, ValueError
        ],
        6: [
            "ftol=%f is too small, no further reduction "
            "in the sum of squares\n  is possible."
            "" % ftol, ValueError
        ],
        7: [
            "xtol=%f is too small, no further improvement in "
            "the approximate\n  solution is possible." % xtol, ValueError
        ],
        8: [
            "gtol=%f is too small, func(x) is orthogonal to the "
            "columns of\n  the Jacobian to machine "
            "precision." % gtol, ValueError
        ],
        'unknown': ["Unknown error.", TypeError]
    }

    info = retval[-1]  # The FORTRAN return value

    if (info not in [1, 2, 3, 4] and not full_output):
        if info in [5, 6, 7, 8]:
            pass  #warnings.warn(errors[info][0], RuntimeWarning)
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                raise errors['unknown'][1](errors['unknown'][0])

    mesg = errors[info][0]
    x = i2e(retval[0])  # internal params to external params

    if full_output:
        # convert fjac from internal params to external
        grad = _internal2external_grad(retval[0], bounds)
        retval[1]['fjac'] = (retval[1]['fjac'].T /
                             take(grad, retval[1]['ipvt'] - 1)).T
        cov_x = None
        if info in [1, 2, 3, 4]:
            from numpy.dual import inv
            from numpy.linalg import LinAlgError
            perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
            r = triu(transpose(retval[1]['fjac'])[:n, :])
            R = dot(r, perm)
            try:
                cov_x = inv(dot(transpose(R), R))
            except LinAlgError:
                pass
        return (x, cov_x) + retval[1:-1] + (mesg, info)
    else:
        return (x, info)
Exemplo n.º 20
0
    def leastsq(self, **kws):
        """
        use Levenberg-Marquardt minimization to perform fit.
        This assumes that ModelParameters have been stored,
        and a function to minimize has been properly set up.

        This wraps scipy.optimize.leastsq, and keyword arguments are passed
        directly as options to scipy.optimize.leastsq

        When possible, this calculates the estimated uncertainties and
        variable correlations from the covariance matrix.

        writes outputs to many internal attributes, and
        returns True if fit was successful, False if not.
        """
        self.prepare_fit(force=True)
        toler = self.toler
        lskws = dict(xtol=toler, ftol=toler,
                     gtol=toler, maxfev=1000*(self.nvarys+1), Dfun=None)
        lskws.update(self.kws)
        lskws.update(kws)

        if lskws['Dfun'] is not None:
            self.jacfcn = lskws['Dfun']
            lskws['Dfun'] = self.__jacobian

        lsout = leastsq(self.__residual, self.vars, **lskws)
        del self.vars

        _best, cov, infodict, errmsg, ier = lsout
        resid  = infodict['fvec']
        ndata  = len(resid)
        chisqr = (resid**2).sum()
        nfree  = ndata - self.nvarys
        redchi = chisqr / nfree

        group = self.paramgroup
        # need to map _best values to params, then calculate the
        # grad for the variable parameters
        grad  = ones_like(_best)  # holds scaled gradient for variables
        vbest = ones_like(_best)  # holds best values for variables
        named_params = {}         # var names : parameter object
        for ivar, name in enumerate(self.var_names):
            named_params[name] = par = getattr(group, name)
            grad[ivar]  = par.scale_gradient(_best[ivar])
            vbest[ivar] = par.value
            par.stderr  = 0
            par.correl  = {}
            par._uval   = None
        # modified from JJ Helmus' leastsqbound.py
        # compute covariance matrix here explicitly...
        infodict['fjac'] = transpose(transpose(infodict['fjac']) /
                                     take(grad, infodict['ipvt'] - 1))

        rvec = dot(triu(transpose(infodict['fjac'])[:self.nvarys,:]),
                take(eye(self.nvarys),infodict['ipvt'] - 1,0))
        try:
            cov = inv(dot(transpose(rvec),rvec))
        except (LinAlgError, ValueError):
            cov = None

        # map covariance matrix to parameter uncertainties
        # and correlations
        if cov is not None:
            if self.scale_covar:
                cov = cov * chisqr / nfree

            # uncertainties for constrained parameters:
            #   get values with uncertainties (including correlations),
            #   temporarily set Parameter values to these,
            #   re-evaluate constrained parameters to extract stderr
            #   and then set Parameters back to best-fit value
            try:
                uvars = uncertainties.correlated_values(vbest, cov)
            except (LinAlgError, ValueError):
                cov, uvars = None, None
            group.covar_vars = self.var_names
            group.covar = cov

            if uvars is not None:
                # set stderr and correlations for variable, named parameters:
                for iv, name in enumerate(self.var_names):
                    p = named_params[name]
                    p.stderr = uvars[iv].std_dev()
                    p._uval  = uvars[iv]
                    p.correl = {}
                    for jv, name2 in enumerate(self.var_names):
                        if jv != iv:
                            p.correl[name2] = (cov[iv, jv]/
                                               (p.stderr * sqrt(cov[jv, jv])))
                for nam in dir(self.paramgroup):
                    obj = getattr(self.paramgroup, nam)
                    eval_stderr(obj, uvars, self.var_names,
                                named_params, self._larch)

                # restore nominal values that may have been tweaked to
                # calculate other stderrs
                for uval, nam in zip(uvars, self.var_names):
                    named_params[nam]._val  = uval.nominal_value

        # clear any errors evaluating uncertainties
            if self._larch.error:
                self._larch.error = []

        # collect results for output group
        message = 'Fit succeeded.'

        if ier == 0:
            message = 'Invalid Input Parameters.'
        elif ier == 5:
            message = self.err_maxfev % lskws['maxfev']
        elif ier > 5:
            message = 'See lmdif_message.'
        if cov is None:
            message = '%s Could not estimate error-bars' % message

        ofit = group
        if Group is not None:
            ofit = group.fit_details = Group()

        ofit.method = 'leastsq'
        ofit.fjac  = infodict['fjac']
        ofit.fvec  = infodict['fvec']
        ofit.qtf   = infodict['qtf']
        ofit.ipvt  = infodict['ipvt']
        ofit.nfev  = infodict['nfev']
        ofit.status  = ier
        ofit.message = errmsg
        ofit.success = ier in [1, 2, 3, 4]
        ofit.toler   = self.toler

        group.residual   = resid
        group.message    = message
        group.chi_square = chisqr
        group.chi_reduced = redchi
        group.nvarys     = self.nvarys
        group.nfree      = nfree
        group.errorbars  = cov is not None
        return ier
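
The par.correl assignment above is the standard conversion from covariance to correlation; a standalone sketch of the same relation (helper name hypothetical):

import numpy as np

def correl_from_cov(cov):
    # corr[i, j] = cov[i, j] / (sigma_i * sigma_j), where
    # sigma_i = sqrt(cov[i, i]) is the standard error of parameter i
    sigma = np.sqrt(np.diag(cov))
    return cov / np.outer(sigma, sigma)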
Exemplo n.º 21
0
    def leastsq(self, **kws):
        """
        use Levenberg-Marquardt minimization to perform fit.
        This assumes that ModelParameters have been stored,
        and a function to minimize has been properly set up.

        This wraps scipy.optimize.leastsq, and keyword arguments are passed
        directly as options to scipy.optimize.leastsq

        When possible, this calculates the estimated uncertainties and
        variable correlations from the covariance matrix.

        writes outputs to many internal attributes, and
        returns True if fit was successful, False if not.
        """
        self.prepare_fit()
        lskws = dict(full_output=1, xtol=1.e-7, ftol=1.e-7,
                     gtol=1.e-7, maxfev=2000*(self.nvarys+1), Dfun=None)

        lskws.update(self.kws)
        lskws.update(kws)

        if lskws['Dfun'] is not None:
            self.jacfcn = lskws['Dfun']
            lskws['Dfun'] = self.__jacobian

        lsout = scipy_leastsq(self.__residual, self.vars, **lskws)
        _best, _cov, infodict, errmsg, ier = lsout

        self.residual = resid = infodict['fvec']


        self.ier = ier
        self.lmdif_message = errmsg
        self.message = 'Fit succeeded.'
        self.success = ier in [1, 2, 3, 4]

        if ier == 0:
            self.message = 'Invalid Input Parameters.'
        elif ier == 5:
            self.message = self.err_maxfev % lskws['maxfev']
        else:
            self.message = 'Tolerance seems to be too small.'

        self.nfev =  infodict['nfev']
        self.ndata = len(resid)

        sum_sqr = (resid**2).sum()
        self.chisqr = sum_sqr
        self.nfree = (self.ndata - self.nvarys)
        self.redchi = sum_sqr / self.nfree

        # need to map _best values to params, then calculate the
        # grad for the variable parameters
        grad = ones_like(_best)
        vbest = ones_like(_best)
        for ivar, varname in enumerate(self.var_map):
            par = self.params[varname]
            grad[ivar] = par.scale_gradient(_best[ivar])
            vbest[ivar] = par.value

        # modified from JJ Helmus' leastsqbound.py

        infodict['fjac'] = transpose(transpose(infodict['fjac']) /
                                     take(grad, infodict['ipvt'] - 1))
        rvec = dot(triu(transpose(infodict['fjac'])[:self.nvarys, :]),
                   take(eye(self.nvarys), infodict['ipvt'] - 1, 0))
        try:
            cov = inv(dot(transpose(rvec), rvec))
        except (LinAlgError, ValueError):
            cov = None

        for par in self.params.values():
            par.stderr, par.correl = 0, None

        self.covar = cov
        if cov is None:
            self.errorbars = False
            self.message = '%s. Could not estimate error-bars' % self.message
        else:
            self.errorbars = True
            if self.scale_covar:
                self.covar = cov = cov * sum_sqr / self.nfree

            # uncertainties on constrained parameters:
            #   get values with uncertainties (including correlations),
            #   temporarily set Parameter values to these,
            #   re-evaluate constrained parameters to extract stderr
            #   and then set Parameters back to best-fit value
            uvars = uncertainties.correlated_values(vbest, self.covar)

            for ivar, varname in enumerate(self.var_map):
                par = self.params[varname]
                par.stderr = sqrt(cov[ivar, ivar])
                par.correl = {}
                for jvar, varn2 in enumerate(self.var_map):
                    if jvar != ivar:
                        par.correl[varn2] = (cov[ivar, jvar]/
                                        (par.stderr * sqrt(cov[jvar, jvar])))

            for pname, par in self.params.items():
                eval_stderr(par, uvars, self.var_map,
                            self.params, self.asteval)

            # restore nominal values
            for v, nam in zip(uvars, self.var_map):
                self.asteval.symtable[nam] = v.nominal_value

        for par in self.params.values():
            if hasattr(par, 'ast'):
                delattr(par, 'ast')
        return self.success
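
The scale_covar branch above rescales the covariance by the reduced chi-square, the usual correction when the residuals are not already weighted by measurement uncertainties; a minimal sketch (names hypothetical):

import numpy as np

def scale_covariance(cov, resid, nvarys):
    # multiply the covariance by chisqr / nfree (the reduced chi-square)
    chisqr = (resid ** 2).sum()
    nfree = len(resid) - nvarys
    return cov * chisqr / nfree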
Exemplo n.º 22
0
def lmdif(func, x, args=(), full_output=0,
          ftol=data_type(1.49012e-8), xtol=data_type(1.49012e-8),
          gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
    """
    Minimize the sum of the squares of m nonlinear functions in n
        variables by a modification of the levenberg-marquardt
        algorithm. The user must provide a subroutine which calculates
        the functions. The jacobian is then calculated by a
        forward-difference approximation

    Parameters
    ----------
    func: callable
        should take at least one (possibly length N vector) argument and
        return M floating point numbers. It must not return NaNs or
        fitting might fail.
    x: ndarray
        The starting estimate for the minimization.
    args: tuple, optional
        Any extra arguments to func are placed in this tuple.
    full_output: bool, optional
        non-zero to return all optional outputs.
    ftol: float, optional
        Relative error desired in the sum of squares.
    xtol: float, optional
        Relative error desired in the approximate solution.
    gtol: float, optional
        Orthogonality desired between the function vector and the
        columns of the Jacobian.
    maxfev: int, optional
        The maximum number of calls to the function. If `Dfun` is
        provided then the default `maxfev` is 100*(N+1) where N is the
        number of elements in x0, otherwise the default `maxfev` is
        200*(N+1).
    epsfcn: float, optional
        A variable used in determining a suitable step length for the
        forward-difference approximation of the Jacobian (for
        Dfun=None). Normally the actual step length will be
        sqrt(epsfcn)*x. If epsfcn is less than the machine precision,
        it is assumed that the relative errors are of the order of the
        machine precision.
    factor: float, optional
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in interval ``(0.1,
        100)``.
    diag: sequence, optional
        N positive entries that serve as scale factors for the
        variables. If set to None, the variables will be scaled internally.

    Returns
    -------
    x: ndarray
        The solution (or the result of the last iteration for an
        unsuccessful call).
    cov_x: ndarray
        Uses the fjac and ipvt optional outputs to construct an
        estimate of the jacobian around the solution. None if a
        singular matrix encountered (indicates very flat curvature in
        some direction).  This matrix must be multiplied by the
        residual variance to get the covariance of the parameter
        estimates -- see curve_fit.
    infodict: dict
        a dictionary of optional outputs with the keys:

        ``nfev``
            The number of function calls
        ``fvec``
            The function evaluated at the output
        ``fjac``
            A permutation of the R matrix of a QR
            factorization of the final approximate
            Jacobian matrix, stored column wise.
            Together with ipvt, the covariance of the
            estimate can be approximated.
        ``ipvt``
            An integer array of length N which defines
            a permutation matrix, p, such that
            fjac*p = q*r, where r is upper triangular
            with diagonal elements of nonincreasing
            magnitude. Column j of p is column ipvt(j)
            of the identity matrix.
        ``qtf``
            The vector (transpose(q) * fvec).

    mesg: str
        A string message giving information about the cause of failure.
    ier: int
        An integer flag.  If it is equal to 1, 2, 3 or 4, the solution
        was found.  Otherwise, the solution was not found. In either
        case, the optional output variable 'mesg' gives more
        information.
    """

    # region : Initialize part of parameters

    global eps_machine, wa4, qtf
    global p1, p5, p25, p75, p0001
    ier = 0

    x = np.asarray(x).flatten()
    if not isinstance(args, tuple):
        args = (args,)
    if epsfcn is None:
        epsfcn = finfo(utility.data_type).eps

    if diag is None:
        mode = 1
    else:
        mode = 2

    # endregion : Initialize part of parameters

    # region : Check the input parameters for errors

    if ftol < 0. or xtol < 0. or gtol < 0. or factor <= 0:
        raise ValueError('!!! Some input parameters for lmdif ' +
                         'are illegal')

    if diag is not None:  # if mode == 2
        for d in diag:
            if d <= 0:
                raise ValueError('!!! Entries in diag must be positive')

    # endregion : Check the input parameters for errors

    # region : Preparation before main loop

    # > evaluate the function at the starting point and calculate
    # its norm
    # :: evaluate r(x) -> fvec
    fvec = func(x, *args)
    # :: evaluate ||r(x)||_2 -> fnorm
    fnorm = enorm(fvec)
    if utility.wm_trace:
        print(">>> L-M begins")
        print(">>> ||x0|| = %.10f" % fnorm)

    # region : initialize other parameters
    # -----------------------------------------
    nfev = 1
    m = fvec.size
    n = x.size
    ldfjac = m
    if m < n:
        raise ValueError('!!! m < n in lmdif')
    if maxfev <= 0:
        maxfev = 200 * (n + 1)
    # > check wa4 and qtf
    if wa4 is None or wa4.size != m:
        wa4 = np.zeros(m, data_type)
    if qtf is None or qtf.size != n:
        qtf = np.zeros(n, data_type)
    # ------------------------------------------
    # endregion : initialize other parameters

    # endregion : Preparation before main loop

    # region : Main loop

    # > initialize levenberg-marquardt parameter and iteration counter
    lam = data_type(0.0)
    iter = 1

    # > begin outer loop
    while True:
        if utility.wm_trace:
            print(">>> Step %d:" % iter)
        # > calculate the jacobian matrix
        # :: evaluate J(x) -> fjac: m by n
        fjac = jac(func, x, args, fvec, epsfcn)
        nfev += n

        # > compute the qr factorization of the jacobian
        # ::
        # ::                  /  R  \         n by n
        # ::     J * P = Q * |       |
        # ::                  \  0  /   (m - n) by n
        # ::      t
        # ::     Q  = H_n * ... H_2 * H_1
        # ::
        # ::    For H in { H_1, H_2, ..., H_n }, and arbitrary A
        # ::                        t
        # ::    H = I - beta * v * v
        # ::                     t                  t
        # ::    H * A = A - v * w,      w = beta * A * v
        # ::    information of P -> ipvt
        # ::    R -> rdiag and strict upper trapezoidal part of fjac
        # ::    { H_k }_k -> lower trapezoidal part of fjac
        ipvt, rdiag, acnorm = qr(m, n, fjac, ldfjac, True)

        # > on the first iteration
        if iter == 1:
            # >> if the diag is None, scale according to the norms of
            #    the columns of the initial jacobian
            if diag is None:
                diag = np.zeros(n, data_type)
                for j in range(n):
                    diag[j] = qrfac.acnorm[j]
                    if diag[j] == 0.0:
                        diag[j] = 1.0
            # >> calculate the norm of the scaled x and initialize
            #    the step bound delta
            wa3 = qrfac.wa  # 'wa3' is a name left over by lmdif
            for j in range(n):
                wa3[j] = diag[j] * x[j]
            xnorm = enorm(wa3)
            delta = factor * xnorm
            if delta == 0.0:
                delta = data_type(factor)

        # > form (q^T)*fvec and store the first n components in qtf
        # :: see x_{NG} = - PI * R^{-1} * Q_1^T * fvec
        # :: H * r = r - v * beta * r^T * v
        for i in range(m):
            wa4[i] = fvec[i]
        for j in range(n):  # altogether n times transformation
            # :: here the lower trapezoidal part of fjac contains
            #    a factored form of q, in other words, a set of v
            if fjac[j + j * ldfjac] != 0:
                sum = data_type(0.0)  # r^T * v
                for i in range(j, m):
                    sum += fjac[i + j * ldfjac] * wa4[i]
                # :: mul -beta
                temp = -sum / fjac[j + j * ldfjac]
                for i in range(j, m):
                    wa4[i] += fjac[i + j * ldfjac] * temp
            # restore the diag of R in fjac
            fjac[j + j * ldfjac] = qrfac.rdiag[j]
            qtf[j] = wa4[j]

        # > compute the norm(inf norm) of the scaled gradient
        #         t           t
        # :: g = J * r = P * R * qtf
        #
        gnorm = data_type(0.0)
        wa2 = qrfac.acnorm
        if fnorm != 0:
            for j in range(n):
                # >> get index
                l = ipvt[j] - 1
                if wa2[l] != 0.0:
                    sum = data_type(0.0)
                    for i in range(j + 1):
                        sum += fjac[i + j * ldfjac] * (qtf[i] / fnorm)
                    # >>> computing max
                    d1 = np.abs(sum / wa2[l])
                    gnorm = max(gnorm, d1)

        if utility.wm_trace:
            print("       ||df|| = %.10f, nfev = %d" % (gnorm, nfev))
        # > test for convergence of the gradient norm
        if gnorm <= gtol:
            ier = 4
            break

        # > rescale if necessary
        if mode != 2:
            for j in range(n):
                # >> compute max
                d1 = diag[j]
                d2 = wa2[j]
                diag[j] = max(d1, d2)

        # > beginning of the inner loop
        while True:
            if utility.wm_trace:
                print("    => try delta = %.10f:" % delta)
                if False:
                    utility.lam_trace = True
                    print("--" * 26 + " lmpar begin")
            # > determine the levenberg-marquardt parameter
            lam, wa1, sdiag = lm_lambda(n, fjac, ldfjac, ipvt,
                                        diag, qtf, delta, lam)
            if utility.lam_trace:
                utility.lam_trace = False
                print("--" * 26 + " lmpar end")
            # store the direction p and x + p. calculate the norm of p
            for j in range(n):
                wa1[j] = -wa1[j]
                wa2[j] = x[j] + wa1[j]
                wa3[j] = diag[j] * wa1[j]
            # :: pnorm = || D * p ||_2
            pnorm = enorm(wa3)

            # > on the first iteration, adjust the initial step bound
            if iter == 1:
                delta = min(delta, pnorm)

            # > evaluate the function at x + p and calculate its norm
            wa4 = func(wa2, *args)
            nfev += 1
            fnorm1 = enorm(wa4)

            # > compute the scaled actual reduction
            act_red = -1
            if p1 * fnorm1 < fnorm:
                # compute 2nd power
                d1 = fnorm1 / fnorm
                act_red = 1.0 - d1 * d1

            # > compute the scaled predicted reduction and the
            #   scaled directional derivative
            #
            # :: pre_red = (m(0) - m(p)) / m(0)
            # ::              t   t           t
            # ::         =  (p * J * J * p + J * r * p) / m(0)
            #
            # ::               t   t           t   t
            # :: J = Q * R => p * J * J * p = p * R * R * p
            # ::
            # :: m(0) = fnorm * fnorm
            for j in range(n):
                wa3[j] = 0
                l = ipvt[j] - 1
                temp = wa1[l]
                for i in range(j + 1):
                    wa3[i] += fjac[i + j * ldfjac] * temp
            # :: now wa3 stores J * p
            temp1 = enorm(wa3) / fnorm
            #                             t
            # :: lam * p = - grad_m(p) = J * r
            temp2 = (np.sqrt(lam) * pnorm) / fnorm
            # :: TODO -  ... / p5
            pre_red = temp1 * temp1 + temp2 * temp2 / p5
            dir_der = -(temp1 * temp1 + temp2 * temp2)

            # > compute the ratio of the actual to the predicted
            #   reduction
            ratio = 0.0
            if pre_red != 0:
                ratio = act_red / pre_red

            if utility.wm_trace:
                print("          ratio = %.10f, nfev = %d" % (ratio, nfev))

            # > update the step bound
            if ratio <= p25:
                if act_red >= 0.0:
                    temp = p5
                else:
                    temp = p5 * dir_der / (dir_der + p5 * act_red)
                if p1 * fnorm1 >= fnorm or temp < p1:
                    temp = p1
                # >> compute min, shrink the trust region
                d1 = pnorm / p1
                delta = temp * min(delta, d1)
                lam /= temp
                if utility.wm_trace:
                    print("          delta ↓ -> %.10f:" % delta)
            else:
                if lam == 0.0 or ratio >= p75:
                    # >> expand the trust region
                    delta = pnorm / p5
                    lam = p5 * lam
                    if utility.wm_trace:
                        print("          delta ↑ -> %.10f:" % delta)

            # > test for successful iteration
            if ratio >= p0001:
                # >> successful iteration. update x, fvec
                #    and their norms
                for j in range(n):
                    x[j] = wa2[j]
                    wa2[j] = diag[j] * x[j]
                for i in range(m):
                    fvec[i] = wa4[i]
                xnorm = enorm(wa2)

                if utility.wm_trace:
                    print("    √ ||x|| ↓ %.10f -> %.10f" %
                          (fnorm - fnorm1, fnorm1))

                fnorm = fnorm1
                iter += 1
            elif utility.wm_trace:
                print("       × ||x|| not changed")


            # > test for convergence
            if np.abs(act_red) <= ftol and pre_red <= ftol \
                    and p5 * ratio <= 1.0:
                ier = 1
            if delta <= xtol * xnorm:
                ier = 2
            if np.abs(act_red) <= ftol and pre_red <= ftol \
                    and p5 * ratio <= 1.0 and ier == 2:
                ier = 3
            if ier != 0:
                break

            # > test for termination and stringent tolerances
            if nfev >= maxfev:
                ier = 5
            if np.abs(act_red) <= eps_machine and pre_red <= \
                    eps_machine and p5 * ratio <= 1.0:
                ier = 6
            if delta <= eps_machine * xnorm:
                ier = 7
            if gnorm <= eps_machine:
                ier = 8
            if ier != 0:
                break
            if ratio >= p0001:
                break

        if ier != 0:
            break

    # endregion : Main loop

    # > wrap results
    errors = {0: ["Improper input parameters.", TypeError],
              1: ["Both actual and predicted relative reductions "
                  "in the sum of squares are at most %f * 1e-8" %
                  (ftol * 1e8), None],
              2: ["The relative error between two consecutive "
                  "iterates is at most %f * 1e-8" % (xtol * 1e8), None],
              3: ["Both actual and predicted relative reductions in "
                  "the sum of squares\n  are at most %f and the "
                  "relative error between two consecutive "
                  "iterates is at \n  most %f" % (ftol, xtol), None],
              4: ["The cosine of the angle between func(x) and any "
                  "column of the\n  Jacobian is at most %f in "
                  "absolute value" % gtol, None],
              5: ["Number of calls to function has reached "
                  "maxfev = %d." % maxfev, ValueError],
              6: ["ftol=%f is too small, no further reduction "
                  "in the sum of squares\n  is possible.""" % ftol,
                  ValueError],
              7: ["xtol=%f is too small, no further improvement in "
                  "the approximate\n  solution is possible." % xtol,
                  ValueError],
              8: ["gtol=%f is too small, func(x) is orthogonal to the "
                  "columns of\n  the Jacobian to machine "
                  "precision." % gtol, ValueError],
              'unknown': ["Unknown error.", TypeError]}

    if ier not in [1, 2, 3, 4] and not full_output:
        if ier in [5, 6, 7, 8]:
            print("!!! leastsq warning: %s" % errors[ier][0])

    mesg = errors[ier][0]

    if utility.wm_trace:
        print(">>> " + mesg)

    if full_output:
        cov_x = None
        if ier in [1, 2, 3, 4]:
            from numpy.dual import inv
            from numpy.linalg import LinAlgError
            perm = take(eye(n), ipvt - 1, 0)
            r = triu(transpose(fjac.reshape(n, m))[:n, :])
            R = dot(r, perm)
            try:
                cov_x = inv(dot(transpose(R), R))
            except (LinAlgError, ValueError):
                pass
        dct = {'fjac': fjac, 'fvec': fvec, 'ipvt': ipvt,
               'nfev': nfev, 'qtf': qtf}
        return x, cov_x, dct, mesg, ier
    else:
        return x, ier
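
A toy invocation of the lmdif implementation above, fitting an exponential decay; the model and data are illustrative, and the module-level globals (p1, p5, eps_machine, utility, data_type, ...) are assumed to be initialized as in the source module:

import numpy as np

def _decay_residuals(p, t, y):
    # residuals of the model y = p[0] * exp(-p[1] * t)
    return y - p[0] * np.exp(-p[1] * t)

t = np.linspace(0.0, 4.0, 40)
y = 3.0 * np.exp(-1.5 * t) + 0.01 * np.random.randn(40)

p_fit, cov_x, info, mesg, ier = lmdif(_decay_residuals, np.array([1.0, 1.0]),
                                      args=(t, y), full_output=1)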
Exemplo n.º 23
0
    def leastsq(self, **kws):
        """
        use Levenberg-Marquardt minimization to perform fit.
        This assumes that ModelParameters have been stored,
        and a function to minimize has been properly set up.

        This wraps scipy.optimize.leastsq, and keyword arguments are passed
        directly as options to scipy.optimize.leastsq

        When possible, this calculates the estimated uncertainties and
        variable correlations from the covariance matrix.

        writes outputs to many internal attributes, and
        returns True if fit was successful, False if not.
        """
        self.prepare_fit()
        lskws = dict(full_output=1, xtol=1.e-7, ftol=1.e-7,
                     gtol=1.e-7, maxfev=2000*(self.nvarys+1), Dfun=None)

        lskws.update(self.kws)
        lskws.update(kws)

        if lskws['Dfun'] is not None:
            self.jacfcn = lskws['Dfun']
            lskws['Dfun'] = self.__jacobian

        # suppress runtime warnings during fit and error analysis
        orig_warn_settings = np.geterr()
        np.seterr(all='ignore')
        lsout = scipy_leastsq(self.__residual, self.vars, **lskws)
        _best, _cov, infodict, errmsg, ier = lsout

        self.residual = resid = infodict['fvec']
        self.ier = ier
        self.lmdif_message = errmsg
        self.message = 'Fit succeeded.'
        self.success = ier in [1, 2, 3, 4]

        if ier == 0:
            self.message = 'Invalid Input Parameters.'
        elif ier == 5:
            self.message = self.err_maxfev % lskws['maxfev']
        else:
            self.message = 'Tolerance seems to be too small.'

        self.nfev = infodict['nfev']
        self.ndata = len(resid)

        sum_sqr = (resid**2).sum()
        self.chisqr = sum_sqr
        self.nfree = (self.ndata - self.nvarys)
        self.redchi = sum_sqr / self.nfree

        # need to map _best values to params, then calculate the
        # grad for the variable parameters
        grad = ones_like(_best)
        vbest = ones_like(_best)

        # ensure that _best, vbest, and grad are not
        # broken 1-element ndarrays.
        if len(np.shape(_best)) == 0:
            _best = np.array([_best])
        if len(np.shape(vbest)) == 0:
            vbest = np.array([vbest])
        if len(np.shape(grad)) == 0:
            grad = np.array([grad])

        for ivar, varname in enumerate(self.var_map):
            par = self.params[varname]
            grad[ivar] = par.scale_gradient(_best[ivar])
            vbest[ivar] = par.value

        # modified from JJ Helmus' leastsqbound.py
        infodict['fjac'] = transpose(transpose(infodict['fjac']) /
                                     take(grad, infodict['ipvt'] - 1))
        rvec = dot(triu(transpose(infodict['fjac'])[:self.nvarys, :]),
                   take(eye(self.nvarys), infodict['ipvt'] - 1, 0))
        try:
            self.covar = inv(dot(transpose(rvec), rvec))
        except (LinAlgError, ValueError):
            self.covar = None

        has_expr = False
        for par in self.params.values():
            par.stderr, par.correl = 0, None
            has_expr = has_expr or par.expr is not None

        if self.covar is not None:
            if self.scale_covar:
                self.covar = self.covar * sum_sqr / self.nfree

            for ivar, varname in enumerate(self.var_map):
                par = self.params[varname]
                par.stderr = sqrt(self.covar[ivar, ivar])
                par.correl = {}
                for jvar, varn2 in enumerate(self.var_map):
                    if jvar != ivar:
                        par.correl[varn2] = (self.covar[ivar, jvar] /
                             (par.stderr * sqrt(self.covar[jvar, jvar])))

            uvars = None
            if has_expr:
                # uncertainties on constrained parameters:
                #   get values with uncertainties (including correlations),
                #   temporarily set Parameter values to these,
                #   re-evaluate constrained parameters to extract stderr
                #   and then set Parameters back to best-fit value
                try:
                    uvars = uncertainties.correlated_values(vbest, self.covar)
                except (LinAlgError, ValueError):
                    uvars = None

                if uvars is not None:
                    for pname, par in self.params.items():
                        eval_stderr(par, uvars, self.var_map,
                                    self.params, self.asteval)
                    # restore nominal values
                    for v, nam in zip(uvars, self.var_map):
                        self.asteval.symtable[nam] = v.nominal_value

        self.errorbars = True
        if self.covar is None:
            self.errorbars = False
            self.message = '%s. Could not estimate error-bars' % self.message

        np.seterr(**orig_warn_settings)
        self.unprepare_fit()
        return self.success
Exemplo n.º 24
0
def SN(alpha, beta, phi):
    betaPhiTphi = beta * dot(phi.transpose(), phi)
    alphaI = alpha * identity(betaPhiTphi.shape[0])
    SNinverse = alphaI + betaPhiTphi
    return inv(SNinverse)
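
SN above is the posterior covariance of Bayesian linear regression, S_N = (alpha*I + beta*Phi^T*Phi)^(-1) (Bishop, PRML eq. 3.54); a matching sketch of the posterior mean m_N = beta * S_N * Phi^T * t, assuming the same flat numpy namespace (dot) as the code above; this helper is a hypothetical companion, not part of the original module:

def mN(alpha, beta, phi, t):
    # posterior mean for a zero-mean Gaussian prior with precision alpha
    # and observation noise precision beta
    return beta * dot(dot(SN(alpha, beta, phi), phi.transpose()), t)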
Exemplo n.º 25
0
    def leastsq(self, scale_covar=True, **kws):
        """
        use Levenberg-Marquardt minimization to perform fit.
        This assumes that ModelParameters have been stored,
        and a function to minimize has been properly set up.

        This wraps scipy.optimize.leastsq, and keyword arguments are passed
        directly as options to scipy.optimize.leastsq

        When possible, this calculates the estimated uncertainties and
        variable correlations from the covariance matrix.

        writes outputs to many internal attributes, and
        returns True if fit was successful, False if not.
        """
        # print 'RUNNING LEASTSQ'
        self.prepare_fit()
        lskws = dict(full_output=1, xtol=1.0e-7, ftol=1.0e-7, gtol=1.0e-7, maxfev=2000 * (self.nvarys + 1), Dfun=None)

        lskws.update(self.kws)
        lskws.update(kws)

        if lskws["Dfun"] is not None:
            self.jacfcn = lskws["Dfun"]
            lskws["Dfun"] = self.__jacobian

        lsout = scipy_leastsq(self.__residual, self.vars, **lskws)
        _best, _cov, infodict, errmsg, ier = lsout

        self.residual = resid = infodict["fvec"]

        self.ier = ier
        self.lmdif_message = errmsg
        self.message = "Fit succeeded."
        self.success = ier in [1, 2, 3, 4]

        if ier == 0:
            self.message = "Invalid Input Parameters."
        elif ier == 5:
            self.message = self.err_maxfev % lskws["maxfev"]
        else:
            self.message = "Tolerance seems to be too small."

        self.nfev = infodict["nfev"]
        self.ndata = len(resid)

        sum_sqr = (resid ** 2).sum()
        self.chisqr = sum_sqr
        self.nfree = self.ndata - self.nvarys
        self.redchi = sum_sqr / self.nfree

        # need to map _best values to params, then calculate the
        # grad for the variable parameters
        grad = ones_like(_best)
        vbest = ones_like(_best)
        for ivar, varname in enumerate(self.var_map):
            par = self.params[varname]
            grad[ivar] = par.scale_gradient(_best[ivar])
            vbest[ivar] = par.value
        # modified from JJ Helmus' leastsqbound.py
        infodict["fjac"] = transpose(transpose(infodict["fjac"]) / take(grad, infodict["ipvt"] - 1))
        rvec = dot(triu(transpose(infodict["fjac"])[: self.nvarys, :]), take(eye(self.nvarys), infodict["ipvt"] - 1, 0))
        try:
            cov = inv(dot(transpose(rvec), rvec))
        except (LinAlgError, ValueError):
            cov = None

        for par in self.params.values():
            par.stderr, par.correl = 0, None

        self.covar = cov
        if cov is None:
            self.errorbars = False
            self.message = "%s. Could not estimate error-bars"
        else:
            self.errorbars = True
            if self.scale_covar:
                self.covar = cov = cov * sum_sqr / self.nfree
            for ivar, varname in enumerate(self.var_map):
                par = self.params[varname]
                par.stderr = sqrt(cov[ivar, ivar])
                par.correl = {}
                for jvar, varn2 in enumerate(self.var_map):
                    if jvar != ivar:
                        par.correl[varn2] = cov[ivar, jvar] / (par.stderr * sqrt(cov[jvar, jvar]))

        # set uncertainties on constrained parameters.
        # Note that first we set all named params to
        # have values that include uncertainties, then
        # evaluate all constrained parameters, then set
        # the values back to the nominal values.
        if HAS_UNCERT and self.covar is not None:
            uvars = uncertainties.correlated_values(vbest, self.covar)
            for v, nam in zip(uvars, self.var_map):
                self.asteval.symtable[nam] = v

            for pname, par in self.params.items():
                if hasattr(par, "ast"):
                    try:
                        out = self.asteval.run(par.ast)
                        par.stderr = out.std_dev()
                    except Exception:
                        pass

            for v, nam in zip(uvars, self.var_map):
                self.asteval.symtable[nam] = v.nominal_value

        for par in self.params.values():
            if hasattr(par, "ast"):
                delattr(par, "ast")
        return self.success
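
A standalone sketch of the uncertainties workflow used above: correlated_values builds ufloat objects that carry the covariance, so derived expressions propagate standard errors automatically. The values here are hypothetical; note that recent versions of the package expose std_dev as a property, while the code above uses the older method-style std_dev():

import numpy as np
import uncertainties

best = [2.0, 1.0]
covar = np.array([[0.04, 0.01],
                  [0.01, 0.09]])
a, b = uncertainties.correlated_values(best, covar)
ratio = a / b  # derived quantity with propagated, correlated uncertainty
print(ratio.nominal_value, ratio.std_dev)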
Exemplo n.º 26
0
def leastsq(func, x0, args=(), Dfun=None, full_output=0, col_deriv=0,
            ftol=1.49012e-8, xtol=1.49012e-8, gtol=0.0, maxfev=0,
            epsfcn=0.0, factor=100, diag=None, warning=True):
    """Minimize the sum of squares of a set of equations.

  Description:

    Return the point which minimizes the sum of squares of M
    (non-linear) equations in N unknowns given a starting estimate, x0,
    using a modification of the Levenberg-Marquardt algorithm.

                    x = arg min(sum(func(y)**2,axis=0))
                             y

  Inputs:

    func -- A Python function or method which takes at least one
            (possibly length N vector) argument and returns M
            floating point numbers.
    x0 -- The starting estimate for the minimization.
    args -- Any extra arguments to func are placed in this tuple.
    Dfun -- A function or method to compute the Jacobian of func with
            derivatives across the rows. If this is None, the
            Jacobian will be estimated.
    full_output -- non-zero to return all optional outputs.
    col_deriv -- non-zero to specify that the Jacobian function
                 computes derivatives down the columns (faster, because
                 there is no transpose operation).
    warning -- True to print a warning message when the call is
               unsuccessful; False to suppress the warning message.

  Outputs: (x, {cov_x, infodict, mesg}, ier)

    x -- the solution (or the result of the last iteration for an
         unsuccessful call).

    cov_x -- uses the fjac and ipvt optional outputs to construct an
             estimate of the jacobian around the solution.
             None if a singular matrix encountered (indicates
             very flat curvature in some direction).  This
             matrix must be multiplied by the residual standard
             deviation to get the covariance of the parameter 
             estimates --- see curve_fit.
    infodict -- a dictionary of optional outputs with the keys:
                'nfev' : the number of function calls
                'fvec' : the function evaluated at the output
                'fjac' : A permutation of the R matrix of a QR
                         factorization of the final approximate
                         Jacobian matrix, stored column wise.
                         Together with ipvt, the covariance of the
                         estimate can be approximated.
                'ipvt' : an integer array of length N which defines
                         a permutation matrix, p, such that
                         fjac*p = q*r, where r is upper triangular
                         with diagonal elements of nonincreasing
                         magnitude. Column j of p is column ipvt(j)
                         of the identity matrix.
                'qtf'  : the vector (transpose(q) * fvec).
    mesg -- a string message giving information about the cause of failure.
    ier -- an integer flag.  If it is equal to 1, 2, 3 or 4, the
           solution was found.  Otherwise, the solution was not
           found. In either case, the optional output variable 'mesg'
           gives more information.


  Extended Inputs:

   ftol -- Relative error desired in the sum of squares.
   xtol -- Relative error desired in the approximate solution.
   gtol -- Orthogonality desired between the function vector
           and the columns of the Jacobian.
   maxfev -- The maximum number of calls to the function. If zero,
             then 100*(N+1) is the maximum where N is the number
             of elements in x0.
   epsfcn -- A suitable step length for the forward-difference
             approximation of the Jacobian (for Dfun=None). If
             epsfcn is less than the machine precision, it is assumed
             that the relative errors in the functions are of
             the order of the machine precision.
   factor -- A parameter determining the initial step bound
             (factor * || diag * x||). Should be in interval (0.1,100).
   diag -- A sequence of N positive entries that serve as
           scale factors for the variables.

  Remarks:

    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.

  See also:

      scikits.openopt, which offers a unified syntax to call this and other solvers

      fmin, fmin_powell, fmin_cg,
             fmin_bfgs, fmin_ncg -- multivariate local optimizers

      fmin_l_bfgs_b, fmin_tnc,
             fmin_cobyla -- constrained multivariate optimizers

      anneal, brute -- global optimizers

      fminbound, brent, golden, bracket -- local scalar minimizers

      fsolve -- n-dimensional root-finding

      brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding

      fixed_point -- scalar and vector fixed-point finder

      curve_fit -- find parameters for a curve-fitting problem. 

    """
    x0 = array(x0,ndmin=1)
    n = len(x0)
    if not isinstance(args, tuple):
        args = (args,)
    m = check_func(func,x0,args,n)[0]
    if Dfun is None:
        if (maxfev == 0):
            maxfev = 200*(n+1)
        retval = _minpack._lmdif(func,x0,args,full_output,ftol,xtol,gtol,maxfev,epsfcn,factor,diag)
    else:
        if col_deriv:
            check_func(Dfun,x0,args,n,(n,m))
        else:
            check_func(Dfun,x0,args,n,(m,n))
        if (maxfev == 0):
            maxfev = 100*(n+1)
        retval = _minpack._lmder(func,Dfun,x0,args,full_output,col_deriv,ftol,xtol,gtol,maxfev,factor,diag)

    errors = {0:["Improper input parameters.", TypeError],
              1:["Both actual and predicted relative reductions in the sum of squares\n  are at most %f" % ftol, None],
              2:["The relative error between two consecutive iterates is at most %f" % xtol, None],
              3:["Both actual and predicted relative reductions in the sum of squares\n  are at most %f and the relative error between two consecutive iterates is at \n  most %f" % (ftol,xtol), None],
              4:["The cosine of the angle between func(x) and any column of the\n  Jacobian is at most %f in absolute value" % gtol, None],
              5:["Number of calls to function has reached maxfev = %d." % maxfev, ValueError],
              6:["ftol=%f is too small, no further reduction in the sum of squares\n  is possible.""" % ftol, ValueError],
              7:["xtol=%f is too small, no further improvement in the approximate\n  solution is possible." % xtol, ValueError],
              8:["gtol=%f is too small, func(x) is orthogonal to the columns of\n  the Jacobian to machine precision." % gtol, ValueError],
              'unknown':["Unknown error.", TypeError]}

    info = retval[-1]    # The FORTRAN return value

    if (info not in [1,2,3,4] and not full_output):
        if info in [5,6,7,8]:
            if warning:
                print("Warning: " + errors[info][0])
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                raise errors['unknown'][1](errors['unknown'][0])

    if n == 1:
        retval = (retval[0][0],) + retval[1:]

    mesg = errors[info][0]
    if full_output:
        from numpy.dual import inv
        from numpy.linalg import LinAlgError
        perm = take(eye(n),retval[1]['ipvt']-1,0)
        r = triu(transpose(retval[1]['fjac'])[:n,:])
        R = dot(r, perm)
        try:
            cov_x = inv(dot(transpose(R),R))
        except LinAlgError:
            cov_x = None
        return (retval[0], cov_x) + retval[1:-1] + (mesg,info)
    else:
        return (retval[0], info)
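
As the docstring above notes, cov_x must be scaled by the residual variance to become the covariance of the parameter estimates (this mirrors what curve_fit does); a sketch with a hypothetical helper:

def scaled_covariance(cov_x, fvec, n_params):
    # s^2 = SSR / (M - N) is the residual variance; cov_x * s^2
    # estimates the covariance of the fitted parameters
    s_sq = (fvec ** 2).sum() / (len(fvec) - n_params)
    return cov_x * s_sq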
Exemplo n.º 27
0
def leastsqbound(func, x0, args=(), bounds=None, Dfun=None, full_output=0,
                 col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
                 gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
    """
    Bounded minimization of the sum of squares of a set of equations.

    ::

        x = arg min(sum(func(y)**2,axis=0))
                 y

    Parameters
    ----------
    func : callable
        should take at least one (possibly length N vector) argument and
        return M floating point numbers.
    x0 : ndarray
        The starting estimate for the minimization.
    args : tuple
        Any extra arguments to func are placed in this tuple.
    bounds : list
        ``(min, max)`` pairs for each element in ``x``, defining
        the bounds on that parameter. Use None for one of ``min`` or
        ``max`` when there is no bound in that direction.
    Dfun : callable
        A function or method to compute the Jacobian of func with derivatives
        across the rows. If this is None, the Jacobian will be estimated.
    full_output : bool
        non-zero to return all optional outputs.
    col_deriv : bool
        non-zero to specify that the Jacobian function computes derivatives
        down the columns (faster, because there is no transpose operation).
    ftol : float
        Relative error desired in the sum of squares.
    xtol : float
        Relative error desired in the approximate solution.
    gtol : float
        Orthogonality desired between the function vector and the columns of
        the Jacobian.
    maxfev : int
        The maximum number of calls to the function. If zero, then 100*(N+1) is
        the maximum where N is the number of elements in x0.
    epsfcn : float
        A suitable step length for the forward-difference approximation of the
        Jacobian (for Dfun=None). If epsfcn is less than the machine precision,
        it is assumed that the relative errors in the functions are of the
        order of the machine precision.
    factor : float
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
    diag : sequence
        N positive entries that serve as a scale factors for the variables.

    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for an unsuccessful
        call).
    cov_x : ndarray
        Uses the fjac and ipvt optional outputs to construct an
        estimate of the jacobian around the solution.  ``None`` if a
        singular matrix encountered (indicates very flat curvature in
        some direction).  This matrix must be multiplied by the
        residual standard deviation to get the covariance of the
        parameter estimates -- see curve_fit.
    infodict : dict
        a dictionary of optional outputs with the keys::

            - 'nfev' : the number of function calls
            - 'fvec' : the function evaluated at the output
            - 'fjac' : A permutation of the R matrix of a QR
                     factorization of the final approximate
                     Jacobian matrix, stored column wise.
                     Together with ipvt, the covariance of the
                     estimate can be approximated.
            - 'ipvt' : an integer array of length N which defines
                     a permutation matrix, p, such that
                     fjac*p = q*r, where r is upper triangular
                     with diagonal elements of nonincreasing
                     magnitude. Column j of p is column ipvt(j)
                     of the identity matrix.
            - 'qtf'  : the vector (transpose(q) * fvec).

    mesg : str
        A string message giving information about the cause of failure.
    ier : int
        An integer flag.  If it is equal to 1, 2, 3 or 4, the solution was
        found.  Otherwise, the solution was not found. In either case, the
        optional output variable 'mesg' gives more information.

    Notes
    -----
    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.

    cov_x is a Jacobian approximation to the Hessian of the least squares
    objective function.
    This approximation assumes that the objective function is based on the
    difference between some observed target data (ydata) and a (non-linear)
    function of the parameters `f(xdata, params)` ::

           func(params) = ydata - f(xdata, params)

    so that the objective function is ::

           min   sum((ydata - f(xdata, params))**2, axis=0)
         params

    Constraints on the parameters are enforced using an internal parameter list
    with appropriate transformations such that these internal parameters can be
    optimized without constraints. The transformation between a given internal
    parameter, p_i, and an external parameter, p_e, is as follows:

    With ``min`` and ``max`` bounds defined ::

        p_i = arcsin((2 * (p_e - min) / (max - min)) - 1.)
        p_e = min + ((max - min) / 2.) * (sin(p_i) + 1.)

    With only ``max`` defined ::

        p_i = sqrt((max - p_e + 1.)**2 - 1.)
        p_e = max + 1. - sqrt(p_i**2 + 1.)

    With only ``min`` defined ::

        p_i = sqrt((p_e - min + 1.)**2 - 1.)
        p_e = min - 1. + sqrt(p_i**2 + 1.)

    These transformations are used in the MINUIT package, and described in
    detail in section 1.3.1 of the MINUIT User's Guide.

    To Do
    -----
    Currently the ``factor`` and ``diag`` parameters scale the
    internal parameter list, but should scale the external parameter list.

    The `qtf` vector in the infodict dictionary reflects the internal parameter
    list; it should be corrected to reflect the external parameter list.

    References
    ----------
    * F. James and M. Winkler. MINUIT User's Guide, July 16, 2004.

    """
    # use leastsq if no bounds are present
    if bounds is None:
        return leastsq(func, x0, args, Dfun, full_output, col_deriv,
                       ftol, xtol, gtol, maxfev, epsfcn, factor, diag)

    # create functions that convert between internal and external parameters
    i2e = _internal2external_func(bounds)
    e2i = _external2internal_func(bounds)

    x0 = asarray(x0).flatten()
    i0 = e2i(x0)
    n = len(x0)
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')
    if not isinstance(args, tuple):
        args = (args,)
    shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
    m = shape[0]
    if n > m:
        raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
    if epsfcn is None:
        epsfcn = finfo(dtype).eps

    # define a wrapped func which accepts internal parameters, converts them
    # to external parameters and calls func
    def wfunc(x, *args):
        return func(i2e(x), *args)

    if Dfun is None:
        if (maxfev == 0):
            maxfev = 200 * (n + 1)
        retval = _minpack._lmdif(wfunc, i0, args, full_output, ftol, xtol,
                                 gtol, maxfev, epsfcn, factor, diag)
    else:
        if col_deriv:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
        else:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
        if (maxfev == 0):
            maxfev = 100 * (n + 1)

        def wDfun(x, *args):  # wrapped Dfun
            return Dfun(i2e(x), *args)

        retval = _minpack._lmder(wfunc, wDfun, i0, args, full_output,
                                 col_deriv, ftol, xtol, gtol, maxfev,
                                 factor, diag)

    errors = {0: ["Improper input parameters.", TypeError],
              1: ["Both actual and predicted relative reductions "
                  "in the sum of squares\n  are at most %f" % ftol, None],
              2: ["The relative error between two consecutive "
                  "iterates is at most %f" % xtol, None],
              3: ["Both actual and predicted relative reductions in "
                  "the sum of squares\n  are at most %f and the "
                  "relative error between two consecutive "
                  "iterates is at \n  most %f" % (ftol, xtol), None],
              4: ["The cosine of the angle between func(x) and any "
                  "column of the\n  Jacobian is at most %f in "
                  "absolute value" % gtol, None],
              5: ["Number of calls to function has reached "
                  "maxfev = %d." % maxfev, ValueError],
              6: ["ftol=%f is too small, no further reduction "
                  "in the sum of squares\n  is possible.""" % ftol,
                  ValueError],
              7: ["xtol=%f is too small, no further improvement in "
                  "the approximate\n  solution is possible." % xtol,
                  ValueError],
              8: ["gtol=%f is too small, func(x) is orthogonal to the "
                  "columns of\n  the Jacobian to machine "
                  "precision." % gtol, ValueError],
              'unknown': ["Unknown error.", TypeError]}

    info = retval[-1]    # The FORTRAN return value

    if (info not in [1, 2, 3, 4] and not full_output):
        if info in [5, 6, 7, 8]:
            warnings.warn(errors[info][0], RuntimeWarning)
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                raise errors['unknown'][1](errors['unknown'][0])

    mesg = errors[info][0]
    x = i2e(retval[0])  # internal params to external params

    if full_output:
        # convert fjac from internal params to external
        grad = _internal2external_grad(retval[0], bounds)
        retval[1]['fjac'] = (retval[1]['fjac'].T / take(grad,
                             retval[1]['ipvt'] - 1)).T
        cov_x = None
        if info in [1, 2, 3, 4]:
            from numpy.dual import inv
            from numpy.linalg import LinAlgError
            perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
            r = triu(transpose(retval[1]['fjac'])[:n, :])
            R = dot(r, perm)
            try:
                cov_x = inv(dot(transpose(R), R))
            except LinAlgError:
                pass
        return (x, cov_x) + retval[1:-1] + (mesg, info)
    else:
        return (x, info)
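A minimal, self-contained sketch (not part of the original example) of the sin/sqrt bound transformations documented in the docstring above; the bound pair and test values are invented for illustration.

import numpy as np

# Hypothetical (min, max) pair used only for illustration.
lower, upper = 1.0, 5.0

# internal -> external: maps the unconstrained axis into (lower, upper)
def i2e(p_int):
    return lower + ((upper - lower) / 2.0) * (np.sin(p_int) + 1.0)

# external -> internal: inverse mapping for a value inside the bounds
def e2i(p_ext):
    return np.arcsin((2.0 * (p_ext - lower) / (upper - lower)) - 1.0)

p_ext = 2.5
assert np.isclose(i2e(e2i(p_ext)), p_ext)  # round trip recovers the value
assert lower <= i2e(100.0) <= upper        # any internal value stays in bounds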
Exemplo n.º 28
0
Arquivo: util.py Projeto: puwe/DAMASK
def leastsqBound(func, x0, args=(), bounds=None, Dfun=None, full_output=0,
                col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
                gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
  """
  Non-linear least squares fitting (Levenberg-Marquardt method) with
  bounded parameters.

  The internal <-> external transformation code follows the work of
  Jonathan J. Helmus: https://github.com/jjhelmus/leastsqbound-scipy
  The remaining code follows the source of minpack.py:
  ..\Lib\site-packages\scipy\optimize\minpack.py

  An internal parameter list is used to enforce constraints on the fitting
  parameters. The transformation is based on that of the MINUIT package;
  see: F. James and M. Winkler. MINUIT User's Guide, 2004.

  bounds : list
    (min, max) pairs for each parameter; use None for 'min' or 'max'
    when there is no bound in that direction.
    For example, if two parameters are to be fitted, then
    bounds is [(min1, max1), (min2, max2)]

  This function is based on 'leastsq' of minpack.py; the annotations of
  the remaining parameters can be found in 'leastsq'.
  """
  from scipy.optimize import _minpack
 
  def _check_func(checker, argname, thefunc, x0, args, numinputs,
                output_shape=None):
    """The same as that of minpack.py"""
    res = np.atleast_1d(thefunc(*((x0[:numinputs],) + args)))
    if (output_shape is not None) and (np.shape(res) != output_shape):
        if (output_shape[0] != 1):
            if len(output_shape) > 1:
                if output_shape[1] == 1:
                    return np.shape(res)
            msg = "%s: there is a mismatch between the input and output " \
                  "shape of the '%s' argument" % (checker, argname)
            func_name = getattr(thefunc, '__name__', None)
            if func_name:
                msg += " '%s'." % func_name
            else:
                msg += "."
            raise TypeError(msg)
    if np.issubdtype(res.dtype, np.inexact):
        dt = res.dtype
    else:
        dt = np.dtype(float)
    return np.shape(res), dt
  
  def _int2extGrad(p_int, bounds):
    """Calculate the gradients of transforming the internal (unconstrained) to external (constrained) parameter."""
    grad = np.empty_like(p_int)
    for i, (x, bound) in enumerate(zip(p_int, bounds)):
        lower, upper = bound
        if lower is None and upper is None:  # No constraints
            grad[i] = 1.0
        elif upper is None:                  # only lower bound
            grad[i] =  x/np.sqrt(x*x + 1.0)
        elif lower is None:                  # only upper bound
            grad[i] = -x/np.sqrt(x*x + 1.0)
        else:                                # lower and upper bounds
            grad[i] = (upper - lower)*np.cos(x)/2.0
    return grad
  
  def _int2extFunc(bounds):
    """transform internal parameters into external parameters."""
    local = [_int2extLocal(b) for b in bounds]
    def _transform_i2e(p_int):
        p_ext = np.empty_like(p_int)
        p_ext[:] = [i(j) for i, j in zip(local, p_int)]
        return p_ext
    return _transform_i2e
  
  def _ext2intFunc(bounds):
    """transform external parameters into internal parameters."""
    local = [_ext2intLocal(b) for b in bounds]
    def _transform_e2i(p_ext):
        p_int = np.empty_like(p_ext)
        p_int[:] = [i(j) for i, j in zip(local, p_ext)]
        return p_int
    return _transform_e2i

  def _int2extLocal(bound):
    """transform a single internal parameter to an external parameter."""
    lower, upper = bound
    if lower is None and upper is None:      # no constraints
        return lambda x: x
    elif upper is None:                      # only lower bound
        return lambda x: lower - 1.0 + np.sqrt(x*x + 1.0)
    elif lower is None:                      # only upper bound
        return lambda x: upper + 1.0 - np.sqrt(x*x + 1.0)
    else:
        return lambda x: lower + ((upper - lower)/2.0)*(np.sin(x) + 1.0)
  
  def _ext2intLocal(bound):
    """transform a single external parameter to an internal parameter."""
    lower, upper = bound
    if lower is None and upper is None:  # no constraints
        return lambda x: x
    elif upper is None:                  # only lower bound
        return lambda x: np.sqrt((x - lower + 1.0)**2 - 1.0)
    elif lower is None:                  # only upper bound
        return lambda x: np.sqrt((x - upper - 1.0)**2 - 1.0)
    else:
        return lambda x: np.arcsin((2.0*(x - lower)/(upper - lower)) - 1.0)
  
  i2e = _int2extFunc(bounds)
  e2i = _ext2intFunc(bounds)
  
  x0 = np.asarray(x0).flatten()
  n = len(x0)

  if len(bounds) != n:
      raise ValueError('the length of bounds is inconsistent with the number of parameters')
  
  if not isinstance(args, tuple):
      args = (args,)
  
  shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
  m = shape[0]

  if n > m:
      raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
  if epsfcn is None:
      epsfcn = np.finfo(dtype).eps

  def funcWarp(x, *args):
      return func(i2e(x), *args)

  xi0 = e2i(x0)
  
  if Dfun is None:
      if maxfev == 0:
          maxfev = 200*(n + 1)
      retval = _minpack._lmdif(funcWarp, xi0, args, full_output, ftol, xtol,
                               gtol, maxfev, epsfcn, factor, diag)
  else:
      if col_deriv:
          _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
      else:
          _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
      if maxfev == 0:
          maxfev = 100*(n + 1)

      def DfunWarp(x, *args):
          return Dfun(i2e(x), *args)

      retval = _minpack._lmder(funcWarp, DfunWarp, xi0, args, full_output, col_deriv,
                               ftol, xtol, gtol, maxfev, factor, diag)

  errors = {0: ["Improper input parameters.", TypeError],
            1: ["Both actual and predicted relative reductions "
                "in the sum of squares\n  are at most %f" % ftol, None],
            2: ["The relative error between two consecutive "
                "iterates is at most %f" % xtol, None],
            3: ["Both actual and predicted relative reductions in "
                "the sum of squares\n  are at most %f and the "
                "relative error between two consecutive "
                "iterates is at \n  most %f" % (ftol, xtol), None],
            4: ["The cosine of the angle between func(x) and any "
                "column of the\n  Jacobian is at most %f in "
                "absolute value" % gtol, None],
            5: ["Number of calls to function has reached "
                "maxfev = %d." % maxfev, ValueError],
            6: ["ftol=%f is too small, no further reduction "
                "in the sum of squares\n  is possible.""" % ftol,
                ValueError],
            7: ["xtol=%f is too small, no further improvement in "
                "the approximate\n  solution is possible." % xtol,
                ValueError],
            8: ["gtol=%f is too small, func(x) is orthogonal to the "
                "columns of\n  the Jacobian to machine "
                "precision." % gtol, ValueError],
            'unknown': ["Unknown error.", TypeError]}

  info = retval[-1]    # The FORTRAN return value
  
  if info not in [1, 2, 3, 4] and not full_output:
      if info in [5, 6, 7, 8]:
          warnings.warn(errors[info][0], RuntimeWarning)  # requires 'import warnings'; np.warnings is deprecated
      else:
          try:
              raise errors[info][1](errors[info][0])
          except KeyError:
              raise errors['unknown'][1](errors['unknown'][0])

  mesg = errors[info][0]
  x = i2e(retval[0])

  if full_output:
      grad = _int2extGrad(retval[0], bounds)
      retval[1]['fjac'] = (retval[1]['fjac'].T / np.take(grad,
                           retval[1]['ipvt'] - 1)).T
      cov_x = None
      if info in [1, 2, 3, 4]:
          from numpy.dual import inv
          from numpy.linalg import LinAlgError
          perm = np.take(np.eye(n), retval[1]['ipvt'] - 1, 0)
          r = np.triu(np.transpose(retval[1]['fjac'])[:n, :])
          R = np.dot(r, perm)
          try:
              cov_x = inv(np.dot(np.transpose(R), R))
          except LinAlgError as inverror:
              print(inverror)
      return (x, cov_x) + retval[1:-1] + (mesg, info)
  else:
      return (x, info)
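A hypothetical usage sketch for the leastsqBound above; the model, data, and bounds are invented for illustration and assume numpy is imported as np and leastsqBound is in scope.

import numpy as np

def residuals(p, x, y):
    # residuals of a toy exponential-decay model
    a, b = p
    return y - a * np.exp(-b * x)

x = np.linspace(0.0, 4.0, 50)
y = 2.5 * np.exp(-1.3 * x) + 0.05 * np.random.standard_normal(50)

# one (min, max) pair per parameter; None leaves that side unbounded
p_fit, ier = leastsqBound(residuals, np.array([1.0, 1.0]), args=(x, y),
                          bounds=[(0.0, 10.0), (0.0, None)])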
Exemplo n.º 29
0
    def leastsq(self, **kws):
        """
        use Levenberg-Marquardt minimization to perform fit.
        This assumes that ModelParameters have been stored,
        and a function to minimize has been properly set up.

        This wraps scipy.optimize.leastsq, and keyword arguments are passed
        directly as options to scipy.optimize.leastsq

        When possible, this calculates the estimated uncertainties and
        variable correlations from the covariance matrix.

        writes outputs to many internal attributes, and
        returns True if fit was successful, False if not.
        """
        self.prepare_fit(force=True)
        toler = self.toler
        lskws = dict(xtol=toler,
                     ftol=toler,
                     gtol=toler,
                     maxfev=1000 * (self.nvarys + 1),
                     Dfun=None)
        lskws.update(self.kws)
        lskws.update(kws)

        if lskws['Dfun'] is not None:
            self.jacfcn = lskws['Dfun']
            lskws['Dfun'] = self.__jacobian

        lsout = leastsq(self.__residual, self.vars, **lskws)
        del self.vars

        _best, cov, infodict, errmsg, ier = lsout
        resid = infodict['fvec']
        ndata = len(resid)
        chisqr = (resid**2).sum()
        nfree = ndata - self.nvarys
        redchi = chisqr / nfree

        group = self.paramgroup
        # need to map _best values to params, then calculate the
        # grad for the variable parameters
        grad = ones_like(_best)  # holds scaled gradient for variables
        vbest = ones_like(_best)  # holds best values for variables
        named_params = {}  # var names : parameter object
        for ivar, name in enumerate(self.var_names):
            named_params[name] = par = getattr(group, name)
            grad[ivar] = par.scale_gradient(_best[ivar])
            vbest[ivar] = par.value
            par.stderr = 0
            par.correl = {}
            par._uval = None
        # modified from JJ Helmus' leastsqbound.py
        # compute covariance matrix here explicitly...
        infodict['fjac'] = transpose(
            transpose(infodict['fjac']) / take(grad, infodict['ipvt'] - 1))

        rvec = dot(triu(transpose(infodict['fjac'])[:self.nvarys, :]),
                   take(eye(self.nvarys), infodict['ipvt'] - 1, 0))
        try:
            cov = inv(dot(transpose(rvec), rvec))
        except (LinAlgError, ValueError):
            cov = None

        # map covariance matrix to parameter uncertainties
        # and correlations
        if cov is not None:
            if self.scale_covar:
                cov = cov * chisqr / nfree

            # uncertainties for constrained parameters:
            #   get values with uncertainties (including correlations),
            #   temporarily set Parameter values to these,
            #   re-evaluate constrained parameters to extract stderr
            #   and then set Parameters back to best-fit value
            try:
                uvars = uncertainties.correlated_values(vbest, cov)
            except (LinAlgError, ValueError):
                cov, uvars = None, None
            group.covar_vars = self.var_names
            group.covar = cov

            if uvars is not None:
                # set stderr and correlations for variable, named parameters:
                for iv, name in enumerate(self.var_names):
                    p = named_params[name]
                    p.stderr = uvars[iv].std_dev()
                    p._uval = uvars[iv]
                    p.correl = {}
                    for jv, name2 in enumerate(self.var_names):
                        if jv != iv:
                            p.correl[name2] = (cov[iv, jv] /
                                               (p.stderr * sqrt(cov[jv, jv])))
                for nam in dir(self.paramgroup):
                    obj = getattr(self.paramgroup, nam)
                    eval_stderr(obj, uvars, self.var_names, named_params,
                                self._larch)

                # restore nominal values that may have been tweaked to
                # calculate other stderrs
                for uval, nam in zip(uvars, self.var_names):
                    named_params[nam]._val = uval.nominal_value

            # clear any errors evaluating uncertainties
            if self._larch.error:
                self._larch.error = []

        # collect results for output group
        message = 'Fit succeeded.'

        if ier == 0:
            message = 'Invalid Input Parameters.'
        elif ier == 5:
            message = self.err_maxfev % lskws['maxfev']
        elif ier > 5:
            message = 'See lmdif_message.'
        if cov is None:
            message = '%s Could not estimate error-bars' % message

        ofit = group
        if Group is not None:
            ofit = group.fit_details = Group()

        ofit.method = 'leastsq'
        ofit.fjac = infodict['fjac']
        ofit.fvec = infodict['fvec']
        ofit.qtf = infodict['qtf']
        ofit.ipvt = infodict['ipvt']
        ofit.nfev = infodict['nfev']
        ofit.status = ier
        ofit.message = errmsg
        ofit.success = ier in [1, 2, 3, 4]
        ofit.toler = self.toler

        group.residual = resid
        group.message = message
        group.chi_square = chisqr
        group.chi_reduced = redchi
        group.nvarys = self.nvarys
        group.nfree = nfree
        group.errorbars = cov is not None
        return ier
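A self-contained sketch (with synthetic numbers) of the covariance-to-uncertainty mapping the method above performs: scale the covariance by reduced chi-square, read standard errors off the diagonal, and normalize the off-diagonal entries into correlations.

import numpy as np

# Hypothetical covariance for two fitted parameters.
cov = np.array([[0.04, 0.01],
                [0.01, 0.09]])
chisqr, nfree = 12.0, 10
cov_scaled = cov * chisqr / nfree               # the scale_covar step

stderr = np.sqrt(np.diag(cov_scaled))           # 1-sigma uncertainties
correl = cov_scaled / np.outer(stderr, stderr)  # correlation matrix
assert np.allclose(np.diag(correl), 1.0)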
Exemplo n.º 30
0
def mixgauss_prob(data, mu, Sigma, mixmat=None, unit_norm=False):
    """
    % EVAL_PDF_COND_MOG Evaluate the pdf of a conditional mixture of Gaussians
    % function [B, B2] = eval_pdf_cond_mog(data, mu, Sigma, mixmat, unit_norm)
    %
    % Notation: Y is observation, M is mixture component, and both may be conditioned on Q.
    % If Q does not exist, ignore references to Q=j below.
    % Alternatively, you may ignore M if this is a conditional Gaussian.
    %
    % INPUTS:
    % data(:,t) = t'th observation vector 
    %
    % mu(:,k) = E[Y(t) | M(t)=k] 
    % or mu(:,j,k) = E[Y(t) | Q(t)=j, M(t)=k]
    %
    % Sigma(:,:,j,k) = Cov[Y(t) | Q(t)=j, M(t)=k]
    % or there are various faster, special cases:
    %   Sigma() - scalar, spherical covariance independent of M,Q.
    %   Sigma(:,:) diag or full, tied params independent of M,Q. 
    %   Sigma(:,:,j) tied params independent of M. 
    %
    % mixmat(k) = Pr(M(t)=k) = prior
    % or mixmat(j,k) = Pr(M(t)=k | Q(t)=j) 
    % Not needed if M is not defined.
    %
    % unit_norm - optional; if 1, means data(:,i) AND mu(:,i) each have unit norm (slightly faster)
    %
    % OUTPUT:
    % B(t) = Pr(y(t)) 
    % or
    % B(i,t) = Pr(y(t) | Q(t)=i) 
    % B2(i,k,t) = Pr(y(t) | Q(t)=i, M(t)=k) 
    %
    % If the number of mixture components differs depending on Q, just set the trailing
    % entries of mixmat to 0, e.g., 2 components if Q=1, 3 components if Q=2,
    % then set mixmat(1,3)=0. In this case, B2(1,3,:)=1.0.
    """

    if mu.ndim == 1:
        d = len(mu)
        Q = 1
        M = 1
    elif mu.ndim == 2:
        d, Q = mu.shape
        M = 1
    else:
        d, Q, M = mu.shape

    d, T = data.shape

    if mixmat is None:
        mixmat = np.asmatrix(np.ones((Q, 1)))

    # B2 = zeros(Q,M,T); % ATB: not needed always
    # B = zeros(Q,T);

    if np.isscalar(Sigma):
        mu = np.reshape(mu, (d, Q * M))
        if unit_norm:  # (p-q)'(p-q) = p'p + q'q - 2p'q = n+m -2p'q since p(:,i)'p(:,i)=1
            # avoid an expensive repmat
            print ("unit norm")
            # tic; D = 2 -2*(data'*mu)'; toc
            D = 2 - 2 * np.dot(mu.T, data)
            # tic; D2 = sqdist(data, mu)'; toc
            D2 = sqdist(data, mu).T
            assert approxeq(D, D2)
        else:
            D = sqdist(data, mu).T
        del mu
        del data  # ATB: clear big old data
        # D(qm,t) = sq dist between data(:,t) and mu(:,qm)
        logB2 = -(d / 2.0) * np.log(2 * np.pi * Sigma) - (1 / (2.0 * Sigma)) * D  # det(sigma*I) = sigma^d
        B2 = np.reshape(np.asarray(np.exp(logB2)), (Q, M, T))
        del logB2  # ATB: clear big old data

    elif Sigma.ndim == 2:  # tied full
        mu = np.reshape(mu, (d, Q * M))
        D = sqdist(data, mu, inv(Sigma)).T
        # D(qm,t) = sq dist between data(:,t) and mu(:,qm)
        logB2 = -(d / 2) * np.log(2 * np.pi) - 0.5 * logdet(Sigma) - 0.5 * D
        # denom = sqrt(det(2*pi*Sigma));
        # numer = exp(-0.5 * D);
        # B2 = numer/denom;
        B2 = np.reshape(np.asarray(np.exp(logB2)), (Q, M, T))

    elif Sigma.ndim == 3:  # tied across M
        B2 = np.zeros((Q, M, T))
        for j in range(0, Q):
            # D(m,t) = sq dist between data(:,t) and mu(:,j,m)
            if isposdef(Sigma[:, :, j]):
                D = sqdist(data, np.transpose(mu[:, j, :], (0, 2, 1)), inv(Sigma[:, :, j])).T
                logB2 = -(d / 2) * np.log(2 * np.pi) - 0.5 * logdet(Sigma[:, :, j]) - 0.5 * D
                B2[j, :, :] = np.exp(logB2)
            else:
                logging.error("mixgauss_prob: Sigma(:,:,q=%d) not psd\n" % j)

    else:  # general case
        B2 = np.zeros((Q, M, T))
        for j in range(0, Q):
            for k in range(0, M):
                # if mixmat(j,k) > 0
                B2[j, k, :] = gaussian_prob(data, mu[:, j, k], Sigma[:, :, j, k])

    # B(j,t) = sum_k B2(j,k,t) * Pr(M(t)=k | Q(t)=j)

    # The repmat is actually slower than the for-loop, because it uses too much memory
    # (this is true even for small T).

    # B = squeeze(sum(B2 .* repmat(mixmat, [1 1 T]), 2));
    # B = reshape(B, [Q T]); % undo effect of squeeze in case Q = 1

    B = np.zeros((Q, T))
    if Q < T:
        for q in range(0, Q):
            # B(q,:) = mixmat(q,:) * squeeze(B2(q,:,:)); % squeeze changes order if M=1
            B[q, :] = np.dot(
                mixmat[q, :], B2[q, :, :]
            )  # vector * matrix sums over m #TODO: had to change this. Is this correct?
    else:
        for t in range(0, T):
            B[:, t] = np.sum(np.asarray(np.multiply(mixmat, B2[:, :, t])), 1)  # sum over m
    # t=toc;fprintf('%5.3f\n', t)

    # tic
    # A = squeeze(sum(B2 .* repmat(mixmat, [1 1 T]), 2));
    # t=toc;fprintf('%5.3f\n', t)
    # assert(approxeq(A,B)) % may be false because of round off error

    return B, B2
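For reference, a short sketch (not the original helper chain, which relies on sqdist/logdet/gaussian_prob) computing the same B(j,t) = sum_k mixmat(j,k) * N(y_t; mu(:,j,k), Sigma(:,:,j,k)) directly with scipy.stats.multivariate_normal; shapes follow the docstring above and the numbers are synthetic.

import numpy as np
from scipy.stats import multivariate_normal

d, Q, M, T = 2, 1, 2, 5
rng = np.random.default_rng(0)
data = rng.standard_normal((d, T))
mu = rng.standard_normal((d, Q, M))
Sigma = np.broadcast_to(np.eye(d)[:, :, None, None], (d, d, Q, M))
mixmat = np.full((Q, M), 1.0 / M)

B = np.zeros((Q, T))
for j in range(Q):
    for k in range(M):
        pdf = multivariate_normal(mu[:, j, k], Sigma[:, :, j, k]).pdf(data.T)
        B[j] += mixmat[j, k] * pdf  # mixture weight times component density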
Exemplo n.º 31
0
def leastsq(func,
            x0,
            args=(),
            Dfun=None,
            full_output=0,
            col_deriv=0,
            ftol=1.49012e-8,
            xtol=1.49012e-8,
            gtol=0.0,
            maxfev=0,
            epsfcn=0.0,
            factor=100,
            diag=None,
            warning=True):
    """Minimize the sum of squares of a set of equations.

    ::

        x = arg min(sum(func(y)**2,axis=0))
                 y

    Parameters
    ----------
    func : callable
        should take at least one (possibly length N vector) argument and
        return M floating point numbers.
    x0 : ndarray
        The starting estimate for the minimization.
    args : tuple
        Any extra arguments to func are placed in this tuple.
    Dfun : callable
        A function or method to compute the Jacobian of func with derivatives
        across the rows. If this is None, the Jacobian will be estimated.
    full_output : bool
        non-zero to return all optional outputs.
    col_deriv : bool
        non-zero to specify that the Jacobian function computes derivatives
        down the columns (faster, because there is no transpose operation).
    ftol : float
        Relative error desired in the sum of squares.
    xtol : float
        Relative error desired in the approximate solution.
    gtol : float
        Orthogonality desired between the function vector and the columns of
        the Jacobian.
    maxfev : int
        The maximum number of calls to the function. If zero, then 100*(N+1) is
        the maximum where N is the number of elements in x0.
    epsfcn : float
        A suitable step length for the forward-difference approximation of the
        Jacobian (for Dfun=None). If epsfcn is less than the machine precision,
        it is assumed that the relative errors in the functions are of the
        order of the machine precision.
    factor : float
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
    diag : sequence
        N positive entries that serve as scale factors for the variables.
    warning : bool
        Whether to print a warning message when the call is unsuccessful.
        Deprecated, use the warnings module instead.

    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for an unsuccessful
        call).
    cov_x : ndarray
        Uses the fjac and ipvt optional outputs to construct an
        estimate of the jacobian around the solution.  ``None`` if a
        singular matrix encountered (indicates very flat curvature in
        some direction).  This matrix must be multiplied by the
        residual variance to get the covariance of the
        parameter estimates -- see curve_fit.
    infodict : dict
        a dictionary of optional outputs with the keys::

            - 'nfev' : the number of function calls
            - 'fvec' : the function evaluated at the output
            - 'fjac' : A permutation of the R matrix of a QR
                     factorization of the final approximate
                     Jacobian matrix, stored column wise.
                     Together with ipvt, the covariance of the
                     estimate can be approximated.
            - 'ipvt' : an integer array of length N which defines
                     a permutation matrix, p, such that
                     fjac*p = q*r, where r is upper triangular
                     with diagonal elements of nonincreasing
                     magnitude. Column j of p is column ipvt(j)
                     of the identity matrix.
            - 'qtf'  : the vector (transpose(q) * fvec).
    mesg : str
        A string message giving information about the cause of failure.
    ier : int
        An integer flag.  If it is equal to 1, 2, 3 or 4, the solution was
        found.  Otherwise, the solution was not found. In either case, the
        optional output variable 'mesg' gives more information.

    Notes
    -----
    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.

    """
    if not warning:
        msg = "The warning keyword is deprecated. Use the warnings module."
        warnings.warn(msg, DeprecationWarning)
    x0 = array(x0, ndmin=1)
    n = len(x0)
    if type(args) != type(()): args = (args, )
    m = check_func(func, x0, args, n)[0]
    if n > m:
        raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
    if Dfun is None:
        if (maxfev == 0):
            maxfev = 200 * (n + 1)
        retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol, gtol,
                                 maxfev, epsfcn, factor, diag)
    else:
        if col_deriv:
            check_func(Dfun, x0, args, n, (n, m))
        else:
            check_func(Dfun, x0, args, n, (m, n))
        if (maxfev == 0):
            maxfev = 100 * (n + 1)
        retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv,
                                 ftol, xtol, gtol, maxfev, factor, diag)

    errors = {
        0: ["Improper input parameters.", TypeError],
        1: [
            "Both actual and predicted relative reductions "
            "in the sum of squares\n  are at most %f" % ftol, None
        ],
        2: [
            "The relative error between two consecutive "
            "iterates is at most %f" % xtol, None
        ],
        3: [
            "Both actual and predicted relative reductions in "
            "the sum of squares\n  are at most %f and the "
            "relative error between two consecutive "
            "iterates is at \n  most %f" % (ftol, xtol), None
        ],
        4: [
            "The cosine of the angle between func(x) and any "
            "column of the\n  Jacobian is at most %f in "
            "absolute value" % gtol, None
        ],
        5: [
            "Number of calls to function has reached "
            "maxfev = %d." % maxfev, ValueError
        ],
        6: [
            "ftol=%f is too small, no further reduction "
            "in the sum of squares\n  is possible."
            "" % ftol, ValueError
        ],
        7: [
            "xtol=%f is too small, no further improvement in "
            "the approximate\n  solution is possible." % xtol, ValueError
        ],
        8: [
            "gtol=%f is too small, func(x) is orthogonal to the "
            "columns of\n  the Jacobian to machine "
            "precision." % gtol, ValueError
        ],
        'unknown': ["Unknown error.", TypeError]
    }

    info = retval[-1]  # The FORTRAN return value

    if (info not in [1, 2, 3, 4] and not full_output):
        if info in [5, 6, 7, 8]:
            warnings.warn(errors[info][0], RuntimeWarning)
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                raise errors['unknown'][1](errors['unknown'][0])

    if n == 1:
        retval = (retval[0][0], ) + retval[1:]

    mesg = errors[info][0]
    if full_output:
        cov_x = None
        if info in [1, 2, 3, 4]:
            from numpy.dual import inv
            from numpy.linalg import LinAlgError
            perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
            r = triu(transpose(retval[1]['fjac'])[:n, :])
            R = dot(r, perm)
            try:
                cov_x = inv(dot(transpose(R), R))
            except LinAlgError:
                pass
        return (retval[0], cov_x) + retval[1:-1] + (mesg, info)
    else:
        return (retval[0], info)
Exemplo n.º 32
0
def estimate_poses(params, homographies):
    K_inv = inv(params)
    return [extract_transformation(np.dot(K_inv, H)) for H in homographies]
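estimate_poses defers to an extract_transformation helper that is not shown in this snippet; below is a plausible sketch of such a helper following Zhang's standard decomposition of A = K^-1 H into rotation columns and a translation (the name, normalization, and lack of re-orthogonalization are assumptions, not the original code).

import numpy as np

def extract_transformation_sketch(A):
    # Recover (R, t) from A = inv(K) @ H, up to scale (Zhang-style).
    lam = 1.0 / np.linalg.norm(A[:, 0])  # scale fixed by the first column
    r1 = lam * A[:, 0]
    r2 = lam * A[:, 1]
    r3 = np.cross(r1, r2)                # complete the rotation basis
    t = lam * A[:, 2]
    return np.column_stack([r1, r2, r3]), t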
Exemplo n.º 33
0
def leastsq(func, x0, args=(), Dfun=None, full_output=0,
            col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
            gtol=0.0, maxfev=0, epsfcn=0.0, factor=100, diag=None,warning=True):
    """Minimize the sum of squares of a set of equations.

    ::

        x = arg min(sum(func(y)**2,axis=0))
                 y

    Parameters
    ----------
    func : callable
        should take at least one (possibly length N vector) argument and
        return M floating point numbers.
    x0 : ndarray
        The starting estimate for the minimization.
    args : tuple
        Any extra arguments to func are placed in this tuple.
    Dfun : callable
        A function or method to compute the Jacobian of func with derivatives
        across the rows. If this is None, the Jacobian will be estimated.
    full_output : bool
        non-zero to return all optional outputs.
    col_deriv : bool
        non-zero to specify that the Jacobian function computes derivatives
        down the columns (faster, because there is no transpose operation).
    ftol : float
        Relative error desired in the sum of squares.
    xtol : float
        Relative error desired in the approximate solution.
    gtol : float
        Orthogonality desired between the function vector and the columns of
        the Jacobian.
    maxfev : int
        The maximum number of calls to the function. If zero, then 100*(N+1) is
        the maximum where N is the number of elements in x0.
    epsfcn : float
        A suitable step length for the forward-difference approximation of the
        Jacobian (for Dfun=None). If epsfcn is less than the machine precision,
        it is assumed that the relative errors in the functions are of the
        order of the machine precision.
    factor : float
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
    diag : sequence
        N positive entries that serve as scale factors for the variables.
    warning : bool
        Whether to print a warning message when the call is unsuccessful.
        Deprecated, use the warnings module instead.

    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for an unsuccessful
        call).
    cov_x : ndarray
        Uses the fjac and ipvt optional outputs to construct an
        estimate of the jacobian around the solution.  ``None`` if a
        singular matrix encountered (indicates very flat curvature in
        some direction).  This matrix must be multiplied by the
        residual variance to get the covariance of the
        parameter estimates -- see curve_fit.
    infodict : dict
        a dictionary of optional outputs with the keys::

            - 'nfev' : the number of function calls
            - 'fvec' : the function evaluated at the output
            - 'fjac' : A permutation of the R matrix of a QR
                     factorization of the final approximate
                     Jacobian matrix, stored column wise.
                     Together with ipvt, the covariance of the
                     estimate can be approximated.
            - 'ipvt' : an integer array of length N which defines
                     a permutation matrix, p, such that
                     fjac*p = q*r, where r is upper triangular
                     with diagonal elements of nonincreasing
                     magnitude. Column j of p is column ipvt(j)
                     of the identity matrix.
            - 'qtf'  : the vector (transpose(q) * fvec).
    mesg : str
        A string message giving information about the cause of failure.
    ier : int
        An integer flag.  If it is equal to 1, 2, 3 or 4, the solution was
        found.  Otherwise, the solution was not found. In either case, the
        optional output variable 'mesg' gives more information.

    Notes
    -----
    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.

    """
    if not warning:
        msg = "The warning keyword is deprecated. Use the warnings module."
        warnings.warn(msg, DeprecationWarning)
    x0 = array(x0,ndmin=1)
    n = len(x0)
    if type(args) != type(()): args = (args,)
    m = check_func(func,x0,args,n)[0]
    if n>m:
        raise TypeError('Improper input: N=%s must not exceed M=%s' % (n,m))
    if Dfun is None:
        if (maxfev == 0):
            maxfev = 200*(n+1)
        retval = _minpack._lmdif(func, x0, args, full_output,
                                 ftol, xtol, gtol,
                                 maxfev, epsfcn, factor, diag)
    else:
        if col_deriv:
            check_func(Dfun,x0,args,n,(n,m))
        else:
            check_func(Dfun,x0,args,n,(m,n))
        if (maxfev == 0):
            maxfev = 100*(n+1)
        retval = _minpack._lmder(func,Dfun,x0,args,full_output,col_deriv,ftol,xtol,gtol,maxfev,factor,diag)

    errors = {0:["Improper input parameters.", TypeError],
              1:["Both actual and predicted relative reductions "
                 "in the sum of squares\n  are at most %f" % ftol, None],
              2:["The relative error between two consecutive "
                 "iterates is at most %f" % xtol, None],
              3:["Both actual and predicted relative reductions in "
                 "the sum of squares\n  are at most %f and the "
                 "relative error between two consecutive "
                 "iterates is at \n  most %f" % (ftol,xtol), None],
              4:["The cosine of the angle between func(x) and any "
                 "column of the\n  Jacobian is at most %f in "
                 "absolute value" % gtol, None],
              5:["Number of calls to function has reached "
                 "maxfev = %d." % maxfev, ValueError],
              6:["ftol=%f is too small, no further reduction "
                 "in the sum of squares\n  is possible.""" % ftol, ValueError],
              7:["xtol=%f is too small, no further improvement in "
                 "the approximate\n  solution is possible." % xtol, ValueError],
              8:["gtol=%f is too small, func(x) is orthogonal to the "
                 "columns of\n  the Jacobian to machine "
                 "precision." % gtol, ValueError],
              'unknown':["Unknown error.", TypeError]}

    info = retval[-1]    # The FORTRAN return value

    if (info not in [1,2,3,4] and not full_output):
        if info in [5,6,7,8]:
            warnings.warn(errors[info][0], RuntimeWarning)
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                raise errors['unknown'][1](errors['unknown'][0])

    if n == 1:
        retval = (retval[0][0],) + retval[1:]

    mesg = errors[info][0]
    if full_output:
        cov_x = None
        if info in [1,2,3,4]:
            from numpy.dual import inv
            from numpy.linalg import LinAlgError
            perm = take(eye(n),retval[1]['ipvt']-1,0)
            r = triu(transpose(retval[1]['fjac'])[:n,:])
            R = dot(r, perm)
            try:
                cov_x = inv(dot(transpose(R),R))
            except LinAlgError:
                pass
        return (retval[0], cov_x) + retval[1:-1] + (mesg,info)
    else:
        return (retval[0], info)
Exemplo n.º 34
0
def leastsq(func, x0, args=(), Dfun=None, full_output=0,
            col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
            gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
    """
    Minimize the sum of squares of a set of equations.

    ::

        x = arg min(sum(func(y)**2,axis=0))
                 y

    Parameters
    ----------
    func : callable
        should take at least one (possibly length N vector) argument and
        return M floating point numbers. It must not return NaNs or
        fitting might fail.
    x0 : ndarray
        The starting estimate for the minimization.
    args : tuple, optional
        Any extra arguments to func are placed in this tuple.
    Dfun : callable, optional
        A function or method to compute the Jacobian of func with derivatives
        across the rows. If this is None, the Jacobian will be estimated.
    full_output : bool, optional
        non-zero to return all optional outputs.
    col_deriv : bool, optional
        non-zero to specify that the Jacobian function computes derivatives
        down the columns (faster, because there is no transpose operation).
    ftol : float, optional
        Relative error desired in the sum of squares.
    xtol : float, optional
        Relative error desired in the approximate solution.
    gtol : float, optional
        Orthogonality desired between the function vector and the columns of
        the Jacobian.
    maxfev : int, optional
        The maximum number of calls to the function. If `Dfun` is provided
        then the default `maxfev` is 100*(N+1) where N is the number of elements
        in x0, otherwise the default `maxfev` is 200*(N+1).
    epsfcn : float, optional
        A variable used in determining a suitable step length for the forward-
        difference approximation of the Jacobian (for Dfun=None).
        Normally the actual step length will be sqrt(epsfcn)*x
        If epsfcn is less than the machine precision, it is assumed that the
        relative errors are of the order of the machine precision.
    factor : float, optional
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
    diag : sequence, optional
        N positive entries that serve as scale factors for the variables.

    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for an unsuccessful
        call).
    cov_x : ndarray
        Uses the fjac and ipvt optional outputs to construct an
        estimate of the jacobian around the solution. None if a
        singular matrix encountered (indicates very flat curvature in
        some direction).  This matrix must be multiplied by the
        residual variance to get the covariance of the
        parameter estimates -- see curve_fit.
    infodict : dict
        a dictionary of optional outputs with the keys:

        ``nfev``
            The number of function calls
        ``fvec``
            The function evaluated at the output
        ``fjac``
            A permutation of the R matrix of a QR
            factorization of the final approximate
            Jacobian matrix, stored column wise.
            Together with ipvt, the covariance of the
            estimate can be approximated.
        ``ipvt``
            An integer array of length N which defines
            a permutation matrix, p, such that
            fjac*p = q*r, where r is upper triangular
            with diagonal elements of nonincreasing
            magnitude. Column j of p is column ipvt(j)
            of the identity matrix.
        ``qtf``
            The vector (transpose(q) * fvec).

    mesg : str
        A string message giving information about the cause of failure.
    ier : int
        An integer flag.  If it is equal to 1, 2, 3 or 4, the solution was
        found.  Otherwise, the solution was not found. In either case, the
        optional output variable 'mesg' gives more information.

    Notes
    -----
    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.

    cov_x is a Jacobian approximation to the Hessian of the least squares
    objective function.
    This approximation assumes that the objective function is based on the
    difference between some observed target data (ydata) and a (non-linear)
    function of the parameters `f(xdata, params)` ::

           func(params) = ydata - f(xdata, params)

    so that the objective function is ::

           min   sum((ydata - f(xdata, params))**2, axis=0)
         params

    """
    x0 = asarray(x0).flatten()
    n = len(x0)
    if not isinstance(args, tuple):
        args = (args,)
    shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
    m = shape[0]
    if n > m:
        raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
    if epsfcn is None:
        epsfcn = finfo(dtype).eps
    if Dfun is None:
        if maxfev == 0:
            maxfev = 200*(n + 1)
        retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
                                 gtol, maxfev, epsfcn, factor, diag)
    else:
        if col_deriv:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
        else:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
        if maxfev == 0:
            maxfev = 100 * (n + 1)
        retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv,
                                 ftol, xtol, gtol, maxfev, factor, diag)

    errors = {0: ["Improper input parameters.", TypeError],
              1: ["Both actual and predicted relative reductions "
                  "in the sum of squares\n  are at most %f" % ftol, None],
              2: ["The relative error between two consecutive "
                  "iterates is at most %f" % xtol, None],
              3: ["Both actual and predicted relative reductions in "
                  "the sum of squares\n  are at most %f and the "
                  "relative error between two consecutive "
                  "iterates is at \n  most %f" % (ftol, xtol), None],
              4: ["The cosine of the angle between func(x) and any "
                  "column of the\n  Jacobian is at most %f in "
                  "absolute value" % gtol, None],
              5: ["Number of calls to function has reached "
                  "maxfev = %d." % maxfev, ValueError],
              6: ["ftol=%f is too small, no further reduction "
                  "in the sum of squares\n  is possible.""" % ftol,
                  ValueError],
              7: ["xtol=%f is too small, no further improvement in "
                  "the approximate\n  solution is possible." % xtol,
                  ValueError],
              8: ["gtol=%f is too small, func(x) is orthogonal to the "
                  "columns of\n  the Jacobian to machine "
                  "precision." % gtol, ValueError],
              'unknown': ["Unknown error.", TypeError]}

    info = retval[-1]    # The FORTRAN return value

    if info not in [1, 2, 3, 4] and not full_output:
        if info in [5, 6, 7, 8]:
            warnings.warn(errors[info][0], RuntimeWarning)
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                raise errors['unknown'][1](errors['unknown'][0])

    mesg = errors[info][0]
    if full_output:
        cov_x = None
        if info in [1, 2, 3, 4]:
            from numpy.dual import inv
            from numpy.linalg import LinAlgError
            perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
            r = triu(transpose(retval[1]['fjac'])[:n, :])
            R = dot(r, perm)
            try:
                cov_x = inv(dot(transpose(R), R))
            except (LinAlgError, ValueError):
                pass
        return (retval[0], cov_x) + retval[1:-1] + (mesg, info)
    else:
        return (retval[0], info)
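A runnable sketch checking the covariance reconstruction used in the full_output branch above: for a linear model, the cov_x returned by scipy.optimize.leastsq should match inv(J.T @ J) built from the analytic Jacobian, up to finite-difference error. The data here are synthetic.

import numpy as np
from numpy.linalg import inv
from scipy.optimize import leastsq

rng = np.random.default_rng(1)
x = np.linspace(0.0, 1.0, 30)
y = 2.0 * x + 1.0 + 0.01 * rng.standard_normal(30)

def residuals(p):
    return y - (p[0] * x + p[1])

J = -np.column_stack([x, np.ones_like(x)])  # analytic Jacobian of residuals

popt, cov_x, infodict, mesg, ier = leastsq(residuals, [1.0, 0.0],
                                           full_output=1)
assert np.allclose(cov_x, inv(J.T @ J), rtol=1e-4)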
Exemplo n.º 35
0
def generativeSeperateCov(trainData, trainLabels, testData, testLabels):
    (numClasses, N, mu, Slist, pList) = estimateGaussian(trainData, trainLabels)
    
    numCorrect = 0
    for x,t in izip(testData, testLabels):
        pXgivenClassList = []
        for i in range(numClasses):
            pXgivenClassList.append(1/sqrt(det(Slist[i])) * exp(-0.5 * dot(dot((x - mu[i]), inv(Slist[i])), (x-mu[i]))))
        a = log((pXgivenClassList[1]*pList[1]) / (pXgivenClassList[0]*pList[0]))
        probClass1 = sigmoid(a)
        if probClass1 >= 0.5:
            if t == 1:
                numCorrect += 1
        else:
            if t == 0:
                numCorrect += 1
    return float(numCorrect) / float(len(testLabels))
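The density ratio in generativeSeperateCov above can overflow or underflow for high-dimensional data; a hedged sketch of the same two-class posterior computed from log-densities (the class models are invented, and scipy.stats handles the normalization constants):

import numpy as np
from scipy.stats import multivariate_normal

# Hypothetical class models, for illustration only.
mu = [np.array([0.0, 0.0]), np.array([1.0, 1.0])]
S = [np.eye(2), 2.0 * np.eye(2)]
p = [0.5, 0.5]

x = np.array([0.8, 0.9])
log_joint = [np.log(p[i]) + multivariate_normal(mu[i], S[i]).logpdf(x)
             for i in range(2)]
a = log_joint[1] - log_joint[0]          # log-odds of class 1 vs class 0
prob_class1 = 1.0 / (1.0 + np.exp(-a))   # sigmoid(a), as in the example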
Exemplo n.º 36
0
    adapt = adapt_whitepoint.cache[key]
    return adapt


# Create the cache.
adapt_whitepoint.cache = {}


#### Data ######################################################################

# From the sRGB specification.
xyz_from_rgb = np.array(
    [[0.412453, 0.357580, 0.180423],
     [0.212671, 0.715160, 0.072169],
     [0.019334, 0.119193, 0.950227]]
)
rgb_from_xyz = inv(xyz_from_rgb)

# This transformation to LMS space keeps the peaks of colormatching curves
# normalized to 1.
lms_from_xyz = np.array(
    [
        [0.2434974736455316, 0.8523911562030849, -0.0515994646411065],
        [-0.3958579552426224, 1.1655483851630273, 0.0837969419671409],
        [0.0, 0.0, 0.6185822095756526],
    ]
)
xyz_from_lms = inv(lms_from_xyz)

# The transformation from XYZ to the ATD opponent colorspace. These are
# "official" values directly from Guth 1980.
atd_from_xyz = np.array(
    [[0.0, 0.9341, 0.0],
     [0.7401, -0.6801, -0.1567],
     [-0.0061, -0.0212, 0.0314]]
)
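A quick self-contained sketch exercising the matrices above: converting a linear-RGB triplet to XYZ and back (gamma encoding is deliberately ignored here).

import numpy as np
from numpy.linalg import inv

xyz_from_rgb = np.array(
    [[0.412453, 0.357580, 0.180423],
     [0.212671, 0.715160, 0.072169],
     [0.019334, 0.119193, 0.950227]]
)
rgb_from_xyz = inv(xyz_from_rgb)

rgb = np.array([0.2, 0.5, 0.8])              # linear RGB, not gamma-encoded
xyz = xyz_from_rgb @ rgb
assert np.allclose(rgb_from_xyz @ xyz, rgb)  # round trip recovers the triplet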
Exemplo n.º 37
0
def leastsqbound(func, x0, args=(), bounds=None, Dfun=None, full_output=0,
                 col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
                 gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
    # use leastsq if no bounds are present
    if bounds is None:
        return leastsq(func, x0, args, Dfun, full_output, col_deriv,
                       ftol, xtol, gtol, maxfev, epsfcn, factor, diag)

    # create function which convert between internal and external parameters
    i2e = _internal2external_func(bounds)
    e2i = _external2internal_func(bounds)

    x0 = asarray(x0).flatten()
    i0 = e2i(x0)
    n = len(x0)
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')
    if not isinstance(args, tuple):
        args = (args,)
    shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
    m = shape[0]
    if n > m:
        raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
    if epsfcn is None:
        epsfcn = finfo(dtype).eps

    # define a wrapped func which accept internal parameters, converts them
    # to external parameters and calls func
    def wfunc(x, *args):
        return func(i2e(x), *args)

    if Dfun is None:
        if (maxfev == 0):
            maxfev = 200 * (n + 1)
        retval = _minpack._lmdif(wfunc, i0, args, full_output, ftol, xtol,
                                 gtol, maxfev, epsfcn, factor, diag)
    else:
        if col_deriv:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
        else:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
        if (maxfev == 0):
            maxfev = 100 * (n + 1)

        def wDfun(x, *args):  # wrapped Dfun
            return Dfun(i2e(x), *args)

        retval = _minpack._lmder(wfunc, wDfun, i0, args, full_output,
                                 col_deriv, ftol, xtol, gtol, maxfev,
                                 factor, diag)

    errors = {0: ["Improper input parameters.", TypeError],
              1: ["Both actual and predicted relative reductions "
                  "in the sum of squares\n  are at most %f" % ftol, None],
              2: ["The relative error between two consecutive "
                  "iterates is at most %f" % xtol, None],
              3: ["Both actual and predicted relative reductions in "
                  "the sum of squares\n  are at most %f and the "
                  "relative error between two consecutive "
                  "iterates is at \n  most %f" % (ftol, xtol), None],
              4: ["The cosine of the angle between func(x) and any "
                  "column of the\n  Jacobian is at most %f in "
                  "absolute value" % gtol, None],
              5: ["Number of calls to function has reached "
                  "maxfev = %d." % maxfev, ValueError],
              6: ["ftol=%f is too small, no further reduction "
                  "in the sum of squares\n  is possible.""" % ftol,
                  ValueError],
              7: ["xtol=%f is too small, no further improvement in "
                  "the approximate\n  solution is possible." % xtol,
                  ValueError],
              8: ["gtol=%f is too small, func(x) is orthogonal to the "
                  "columns of\n  the Jacobian to machine "
                  "precision." % gtol, ValueError],
              'unknown': ["Unknown error.", TypeError]}

    info = retval[-1]    # The FORTRAN return value

    if (info not in [1, 2, 3, 4] and not full_output):
        if info in [5, 6, 7, 8]:
            pass  # warnings.warn(errors[info][0], RuntimeWarning)
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                raise errors['unknown'][1](errors['unknown'][0])

    mesg = errors[info][0]
    x = i2e(retval[0])  # internal params to external params

    if full_output:
        # convert fjac from internal params to external
        grad = _internal2external_grad(retval[0], bounds)
        retval[1]['fjac'] = (retval[1]['fjac'].T / take(grad,
                             retval[1]['ipvt'] - 1)).T
        cov_x = None
        if info in [1, 2, 3, 4]:
            from numpy.dual import inv
            from numpy.linalg import LinAlgError
            perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
            r = triu(transpose(retval[1]['fjac'])[:n, :])
            R = dot(r, perm)
            try:
                cov_x = inv(dot(transpose(R), R))
            except LinAlgError:
                pass
        return (x, cov_x) + retval[1:-1] + (mesg, info)
    else:
        return (x, info)