示例#1
0
    def doFit(self):
        """Fit the parameters named in ``self.fitParam``.

        For each parameter key, ``self.baseParam[k]`` is either a
        ``(value, min, max)`` tuple (bounded parameter) or a bare number
        (unbounded).  Builds an OpenOpt ``NLLSP`` nonlinear least-squares
        problem and solves it with ``self.solver``.

        Returns:
            tuple: ``(param, residual_norm)`` where ``param`` maps each
            fitted parameter name to its optimized value and
            ``residual_norm`` is ``norm(self.fun(result))``.
        """
        guess = []
        scale = []  # scaling factor of variables
        ub = []  # upper bound
        lb = []  # lower bound
        for k in self.fitParam:
            v = self.baseParam[k]
            if isinstance(v, tuple):
                # Bounded parameter: (value, min, max); scale by bound width.
                # NOTE(review): vmin == vmax would divide by zero below — confirm
                # callers never pass degenerate bounds.
                vmin = v[1]
                vmax = v[2]
                invS = abs(vmax - vmin)
                v = v[0]
            else:
                # Unbounded parameter: scale by its own magnitude, clamped
                # to [1e-6, 1e5] to keep the scaling factor sane near zero.
                vmin = -1e100
                vmax = 1e100
                invS = min(1e5, max(1e-6, abs(float(v))))

            guess.append(v)
            lb.append(vmin)
            ub.append(vmax)
            scale.append(1. / invS)

        LSP = NLLSP(self.fun,
                    guess,
                    df=self.jac,
                    ub=ub,
                    lb=lb,
                    scale=scale,
                    xtol=1e-12,
                    ftol=1e-12,
                    gtol=1e-12,
                    maxFunEvals=100)
        r = LSP.solve(self.solver)
        result = r.xf
        # np.float (an alias of builtin float) was removed in NumPy 1.24;
        # builtin float covers the scalar case, np.float64 included.
        if isinstance(result, float):
            result = [result]
        e = self.fun(result)

        param = {}
        for i, k in enumerate(self.fitParam):
            param[k] = result[i]
        return param, norm(e)
示例#2
0
    def doFit(self):
        """Fit the parameters named in ``self.fitParam``.

        Each ``self.baseParam[k]`` is either a ``(value, min, max)`` tuple
        (bounded parameter) or a bare number (unbounded).  Builds an OpenOpt
        ``NLLSP`` nonlinear least-squares problem and solves it with
        ``self.solver``.

        Returns:
            tuple: ``(param, residual_norm)`` where ``param`` maps each
            fitted parameter name to its optimized value and
            ``residual_norm`` is ``norm(self.fun(result))``.
        """
        guess = []
        scale = []  # scaling factor of variables
        ub = []  # upper bound
        lb = []  # lower bound
        for k in self.fitParam:
            v = self.baseParam[k]
            if isinstance(v, tuple):
                # Bounded parameter: (value, min, max); scale by bound width.
                # NOTE(review): vmin == vmax would divide by zero below — confirm
                # callers never pass degenerate bounds.
                vmin = v[1]
                vmax = v[2]
                invS = abs(vmax - vmin)
                v = v[0]
            else:
                # Unbounded parameter: scale by its own magnitude, clamped
                # to [1e-6, 1e5] to keep the scaling factor sane near zero.
                vmin = -1e100
                vmax = 1e100
                invS = min(1e5, max(1e-6, abs(float(v))))

            guess.append(v)
            lb.append(vmin)
            ub.append(vmax)
            scale.append(1. / invS)

        LSP = NLLSP(self.fun, guess, df=self.jac, ub=ub, lb=lb, scale=scale,
                    xtol=1e-12, ftol=1e-12, gtol=1e-12, maxFunEvals=100)
        r = LSP.solve(self.solver)
        result = r.xf
        # np.float (an alias of builtin float) was removed in NumPy 1.24;
        # builtin float covers the scalar case, np.float64 included.
        if isinstance(result, float):
            result = [result]
        e = self.fun(result)

        param = {}
        for i, k in enumerate(self.fitParam):
            param[k] = result[i]
        return param, norm(e)
示例#3
0
from openopt import NLLSP

# Declare the four optimization variables of the decay model.
Amp, LifeTime, Offset, ScatterAmp = oovars('Amp', 'LifeTime', 'Offset',
                                           'ScatterAmp')

# Residual function of the model over the [Start, End] window.
objfunc = FuncMin(Time, Amp, LifeTime, Offset, ScatterAmp, Start, End)

# Initial guesses for all four variables.
# BUG FIX: 'LifeTime' was assigned twice and 'Offset' was missing from the
# start point, leaving one declared variable uninitialized.
startPoint = dict()
startPoint['Amp'] = 1.0
startPoint['LifeTime'] = 1.0
startPoint['Offset'] = 1.0
startPoint['ScatterAmp'] = 1.0

p = NLLSP(objfunc, startPoint)

# Additional constraints could be attached via p.constraints = [...] if needed.

r = p.solve('ralg')
RAmp, RLifeTime, ROffset, RScatterAmp = r(Amp, LifeTime, Offset, ScatterAmp)
print(RAmp, RLifeTime, ROffset, RScatterAmp)
示例#4
0
# Optional: analytic gradient of the residual vector.
def df(x):
    """Jacobian of the 3 residuals with respect to the 2 variables.

    Returns a (3, 2) array; entry (2, 1) is intentionally left at zero
    because the third residual does not depend on x[1].
    """
    jac = zeros((3, 2))
    jac[0] = [2 * x[0], 2 * x[1]]
    jac[1] = [4 * x[0] ** 3, 4 * x[1] ** 3]
    jac[2, 0] = 1
    return jac

# Initial estimate of the solution — sometimes a rather precise one is very important.
x0 = [1.5, 8]

# Build the least-squares problem; the analytic gradient df speeds convergence.
# Alternatives: NLLSP(f, x0) for numeric differentiation, or tune diffInt, e.g.
#   p = NLLSP(f, x0, diffInt=1.5e-8, xtol=1.5e-8, ftol=1.5e-8)
p = NLLSP(f, x0, df=df, xtol=1.5e-8, ftol=1.5e-8)

# Optional: check the user-supplied gradient against finite differences.
p.checkdf()

# Solve via the lsp2nlp converter with the ralg NLP solver.  Other choices:
#   r = p.solve('scipy_leastsq', plot=1, iprint=-1)
#   r = p.solve('nlp:ipopt', plot=1), p.solve('nlp:algencan'), ...
# (some NLP solvers require additional installation)
r = p.solve('nlp:ralg', iprint=1, plot=1)

# FIX: converted Python 2 print statements to print() calls, consistent with
# the rest of the file and valid under Python 3.
print('x_opt:', r.xf)  # 2.74930862,  +/-2.5597651
print('funcs Values:', p.f(r.xf))  # [-0.888904734668, 0.0678251418575, -0.750691380965]
print('f_opt:', r.ff, '; sum of squares (should be same value):', (p.f(r.xf) ** 2).sum())  # 1.35828942657