Example 1
def milpTransfer(originProb):
    newProb = NLP(originProb.f, originProb.x0)
    originProb.fill(newProb)
    newProb.discreteVars = originProb.discreteVars
    def err(s): # to prevent text output
        raise OpenOptException(s)
    newProb.err = err
    for fn in ['df', 'd2f', 'c', 'dc', 'h', 'dh']:
        if hasattr(originProb, fn) and getattr(originProb.userProvided, fn) or originProb.isFDmodel:
            setattr(newProb, fn, getattr(originProb, fn))
    
    newProb.plot = 0
    newProb.iprint = -1
    newProb.nlpSolver = originProb.nlpSolver 
    return newProb
Example 2
def milpTransfer(originProb):
    newProb = NLP(originProb.f, originProb.x0)
    originProb.inspire(newProb)
    newProb.discreteVars = originProb.discreteVars

    def err(s):  # to prevent text output
        raise OpenOptException(s)

    newProb.err = err
    for fn in ['df', 'd2f', 'c', 'dc', 'h', 'dh']:
        if hasattr(originProb, fn) and getattr(originProb.userProvided,
                                               fn) or originProb.isFDmodel:
            setattr(newProb, fn, getattr(originProb, fn))

    newProb.plot = 0
    newProb.iprint = -1
    newProb.nlpSolver = originProb.nlpSolver
    return newProb
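Both versions of milpTransfer build a continuous NLP relaxation of a mixed-integer problem: they copy the objective, start point, any user-supplied derivatives and constraints, silence text output by rerouting err, and carry over the discrete-variable list. A minimal usage sketch, assuming a problem object `milp` that exposes the same attributes the function reads (f, x0, discreteVars, nlpSolver):

# Hedged sketch; `milp` is an assumption, everything else comes from the
# listing above.
relaxed = milpTransfer(milp)
try:
    r = relaxed.solve(milp.nlpSolver)   # solve the continuous relaxation
    print('relaxed optimum: %s' % r.xf)
except OpenOptException as e:
    print('relaxation failed: %s' % e)  # err() raises instead of printing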
Example 3
    #    p.maxfun=100

    # see also: help(NLP) -> maxTime, maxCPUTime, ftol and xtol
    # that are connected to / used in lincher and some other solvers

    # optional: check of user-supplied derivatives
    #p.checkdf()
    #p.checkdc()
    #p.checkdh()

    # last but not least:
    # please don't forget,
    # Python indexing starts from ZERO!!

    p.plot = 0
    p.iprint = 1
    #p.df_iter = 50
    p.maxTime = 4000
    print 'solving'
    r = p.solve('algencan')
    print 'done'
    pout = r.xf
    print 'solution:', pout

    print len(pout)

    P_up, P_down = transform_p(pout, Mx, Mz, M)
    P = P_up - P_down

    if 0:
        chi = chisq(p)  # remaining arguments truncated in the original listing
Example 4
    def fit_node(self,index):
        qnode=self.qlist[index]
        print qnode.q
        th=qnode.th_condensed['a3']
        counts=qnode.th_condensed['counts']
        counts_err=qnode.th_condensed['counts_err']
        print qnode.th_condensed['counts'].std()
        print qnode.th_condensed['counts'].mean()
        maxval=qnode.th_condensed['counts'].max()
        minval=qnode.th_condensed['counts'].min()
        diff=qnode.th_condensed['counts'].max()-qnode.th_condensed['counts'].min()\
            -qnode.th_condensed['counts'].mean()
        sig=qnode.th_condensed['counts'].std()

        if diff-2*sig>0:
            #the difference between the high and low points and
            #the mean is greater than 2 sigma, so we have a signal
            p0=findpeak(th,counts,1)
            print 'p0',p0
            #Area center width Bak
            center=p0[0]
            width=p0[1]
            sigma=width/2/N.sqrt(2*N.log(2))
            Imax=maxval-minval
            area=Imax*(N.sqrt(2*pi)*sigma)
            print 'Imax',Imax
            pin=[area,center,width,0]





            if 1:
                p = NLP(chisq, pin, maxIter = 1e3, maxFunEvals = 1e5)
                #p.lb=lowerm
                #p.ub=upperm
                p.args.f=(th,counts,counts_err)
                p.plot = 0
                p.iprint = 1
                p.contol = 1e-5#3 # required constraints tolerance, default for NLP is 1e-6

    # for ALGENCAN solver gradtol is the only one stop criterium connected to openopt
    # (except maxfun, maxiter)
    # Note that in ALGENCAN gradtol means norm of projected gradient of  the Augmented Lagrangian
    # so it should be something like 1e-3...1e-5
                p.gradtol = 1e-5#5 # gradient stop criterium (default for NLP is 1e-6)
        #print 'maxiter', p.maxiter
        #print 'maxfun', p.maxfun
                p.maxIter=50
    #    p.maxfun=100

        #p.df_iter = 50
                p.maxTime = 4000
        #r=p.solve('scipy_cobyla')
            #r=p.solve('scipy_lbfgsb')
                #r = p.solve('algencan')
                print 'ralg'
                r = p.solve('ralg')
                print 'done'
                pfit=r.xf
                print 'pfit openopt',pfit
                print 'r dict', r.__dict__

            if 0:
                print 'curvefit'
                print sys.executable
                pfit,popt=curve_fit(gauss2, th, counts, p0=pfit, sigma=counts_err)
                print 'p,popt', pfit,popt
                perror=N.sqrt(N.diag(popt))
                print 'perror',perror
                chisqr=chisq(pfit,th,counts,counts_err)
                dof=len(th)-len(pfit)
                print 'chisq',chisqr
            if 0:
                oparam=scipy.odr.Model(gauss)
                mydatao=scipy.odr.RealData(th,counts,sx=None,sy=counts_err)
                myodr = scipy.odr.ODR(mydatao, oparam, beta0=pfit)
                myoutput=myodr.run()
                myoutput.pprint()
                pfit=myoutput.beta
            if 1:
                print 'mpfit'
                p0=pfit
                parbase={'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}
                parinfo=[]
                for i in range(len(p0)):
                    parinfo.append(copy.deepcopy(parbase))
                for i in range(len(p0)):
                    parinfo[i]['value']=p0[i]
                fa = {'x':th, 'y':counts, 'err':counts_err}
                #parinfo[1]['fixed']=1
                #parinfo[2]['fixed']=1
                m = mpfit(myfunct_res, p0, parinfo=parinfo,functkw=fa)
                if (m.status <= 0):
                    print 'error message = ', m.errmsg
                params=m.params
                pfit=params
                perror=m.perror
                #chisqr=(myfunct_res(m.params, x=th, y=counts, err=counts_err)[1]**2).sum()
                chisqr=chisq(pfit,th,counts,counts_err)
                dof=m.dof
                #Icalc=gauss(pfit,th)
                #print 'mpfit chisqr', chisqr


            if 0:
                width_x=N.linspace(p0[0]-p0[1],p0[0]+p0[1],100)
                width_y=N.ones(width_x.shape)*(maxval-minval)/2
                pos_y=N.linspace(minval,maxval,100)
                pos_x=N.ones(pos_y.shape)*p0[0]
                if 1:
                    pylab.errorbar(th,counts,counts_err,marker='s',linestyle='None',mfc='black',mec='black',ecolor='black')
                    pylab.plot(width_x,width_y)
                    pylab.plot(pos_x,pos_y)
                    pylab.plot(th,gauss(pfit,th))
                    pylab.show()

        else:
            #fix center
            #fix width
            print 'no peak'
            #Area center width Bak
            area=0
            center=th[len(th)/2]
            width=(th.max()-th.min())/5.0  #rather arbitrary, but we don't know if it's the first....
            Bak=qnode.th_condensed['counts'].mean()
            p0=N.array([area,center,width,Bak],dtype='float64')  #initial conditions
            parbase={'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}
            parinfo=[]
            for i in range(len(p0)):
                parinfo.append(copy.deepcopy(parbase))
            for i in range(len(p0)):
                parinfo[i]['value']=p0[i]
            fa = {'x':th, 'y':counts, 'err':counts_err}
            parinfo[1]['fixed']=1
            parinfo[2]['fixed']=1
            m = mpfit(myfunct_res, p0, parinfo=parinfo,functkw=fa)
            if (m.status <= 0):
                print 'error message = ', m.errmsg
            params=m.params
            pfit=params
            perror=m.perror
            #chisqr=(myfunct_res(m.params, x=th, y=counts, err=counts_err)[1]**2).sum()
            chisqr=chisq(pfit,th,counts,counts_err)
            dof=m.dof
            Icalc=gauss(pfit,th)
            #print 'perror',perror
            if 0:
                pylab.errorbar(th,counts,counts_err,marker='s',linestyle='None',mfc='black',mec='black',ecolor='black')
                pylab.plot(th,Icalc)
                pylab.show()

        print 'final answer'
        print 'perror', perror
        #If the fit is unweighted (i.e. no errors were given, or the weights
        #       were uniformly set to unity), then .perror will probably not represent
        #the true parameter uncertainties.

        #       *If* you can assume that the true reduced chi-squared value is unity --
        #       meaning that the fit is implicitly assumed to be of good quality --
        #       then the estimated parameter uncertainties can be computed by scaling
        #       .perror by the measured chi-squared value.

        #          dof = len(x) - len(mpfit.params) # deg of freedom
        #          # scaled uncertainties
        #          pcerror = mpfit.perror * sqrt(mpfit.fnorm / dof)

        print 'params', pfit
        print 'chisqr', chisqr  #note that chisqr already is scaled by dof
        pcerror=perror*N.sqrt(m.fnorm / m.dof)#chisqr
        print 'pcerror', pcerror

        self.qlist[index].th_integrated_intensity=N.abs(pfit[0])
        self.qlist[index].th_integrated_intensity_err=N.abs(pcerror[0])
        Icalc=gauss(pfit,th)
        print 'perror',perror
        if 0:
            pylab.figure()
            pylab.errorbar(th,counts,counts_err,marker='s',linestyle='None',mfc='black',mec='black',ecolor='black')
            pylab.plot(th,Icalc)
            qstr=str(qnode.q['h_center'])+','+str(qnode.q['k_center'])+','+str(qnode.q['l_center'])
            pylab.title(qstr)
            #pylab.show()

        return
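fit_node writes the integrated intensity and its error back onto the q-node it was given, so a caller typically just loops over the list. A hedged driver sketch, where `fitter` stands for an instance of the owning class:

# Hypothetical names; the attributes are the ones fit_node fills in above.
for i in range(len(fitter.qlist)):
    fitter.fit_node(i)
    node = fitter.qlist[i]
    print(node.th_integrated_intensity, node.th_integrated_intensity_err)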
Example 5
        upperm=N.ones(len(p0))
    if 1:
        p = NLP(Entropy, p0, maxIter = 1e3, maxFunEvals = 1e5)

    if 0:
        p = NLP(chisq, p0, maxIter = 1e3, maxFunEvals = 1e5)


    if 0:
        p = NLP(max_wrap, p0, maxIter = 1e3, maxFunEvals = 1e5)
    if 0:
        p.lb=lowerm
        p.ub=upperm
        p.args.f=(h,k,l,fq,fqerr,x,z,cosmat_list,coslist,flist)
        p.plot = 0
        p.iprint = 1
        p.contol = 1e-5#3 # required constraints tolerance, default for NLP is 1e-6

    # for ALGENCAN solver gradtol is the only one stop criterium connected to openopt
    # (except maxfun, maxiter)
    # Note that in ALGENCAN gradtol means norm of projected gradient of  the Augmented Lagrangian
    # so it should be something like 1e-3...1e-5
        p.gradtol = 1e-5#5 # gradient stop criterium (default for NLP is 1e-6)
        #print 'maxiter', p.maxiter
        #print 'maxfun', p.maxfun
        p.maxIter=50
    #    p.maxfun=100

        #p.df_iter = 50
        p.maxTime = 4000
        h_args=(h,k,l,fq,fqerr,x,z,cosmat_list,coslist,flist)
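The if 1:/if 0: guards in this fragment are a quick way to switch which objective is handed to NLP; the same choice reads more clearly as a plain assignment. A sketch using the names from the fragment:

# Sketch only: pick one objective instead of toggling if-guards.
objective = Entropy   # or chisq, or max_wrap
p = NLP(objective, p0, maxIter=1e3, maxFunEvals=1e5)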
Example 6

# see also: help(NLP) -> maxTime, maxCPUTime, ftol and xtol
# that are connected to / used in lincher and some other solvers

# optional: check of user-supplied derivatives
p.checkdf()
p.checkdc()
p.checkdh()

# last but not least:
# please don't forget,
# Python indexing starts from ZERO!!

p.plot = 0
p.iprint = 0
p.df_iter = 4
p.maxTime = 4000
p.debug=1
#r = p.solve('algencan')

r = p.solve('ralg')
#r = p.solve('lincher')

"""
typical output:
OpenOpt checks user-supplied gradient df (size: (50,))
according to:
prob.diffInt = 1e-07
prob.check.maxViolation = 1e-05
max(abs(df_user - df_numerical)) = 2.50111104094e-06
"""
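The diagnostic above compares the user-supplied gradient against finite differences. The two printed knobs can be set on the problem before running the check; a short sketch, with the attribute names taken from the diagnostic text (treat them as version-dependent assumptions):

# Assumed attribute names, copied from the printed diagnostics above.
p.diffInt = 1e-7              # finite-difference step for the check
p.check.maxViolation = 1e-5   # report only larger user-vs-numerical gaps
p.checkdf()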
Example 7
def test(complexity=0, **kwargs):
    n = 15 * (complexity+1)

    x0 = 15*cos(arange(n)) + 8

    f = lambda x: ((x-15)**2).sum()
    df = lambda x: 2*(x-15)

    c = lambda x: [2* x[0] **4-32, x[1]**2+x[2]**2 - 8]

    # dc(x)/dx: non-lin ineq constraints gradients (optional):
    def dc(x):
        r = zeros((len(c(x0)), n))
        r[0,0] = 2 * 4 * x[0]**3
        r[1,1] = 2 * x[1]
        r[1,2] = 2 * x[2]
        return r

    hp = 2
    h1 = lambda x: (x[-1]-13)**hp
    h2 = lambda x: (x[-2]-17)**hp
    h = lambda x:[h1(x), h2(x)]

    # dh(x)/dx: non-lin eq constraints gradients (optional):
    def dh(x):
        r = zeros((2, n))
        r[0, -1] = hp*(x[-1]-13)**(hp-1)
        r[1, -2] = hp*(x[-2]-17)**(hp-1)
        return r

    lb = -8*ones(n)
    ub = 15*ones(n)+8*cos(arange(n))

    ind = 3

    A = zeros((2, n))
    A[0, ind:ind+2] = 1
    A[1, ind+2:ind+4] = 1
    b = [15,  8]

    Aeq = zeros(n)
    Aeq[ind+4:ind+8] = 1
    beq = 45
    ########################################################
    colors = ['b', 'k', 'y', 'g', 'r']
    #solvers = ['ipopt', 'ralg','scipy_cobyla']
    solvers = ['ralg','scipy_slsqp', 'ipopt']
    solvers = [ 'ralg', 'scipy_slsqp']
    solvers = [ 'ralg']
    solvers = [ 'r2']
    solvers = [ 'ralg', 'scipy_slsqp']
    ########################################################
    for i, solver in enumerate(solvers):
        p = NLP(f, x0, df=df, c=c, h=h, dc=dc, dh=dh, lb=lb, ub=ub, A=A, b=b, Aeq=Aeq, beq=beq, maxIter = 1e4, \
                show = solver==solvers[-1], color=colors[i],  **kwargs )
        if not kwargs.has_key('iprint'): p.iprint = -1
#        p.checkdf()
#        p.checkdc()
#        p.checkdh()
        r = p.solve(solver)
    if r.istop>0: return True, r, p
    else: return False, r, p
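A hedged driver for the harness above; in OpenOpt's convention r.istop > 0 means the solver reports successful termination:

# Hypothetical call; iprint=1 is forwarded to NLP via **kwargs.
passed, r, p = test(complexity=0, iprint=1)
print('passed: %s, f* = %s' % (passed, r.ff))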
Example 8
def fitpeak(x,y,yerr):
    maxval=x.max()
    minval=x.min()
    diff=y.max()-y.min()-y.mean()
    sig=y.std()
    print 'diff',diff,'std',sig
    if diff-1*sig>0:
        #the difference between the high and low points and
        #the mean is greater than 1 sigma, so we have a signal
        p0=findpeak(x,y,2)
        print 'p0',p0
        #Area center width Bak area2 center2 width2
        center1=p0[0]
        width1=p0[1]
        center2=p0[2]
        width2=p0[3]
        sigma=width1/2/N.sqrt(2*N.log(2))  # convert FWHM of first peak to Gaussian sigma
        ymax=maxval-minval
        area=ymax*(N.sqrt(2*pi)*sigma)
        print 'ymax',ymax
        pin=[area,center1,width1,0,area,center2,width2]





        if 1:
            p = NLP(chisq, pin, maxIter = 1e3, maxFunEvals = 1e5)
            #p.lb=lowerm
            #p.ub=upperm
            p.args.f=(x,y,yerr)
            p.plot = 0
            p.iprint = 1
            p.contol = 1e-5#3 # required constraints tolerance, default for NLP is 1e-6

# for ALGENCAN solver gradtol is the only one stop criterium connected to openopt
# (except maxfun, maxiter)
# Note that in ALGENCAN gradtol means norm of projected gradient of  the Augmented Lagrangian
# so it should be something like 1e-3...1e-5
            p.gradtol = 1e-5#5 # gradient stop criterium (default for NLP is 1e-6)
    #print 'maxiter', p.maxiter
    #print 'maxfun', p.maxfun
            p.maxIter=50
#    p.maxfun=100

    #p.df_iter = 50
            p.maxTime = 4000
    #r=p.solve('scipy_cobyla')
        #r=p.solve('scipy_lbfgsb')
            #r = p.solve('algencan')
            print 'ralg'
            r = p.solve('ralg')
            print 'done'
            pfit=r.xf
            print 'pfit openopt',pfit
            print 'r dict', r.__dict__
        if 1: 
            print 'mpfit'
            p0=pfit
            parbase={'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}
            parinfo=[]
            for i in range(len(p0)):
                parinfo.append(copy.deepcopy(parbase))
            for i in range(len(p0)): 
                parinfo[i]['value']=p0[i]
            fa = {'x':x, 'y':y, 'err':yerr}
            #parinfo[1]['fixed']=1
            #parinfo[2]['fixed']=1
            m = mpfit(myfunct_res, p0, parinfo=parinfo,functkw=fa)
            if (m.status <= 0): 
                print 'error message = ', m.errmsg
            params=m.params
            pfit=params
            perror=m.perror
            #chisqr=(myfunct_res(m.params, x=th, y=counts, err=counts_err)[1]**2).sum()
            chisqr=chisq(pfit,x,y,yerr)
            dof=m.dof
            #Icalc=gauss(pfit,th)
            #print 'mpfit chisqr', chisqr
        ycalc=gauss(pfit,x)

        if 1:
            width_x=N.linspace(p0[0]-p0[1],p0[0]+p0[1],100)
            width_y=N.ones(width_x.shape)*(maxval-minval)/2
            pos_y=N.linspace(minval,maxval,100)
            pos_x=N.ones(pos_y.shape)*p0[0]
            if 0:
                
                pylab.errorbar(x,y,yerr,marker='s',linestyle='None',mfc='black',mec='black',ecolor='black')
                pylab.plot(width_x,width_y)
                pylab.plot(pos_x,pos_y)
                pylab.plot(x,ycalc)
                pylab.show()

    else:
        #fix center
        #fix width
        print 'no peak'
        #Area center width Bak
        area=0
        center=x[len(x)/2]
        width=(x.max()-x.min())/5.0  #rather arbitrary, but we don't know if it's the first.... #better to use resolution
        Bak=y.mean()
        p0=N.array([area,center,width,Bak],dtype='float64')  #initial conditions
        parbase={'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}
        parinfo=[]
        for i in range(len(p0)):
            parinfo.append(copy.deepcopy(parbase))
        for i in range(len(p0)): 
            parinfo[i]['value']=p0[i]
        fa = {'x':x, 'y':y, 'err':yerr}
        parinfo[1]['fixed']=1
        parinfo[2]['fixed']=1
        m = mpfit(myfunct_res, p0, parinfo=parinfo,functkw=fa)
        if (m.status <= 0): 
            print 'error message = ', m.errmsg
        params=m.params
        pfit=params
        perror=m.perror
        #chisqr=(myfunct_res(m.params, x=th, y=counts, err=counts_err)[1]**2).sum()
        chisqr=chisq(pfit,x,y,yerr)
        dof=m.dof
        ycalc=gauss(pfit,x)
        #print 'perror',perror
        if 0:
            pylab.errorbar(x,y,yerr,marker='s',linestyle='None',mfc='black',mec='black',ecolor='black')
            pylab.plot(x,ycalc)
            pylab.show()

    print 'final answer'
    print 'perror', perror
    #If the fit is unweighted (i.e. no errors were given, or the weights
    #	were uniformly set to unity), then .perror will probably not represent
    #the true parameter uncertainties.

    #	*If* you can assume that the true reduced chi-squared value is unity --
    #	meaning that the fit is implicitly assumed to be of good quality --
    #	then the estimated parameter uncertainties can be computed by scaling
    #	.perror by the measured chi-squared value.

    #	   dof = len(x) - len(mpfit.params) # deg of freedom
    #	   # scaled uncertainties
    #	   pcerror = mpfit.perror * sqrt(mpfit.fnorm / dof)

    print 'params', pfit
    print 'chisqr', chisqr  #note that chisqr already is scaled by dof
    pcerror=perror*N.sqrt(m.fnorm / m.dof)#chisqr
    print 'pcerror', pcerror

    integrated_intensity=N.abs(pfit[0])
    integrated_intensity_err=N.abs(pcerror[0])    
    ycalc=gauss(pfit,x)
    print 'perror',perror
    if 1:
        pylab.figure()
        pylab.errorbar(x,y,yerr,marker='s',linestyle='None',mfc='black',mec='black',ecolor='black')
        pylab.plot(x,ycalc)
        #qstr=str(qnode.q['h_center'])+','+str(qnode.q['k_center'])+','+str(qnode.q['l_center'])
        #pylab.title(qstr)
        pylab.show()

    return pfit,perror,pcerror,chisqr
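A short usage sketch for fitpeak, assuming x, y, yerr are 1-D NumPy arrays (the names are illustrative):

# Hedged sketch; pfit[0] is the peak area per the comments above.
pfit, perror, pcerror, chisqr = fitpeak(x, y, yerr)
print('area = %g +/- %g' % (abs(pfit[0]), abs(pcerror[0])))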
Example 9
    def fit_node(self,index):
        qnode=self.qlist[index]
        print qnode.q
        th=qnode.th_condensed['a3']
        counts=qnode.th_condensed['counts']
        counts_err=qnode.th_condensed['counts_err']
        print qnode.th_condensed['counts'].std()
        print qnode.th_condensed['counts'].mean()
        maxval=qnode.th_condensed['counts'].max()
        minval=qnode.th_condensed['counts'].min()
        diff=qnode.th_condensed['counts'].max()-qnode.th_condensed['counts'].min()\
            -qnode.th_condensed['counts'].mean()
        sig=qnode.th_condensed['counts'].std()

        if diff-2*sig>0:
            #the difference between the high and low points and
            #the mean is greater than 2 sigma, so we have a signal
            p0=findpeak(th,counts,1)
            print 'p0',p0
            #Area center width Bak
            center=p0[0]
            width=p0[1]
            sigma=width/2/N.sqrt(2*N.log(2))
            Imax=maxval-minval
            area=Imax*(N.sqrt(2*pi)*sigma)
            print 'Imax',Imax
            pin=[area,center,width,0]





            if 1:
                p = NLP(chisq, pin, maxIter = 1e3, maxFunEvals = 1e5)
                #p.lb=lowerm
                #p.ub=upperm
                p.args.f=(th,counts,counts_err)
                p.plot = 0
                p.iprint = 1
                p.contol = 1e-5#3 # required constraints tolerance, default for NLP is 1e-6

    # for ALGENCAN solver gradtol is the only one stop criterium connected to openopt
    # (except maxfun, maxiter)
    # Note that in ALGENCAN gradtol means norm of projected gradient of  the Augmented Lagrangian
    # so it should be something like 1e-3...1e-5
                p.gradtol = 1e-5#5 # gradient stop criterium (default for NLP is 1e-6)
        #print 'maxiter', p.maxiter
        #print 'maxfun', p.maxfun
                p.maxIter=50
    #    p.maxfun=100

        #p.df_iter = 50
                p.maxTime = 4000
        #r=p.solve('scipy_cobyla')
            #r=p.solve('scipy_lbfgsb')
                #r = p.solve('algencan')
                print 'ralg'
                r = p.solve('ralg')
                print 'done'
                pfit=r.xf
                print 'pfit openopt',pfit
                print 'r dict', r.__dict__

            if 0:
                print 'curvefit'
                print sys.executable
                pfit,popt=curve_fit(gauss2, th, counts, p0=pfit, sigma=counts_err)
                print 'p,popt', pfit,popt
                perror=N.sqrt(N.diag(popt))
                print 'perror',perror
                chisqr=chisq(pfit,th,counts,counts_err)
                dof=len(th)-len(pfit)
                print 'chisq',chisqr
            if 0:
                oparam=scipy.odr.Model(gauss)
                mydatao=scipy.odr.RealData(th,counts,sx=None,sy=counts_err)
                myodr = scipy.odr.ODR(mydatao, oparam, beta0=pfit)
                myoutput=myodr.run()
                myoutput.pprint()
                pfit=myoutput.beta
            if 1: 
                print 'mpfit'
                p0=pfit
                parbase={'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}
                parinfo=[]
                for i in range(len(p0)):
                    parinfo.append(copy.deepcopy(parbase))
                for i in range(len(p0)): 
                    parinfo[i]['value']=p0[i]
                fa = {'x':th, 'y':counts, 'err':counts_err}
                #parinfo[1]['fixed']=1
                #parinfo[2]['fixed']=1
                m = mpfit(myfunct_res, p0, parinfo=parinfo,functkw=fa)
                if (m.status <= 0): 
                    print 'error message = ', m.errmsg
                params=m.params
                pfit=params
                perror=m.perror
                #chisqr=(myfunct_res(m.params, x=th, y=counts, err=counts_err)[1]**2).sum()
                chisqr=chisq(pfit,th,counts,counts_err)
                dof=m.dof
                #Icalc=gauss(pfit,th)
                #print 'mpfit chisqr', chisqr


            if 0:
                width_x=N.linspace(p0[0]-p0[1],p0[0]+p0[1],100)
                width_y=N.ones(width_x.shape)*(maxval-minval)/2
                pos_y=N.linspace(minval,maxval,100)
                pos_x=N.ones(pos_y.shape)*p0[0]
                if 1:
                    pylab.errorbar(th,counts,counts_err,marker='s',linestyle='None',mfc='black',mec='black',ecolor='black')
                    pylab.plot(width_x,width_y)
                    pylab.plot(pos_x,pos_y)
                    pylab.plot(th,gauss(pfit,th))
                    pylab.show()

        else:
            #fix center
            #fix width
            print 'no peak'
            #Area center width Bak
            area=0
            center=th[len(th)/2]
            width=(th.max()-th.min())/5.0  #rather arbitrary, but we don't know if it's the first....
            Bak=qnode.th_condensed['counts'].mean()
            p0=N.array([area,center,width,Bak],dtype='float64')  #initial conditions
            parbase={'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}
            parinfo=[]
            for i in range(len(p0)):
                parinfo.append(copy.deepcopy(parbase))
            for i in range(len(p0)): 
                parinfo[i]['value']=p0[i]
            fa = {'x':th, 'y':counts, 'err':counts_err}
            parinfo[1]['fixed']=1
            parinfo[2]['fixed']=1
            m = mpfit(myfunct_res, p0, parinfo=parinfo,functkw=fa)
            if (m.status <= 0): 
                print 'error message = ', m.errmsg
            params=m.params
            pfit=params
            perror=m.perror
            #chisqr=(myfunct_res(m.params, x=th, y=counts, err=counts_err)[1]**2).sum()
            chisqr=chisq(pfit,th,counts,counts_err)
            dof=m.dof
            Icalc=gauss(pfit,th)
            #print 'perror',perror
            if 0:
                pylab.errorbar(th,counts,counts_err,marker='s',linestyle='None',mfc='black',mec='black',ecolor='black')
                pylab.plot(th,Icalc)
                pylab.show()

        print 'final answer'
        print 'perror', perror
        #If the fit is unweighted (i.e. no errors were given, or the weights
        #	were uniformly set to unity), then .perror will probably not represent
        #the true parameter uncertainties.

        #	*If* you can assume that the true reduced chi-squared value is unity --
        #	meaning that the fit is implicitly assumed to be of good quality --
        #	then the estimated parameter uncertainties can be computed by scaling
        #	.perror by the measured chi-squared value.

        #	   dof = len(x) - len(mpfit.params) # deg of freedom
        #	   # scaled uncertainties
        #	   pcerror = mpfit.perror * sqrt(mpfit.fnorm / dof)

        print 'params', pfit
        print 'chisqr', chisqr  #note that chisqr already is scaled by dof
        pcerror=perror*N.sqrt(m.fnorm / m.dof)#chisqr
        print 'pcerror', pcerror

        self.qlist[index].th_integrated_intensity=N.abs(pfit[0])
        self.qlist[index].th_integrated_intensity_err=N.abs(pcerror[0])    
        Icalc=gauss(pfit,th)
        print 'perror',perror
        if 0:
            pylab.figure()
            pylab.errorbar(th,counts,counts_err,marker='s',linestyle='None',mfc='black',mec='black',ecolor='black')
            pylab.plot(th,Icalc)
            qstr=str(qnode.q['h_center'])+','+str(qnode.q['k_center'])+','+str(qnode.q['l_center'])
            pylab.title(qstr)
            #pylab.show()

        return
Example 10
def test(complexity=0, **kwargs):
    n = 15 * (complexity + 1)

    x0 = 15 * cos(arange(n)) + 8

    f = lambda x: ((x - 15)**2).sum()
    df = lambda x: 2 * (x - 15)

    c = lambda x: [2 * x[0]**4 - 32, x[1]**2 + x[2]**2 - 8]

    # dc(x)/dx: non-lin ineq constraints gradients (optional):
    def dc(x):
        r = zeros((len(c(x0)), n))
        r[0, 0] = 2 * 4 * x[0]**3
        r[1, 1] = 2 * x[1]
        r[1, 2] = 2 * x[2]
        return r

    hp = 2
    h1 = lambda x: (x[-1] - 13)**hp
    h2 = lambda x: (x[-2] - 17)**hp
    h = lambda x: [h1(x), h2(x)]

    # dh(x)/dx: non-lin eq constraints gradients (optional):
    def dh(x):
        r = zeros((2, n))
        r[0, -1] = hp * (x[-1] - 13)**(hp - 1)
        r[1, -2] = hp * (x[-2] - 17)**(hp - 1)
        return r

    lb = -8 * ones(n)
    ub = 15 * ones(n) + 8 * cos(arange(n))

    ind = 3

    A = zeros((2, n))
    A[0, ind:ind + 2] = 1
    A[1, ind + 2:ind + 4] = 1
    b = [15, 8]

    Aeq = zeros(n)
    Aeq[ind + 4:ind + 8] = 1
    beq = 45
    ########################################################
    colors = ['b', 'k', 'y', 'g', 'r']
    #solvers = ['ipopt', 'ralg','scipy_cobyla']
    solvers = ['ralg', 'scipy_slsqp', 'ipopt']
    solvers = ['ralg', 'scipy_slsqp']
    solvers = ['ralg']
    solvers = ['r2']
    solvers = ['ralg', 'scipy_slsqp']
    ########################################################
    for i, solver in enumerate(solvers):
        p = NLP(f, x0, df=df, c=c, h=h, dc=dc, dh=dh, lb=lb, ub=ub, A=A, b=b, Aeq=Aeq, beq=beq, maxIter = 1e4, \
                show = solver==solvers[-1], color=colors[i],  **kwargs )
        if not kwargs.has_key('iprint'): p.iprint = -1
        #        p.checkdf()
        #        p.checkdc()
        #        p.checkdh()
        r = p.solve(solver)
    if r.istop > 0: return True, r, p
    else: return False, r, p
Example 11
def fitpeak(x, y, yerr):
    maxval = x.max()
    minval = x.min()
    diff = y.max() - y.min() - y.mean()
    sig = y.std()
    print 'diff', diff, 'std', sig
    if diff - 1 * sig > 0:
        #the difference between the high and low points and
        #the mean is greater than 1 sigma, so we have a signal
        p0 = findpeak(x, y, 2)
        print 'p0', p0
        #Area center width Bak area2 center2 width2
        center1 = p0[0]
        width1 = p0[1]
        center2 = p0[2]
        width2 = p0[3]
        sigma = width1 / 2 / N.sqrt(2 * N.log(2))  # convert FWHM of first peak to Gaussian sigma
        ymax = maxval - minval
        area = ymax * (N.sqrt(2 * pi) * sigma)
        print 'ymax', ymax
        pin = [area, center1, width1, 0, area, center2, width2]

        if 1:
            p = NLP(chisq, pin, maxIter=1e3, maxFunEvals=1e5)
            #p.lb=lowerm
            #p.ub=upperm
            p.args.f = (x, y, yerr)
            p.plot = 0
            p.iprint = 1
            p.contol = 1e-5  #3 # required constraints tolerance, default for NLP is 1e-6

            # for ALGENCAN solver gradtol is the only one stop criterium connected to openopt
            # (except maxfun, maxiter)
            # Note that in ALGENCAN gradtol means norm of projected gradient of  the Augmented Lagrangian
            # so it should be something like 1e-3...1e-5
            p.gradtol = 1e-5  #5 # gradient stop criterium (default for NLP is 1e-6)
            #print 'maxiter', p.maxiter
            #print 'maxfun', p.maxfun
            p.maxIter = 50
            #    p.maxfun=100

            #p.df_iter = 50
            p.maxTime = 4000
            #r=p.solve('scipy_cobyla')
            #r=p.solve('scipy_lbfgsb')
            #r = p.solve('algencan')
            print 'ralg'
            r = p.solve('ralg')
            print 'done'
            pfit = r.xf
            print 'pfit openopt', pfit
            print 'r dict', r.__dict__
        if 1:
            print 'mpfit'
            p0 = pfit
            parbase = {
                'value': 0.,
                'fixed': 0,
                'limited': [0, 0],
                'limits': [0., 0.]
            }
            parinfo = []
            for i in range(len(p0)):
                parinfo.append(copy.deepcopy(parbase))
            for i in range(len(p0)):
                parinfo[i]['value'] = p0[i]
            fa = {'x': x, 'y': y, 'err': yerr}
            #parinfo[1]['fixed']=1
            #parinfo[2]['fixed']=1
            m = mpfit(myfunct_res, p0, parinfo=parinfo, functkw=fa)
            if (m.status <= 0):
                print 'error message = ', m.errmsg
            params = m.params
            pfit = params
            perror = m.perror
            #chisqr=(myfunct_res(m.params, x=th, y=counts, err=counts_err)[1]**2).sum()
            chisqr = chisq(pfit, x, y, yerr)
            dof = m.dof
            #Icalc=gauss(pfit,th)
            #print 'mpfit chisqr', chisqr
        ycalc = gauss(pfit, x)

        if 1:
            width_x = N.linspace(p0[0] - p0[1], p0[0] + p0[1], 100)
            width_y = N.ones(width_x.shape) * (maxval - minval) / 2
            pos_y = N.linspace(minval, maxval, 100)
            pos_x = N.ones(pos_y.shape) * p0[0]
            if 0:

                pylab.errorbar(x,
                               y,
                               yerr,
                               marker='s',
                               linestyle='None',
                               mfc='black',
                               mec='black',
                               ecolor='black')
                pylab.plot(width_x, width_y)
                pylab.plot(pos_x, pos_y)
                pylab.plot(x, ycalc)
                pylab.show()

    else:
        #fix center
        #fix width
        print 'no peak'
        #Area center width Bak
        area = 0
        center = x[len(x) / 2]
        width = (
            x.max() - x.min()
        ) / 5.0  #rather arbitrary, but we don't know if it's the first.... #better to use resolution
        Bak = y.mean()
        p0 = N.array([area, center, width, Bak],
                     dtype='float64')  #initial conditions
        parbase = {
            'value': 0.,
            'fixed': 0,
            'limited': [0, 0],
            'limits': [0., 0.]
        }
        parinfo = []
        for i in range(len(p0)):
            parinfo.append(copy.deepcopy(parbase))
        for i in range(len(p0)):
            parinfo[i]['value'] = p0[i]
        fa = {'x': x, 'y': y, 'err': yerr}
        parinfo[1]['fixed'] = 1
        parinfo[2]['fixed'] = 1
        m = mpfit(myfunct_res, p0, parinfo=parinfo, functkw=fa)
        if (m.status <= 0):
            print 'error message = ', m.errmsg
        params = m.params
        pfit = params
        perror = m.perror
        #chisqr=(myfunct_res(m.params, x=th, y=counts, err=counts_err)[1]**2).sum()
        chisqr = chisq(pfit, x, y, yerr)
        dof = m.dof
        ycalc = gauss(pfit, x)
        #print 'perror',perror
        if 0:
            pylab.errorbar(x,
                           y,
                           yerr,
                           marker='s',
                           linestyle='None',
                           mfc='black',
                           mec='black',
                           ecolor='black')
            pylab.plot(x, ycalc)
            pylab.show()

    print 'final answer'
    print 'perror', perror
    #If the fit is unweighted (i.e. no errors were given, or the weights
    #	were uniformly set to unity), then .perror will probably not represent
    #the true parameter uncertainties.

    #	*If* you can assume that the true reduced chi-squared value is unity --
    #	meaning that the fit is implicitly assumed to be of good quality --
    #	then the estimated parameter uncertainties can be computed by scaling
    #	.perror by the measured chi-squared value.

    #	   dof = len(x) - len(mpfit.params) # deg of freedom
    #	   # scaled uncertainties
    #	   pcerror = mpfit.perror * sqrt(mpfit.fnorm / dof)

    print 'params', pfit
    print 'chisqr', chisqr  #note that chisqr already is scaled by dof
    pcerror = perror * N.sqrt(m.fnorm / m.dof)  #chisqr
    print 'pcerror', pcerror

    integrated_intensity = N.abs(pfit[0])
    integrated_intensity_err = N.abs(pcerror[0])
    ycalc = gauss(pfit, x)
    print 'perror', perror
    if 1:
        pylab.figure()
        pylab.errorbar(x,
                       y,
                       yerr,
                       marker='s',
                       linestyle='None',
                       mfc='black',
                       mec='black',
                       ecolor='black')
        pylab.plot(x, ycalc)
        #qstr=str(qnode.q['h_center'])+','+str(qnode.q['k_center'])+','+str(qnode.q['l_center'])
        #pylab.title(qstr)
        pylab.show()

    return pfit, perror, pcerror, chisqr
Example 12
    def run_optimization(self, plot=True, _only_check_gradients=False):
        """Start/run optimization procedure and the optimum unless.
        
        Set the keyword parameter 'plot' to False (default=True) if plotting
        should not be conducted.
        """
        grid = self.get_grid()
        model = self.get_model()

        # Initial try
        p0 = self.get_p0()

        # Less than (-0.5 < u < 1)
        # TODO: These are currently hard coded. They shouldn't be.
        #NLT = len(grid) * len(model.u)
        #Alt = N.zeros( (NLT, len(p0)) )
        #Alt[:, (len(grid) - 1) * len(model.x):] = -N.eye(len(grid) *
        #                                              len(model.u))
        #blt = -0.5*N.ones(NLT)

        # TODO: These are currently hard coded. They shouldn't be.
        #N_xvars = (len(grid) - 1) * len(model.x)
        #N_uvars = len(grid) * len(model.u)
        #N_vars = N_xvars + N_uvars
        #Alt = -N.eye(N_vars)
        #blt = N.zeros(N_vars)
        #blt[0:N_xvars] = -N.ones(N_xvars)*0.001
        #blt[N_xvars:] = -N.ones(N_uvars)*1;

        # Get OpenOPT handler
        p = NLP(
            self.f,
            p0,
            maxIter=1e3,
            maxFunEvals=1e3,
            #A=Alt, # See TODO above
            #b=blt, # See TODO above
            df=self.df,
            ftol=1e-4,
            xtol=1e-4,
            contol=1e-4)
        if len(grid) > 1:
            p.h = self.h
            p.dh = self.dh

        if plot:
            p.plot = 1
        p.iprint = 1

        if _only_check_gradients:
            # Check gradients against finite difference quotients
            p.checkdf(maxViolation=0.05)
            p.checkdh()
            return None

        #opt = p.solve('ralg') # does not work - serious convergence issues
        opt = p.solve('scipy_slsqp')

        if plot:
            plot_control_solutions(model, grid, opt.xf)

        return opt
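A hedged call sketch, assuming `opt_problem` is an instance of the class defining run_optimization:

# Hypothetical instance name; opt.xf is OpenOpt's optimal point, as used above.
opt = opt_problem.run_optimization(plot=False)
if opt is not None:
    print(opt.xf)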
Example 13
def single_shooting(model, initial_u=0.4, plot=True):
    """Run single shooting of model model with a constant u.
    
    The function returns the optimal u.
    
    Notes:
     * Currently written specifically for VDP.
     * Currently only supports one input/control signal.
    
    Parameters:
    model -- the model which is to be simulated. Only models with one control
             signal are supported.
             
    Keyword parameters:
    initial_u -- the initial input U_0 used to initialize the optimization.
    
    """
    assert len(model.u) == 1, "More than one control signal is " \
                                         "not supported as of today."

    start_time = model.opt_interval_get_start_time()
    end_time = model.opt_interval_get_final_time()

    u = model.u
    u0 = N.array([initial_u])
    print "Initial u:", u

    gradient = None
    gradient_u = None

    def f(cur_u):
        """The cost evaluation function."""
        model.reset()
        u[:] = cur_u
        print "u is", u
        big_gradient, last_y, gradparams, sens = _shoot(
            model, start_time, end_time)

        model.set_x_p(last_y, 0)
        model.set_dx_p(model.dx, 0)
        model.set_u_p(model.u, 0)
        cost = model.opt_eval_J()

        gradient_u = cur_u.copy()
        gradient = big_gradient[gradparams['u_start']:gradparams['u_end']]

        print "Cost:", cost
        print "Grad:", gradient
        return cost

    def df(cur_u):
        """The gradient of the cost function.
        
        NOT USED right now.
        """
        model.reset()
        u[:] = cur_u
        print "u is", u
        big_gradient, last_y, gradparams, sens = _shoot(
            model, start_time, end_time)

        model.set_x_p(last_y, 0)
        model.set_dx_p(model.dx, 0)
        model.set_u_p(model.u, 0)
        cost = model.opt_eval_J()

        gradient_u = cur_u.copy()
        gradient = big_gradient[gradparams['u_start']:gradparams['u_end']]

        print "Cost:", cost
        print "Grad:", gradient
        return gradient

    p = NLP(f, u0, maxIter=1e3, maxFunEvals=1e2)
    p.df = df
    if plot:
        p.plot = 1
    else:
        p.plot = 0
    p.iprint = 1

    u_opt = p.solve('scipy_slsqp')
    return u_opt
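Note that these listings attach the gradient in two equivalent ways, via the constructor (NLP(f, x0, df=df, ...)) and via an attribute, as single_shooting does with p.df = df. A sketch of both forms:

# Both forms appear in the examples and behave the same in OpenOpt.
p = NLP(f, u0, df=df)   # gradient passed to the constructor
p = NLP(f, u0)
p.df = df               # or attached after construction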
Example 14
    def solve(self):
        p = NLP(self.cost_function, self.initial_solution, Aeq=self.Aeq, beq=self.beq,
                A=self.A, b=self.b, lb=self.lb, ub=self.ub)
        p.iprint = -1
        r = p.solve('ralg')
        return [r.xf, r.ff]