Example #1
def LinearFit():

    from ConstrainedFit import clsq

    xabs= [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0 ]
    data= [ 1.1, 1.9, 2.9, 4.1, 5.1, 6.1, 6.9, 7.9, 9.1 ]
    errors= [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ]
    covm= clsq.covmFromErrors( errors )

    upar= [ 0.1, 1.1 ]
    upnames= { 0: "a", 1: "b" }

    def linearConstrFun( mpar, upar, xv ):
        constraints= []
        for mparval, xval in zip( mpar, xv ):
            constraints.append( upar[0]+upar[1]*xval - mparval )
        return constraints

    solver= clsq.clsqSolver( data, covm, upar, linearConstrFun,
                             uparnames=upnames, args=(xabs,) )
    print( "Constraints before solution" )
    print( solver.getConstraints() )
    solver.solve()
    ca= clsq.clsqAnalysis( solver )
    ca.printResults()

    return
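
For orientation, the constraints above simply force the straight-line model a + b*x_i onto every measured point, so with equal errors the result should come out close to an ordinary unconstrained straight-line fit of the same points. A minimal cross-check sketch using numpy.polyfit (numpy is an assumption here, not part of the ConstrainedFit package):

# Sketch only: unconstrained straight-line fit of the same data with numpy,
# as a rough cross-check of the constrained result above.
import numpy as np

xabs= [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0 ]
data= [ 1.1, 1.9, 2.9, 4.1, 5.1, 6.1, 6.9, 7.9, 9.1 ]

# polyfit returns the highest-order coefficient first, i.e. [ b, a ] for b*x + a:
b, a= np.polyfit( xabs, data, 1 )
print( "a =", a, "  b =", b )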
Example #2
def LinearFit():

    from ConstrainedFit import clsq

    xabs= [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0 ]
    data= [ 1.1, 1.9, 2.9, 4.1, 5.1, 6.1, 6.9, 7.9, 9.1 ]
    errors= [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ]
    covm= clsq.covmFromErrors( errors )

    upar= [ 0.1, 1.1 ]
    upnames= { 0: "a", 1: "b" }

    def linearConstrFun( mpar, upar, xv ):
        constraints= []
        for mparval, xval in zip( mpar, xv ):
            constraints.append( upar[0]+upar[1]*xval - mparval )
        return constraints

    solver= clsq.clsqSolver( data, covm, upar, linearConstrFun, 
                             uparnames=upnames, args=(xabs,) )
    print "Constraints before solution"
    print solver.getConstraints()
    solver.solve()
    ca= clsq.clsqAnalysis( solver )
    ca.printResults()

    return
Example #3
def Triangle( opt="" ):

    from ConstrainedFit import clsq
    from math import sqrt, tan

    data= [ 10.0, 7.0, 9.0, 1.0 ]
    errors= [ 0.05, 0.2, 0.2, 0.02 ]
    covm= clsq.covmFromErrors( errors )
    mpnames= { 0: "a", 1: "b", 2: "c", 3: "gamma" }

    upar= [ 30.0 ]
    upnames= { 0: "A" }

    def triangleConstrFun( mpar, upar ):
        a= mpar[0]
        b= mpar[1]
        c= mpar[2]
        gamma= mpar[3]
        aa= upar[0]
        p= (a+b+c)/2.0
        s= sqrt( p*(p-a)*(p-b)*(p-c) )
        return [ tan(gamma/2.0)-s/(p*(p-c)), aa-s ]

    solver= clsq.clsqSolver( data, covm, upar, triangleConstrFun,
                             uparnames=upnames, mparnames=mpnames )

    print( "Constraints before solution" )
    print( solver.getConstraints() )
    lBlobel= False
    lCorr= False
    lResidual= True
    if "b" in opt:
        lBlobel= True
    if "corr" in opt:
        lCorr= True
    if "r" in opt:
        lResidual= False
    solver.solve( lBlobel=lBlobel, lResidual=lResidual )
    ca= clsq.clsqAnalysis( solver )
    ca.printResults( corr=lCorr )

    if "m" in opt:
        _doMinosAll( solver )

    if "cont" in opt:
        _doProfile2d( solver, ipar1=0, type1="u", ipar2=1, type2="m" )

    print( "Profile A" )
    par= clsq.createClsqPar( 0, "u", solver )
    results= ca.profile( par )
    print( results )

    return solver
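
The two constraints encode Heron's formula for the triangle area and the half-angle relation used in the first constraint, tan(gamma/2) = area/(p*(p-c)) with p the semi-perimeter and gamma the angle opposite side c. A standalone sketch evaluating both constraints at the measured values (mirroring what solver.getConstraints() prints before the fit):

# Sketch only: evaluate the two triangle constraints at the measured values.
from math import sqrt, tan

a, b, c, gamma= 10.0, 7.0, 9.0, 1.0       # measured sides and angle (radians)
A= 30.0                                   # start value of the area parameter

p= (a+b+c)/2.0                            # semi-perimeter
s= sqrt( p*(p-a)*(p-b)*(p-c) )            # Heron's formula for the area

print( "angle constraint:", tan(gamma/2.0) - s/(p*(p-c)) )
print( "area constraint: ", A - s )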
Example #4
    def _createSolver(self, gm, parindxmaps, errorkeys, systerrormatrix, data,
                      extrapars, extraparerrors, upar, upnames, mpnames,
                      extraparnames):

        # Get reduced covariance matrix and add "measured parameter"
        # errors to diagonal:
        dataparser = self._getDataparser()
        covm = dataparser.getTotalReducedCovarianceAslist()
        self.__addExtraparErrors(covm, extraparerrors)
        hcovopt = dataparser.getCovoption()
        originaldata = dataparser.getValues()
        ndata = len(data)

        # Constraints function for average:
        def avgConstrFun(mpar, upar):
            umpar = gm * upar
            constraints = []
            for ival in range(ndata):
                constraint = -umpar[ival]
                for ierr in parindxmaps.keys():
                    covopt = hcovopt[errorkeys[ierr]]
                    indxmap = parindxmaps[ierr]
                    if ival in indxmap.keys():
                        parindx = indxmap[ival] + ndata
                        term = mpar[parindx] * systerrormatrix[ierr][ival]
                        if "r" in covopt:
                            # linearised exponential a la Blobel for
                            # multiplicative rel. error
                            # constraint*= ( 1.0 + term/originaldata[ival] )
                            constraint /= (1.0 + term / originaldata[ival])
                        else:
                            # Additive error:
                            constraint += term
                constraint += mpar[ival]
                constraints.append(constraint)
            return constraints

        # Create solver and return it:
        upnames = dict((upnames.index(name), name) for name in upnames)
        names = mpnames + extraparnames
        names = dict((names.index(name), name) for name in names)
        solver = clsq.clsqSolver(data + extrapars,
                                 covm,
                                 upar,
                                 avgConstrFun,
                                 uparnames=upnames,
                                 mparnames=names,
                                 ndof=ndata - len(upar))

        return solver
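
_createSolver builds one constraint per input measurement: the measured value, shifted by its systematic terms, must equal the corresponding component of gm*upar. A minimal, self-contained sketch of the same pattern (no systematics, a single average), using only the clsq calls that appear elsewhere in these examples:

# Sketch only: averaging two measurements of one quantity with clsq,
# following the call pattern used in the other examples of this collection.
from ConstrainedFit import clsq

data= [ 10.1, 9.8 ]
covm= clsq.covmFromErrors( [ 0.3, 0.4 ] )
upar= [ 10.0 ]

def avgConstrFun( mpar, upar ):
    # Each measured value must equal the single unmeasured average:
    return [ upar[0] - mpar[0], upar[0] - mpar[1] ]

solver= clsq.clsqSolver( data, covm, upar, avgConstrFun,
                         uparnames={ 0: "avg" } )
solver.solve()
clsq.clsqAnalysis( solver ).printResults()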
Example #5
def Triangle( opt="" ):

    from ConstrainedFit import clsq
    from math import sqrt, tan

    data= [ 10.0, 7.0, 9.0, 1.0 ]
    errors= [ 0.05, 0.2, 0.2, 0.02 ]
    covm= clsq.covmFromErrors( errors )
    mpnames= { 0: "a", 1: "b", 2: "c", 3: "gamma" }

    upar= [ 30.0 ]
    upnames= { 0: "A" }

    def triangleConstrFun( mpar, upar ):
        a= mpar[0]
        b= mpar[1]
        c= mpar[2]
        gamma= mpar[3]
        aa= upar[0]
        p= (a+b+c)/2.0
        s= sqrt( p*(p-a)*(p-b)*(p-c) )
        return [ tan(gamma/2.0)-s/(p*(p-c)), aa-s ]

    solver= clsq.clsqSolver( data, covm, upar, triangleConstrFun,
                             uparnames=upnames, mparnames=mpnames )
    print "Constraints before solution"
    print solver.getConstraints()
    lBlobel= False
    lCorr= False
    if "b" in opt:
        lBlobel= True
    if "corr" in opt:
        lCorr= True
    solver.solve( lBlobel=lBlobel )
    ca= clsq.clsqAnalysis( solver )
    ca.printResults( corr=lCorr )

    if "m" in opt:
        _doMinosAll( solver )

    if "cont" in opt:
        _doProfile2d( solver, ipar1=0, type1="u", ipar2=1, type2="m" )

    print "Profile A"
    par= clsq.createClsqPar( 0, "u", solver )
    results= ca.profile( par )
    print( results )

    return solver
Example #6
    def _createSolver( self, gm, parindxmaps, errorkeys, 
                       systerrormatrix, data,
                       extrapars, extraparerrors, upar, 
                       upnames, mpnames, extraparnames ):

        # Get reduced covariance matrix and add "measured parameter"
        # errors to diagonal:
        dataparser= self._getDataparser()
        covm= dataparser.getTotalReducedCovarianceAslist()
        self.__addExtraparErrors( covm, extraparerrors )
        hcovopt= dataparser.getCovoption()
        originaldata= dataparser.getValues()
        ndata= len( data )

        # Constraints function for average:
        def avgConstrFun( mpar, upar ):
            umpar= gm*upar
            constraints= []
            for ival in range( ndata ):
                constraint= - umpar[ival]
                for ierr in parindxmaps.keys():
                    covopt= hcovopt[errorkeys[ierr]]
                    indxmap= parindxmaps[ierr]
                    if ival in indxmap.keys():
                        parindx= indxmap[ival] + ndata
                        term= mpar[parindx]*systerrormatrix[ierr][ival]
                        if "r" in covopt:
                            # linearised exponential a la Blobel for 
                            # multiplicative rel. error
                            # constraint*= ( 1.0 + term/originaldata[ival] )
                            constraint/= ( 1.0 + term/originaldata[ival] )
                        else:
                            # Additive error:
                            constraint+= term
                constraint+= mpar[ival]
                constraints.append( constraint )
            return constraints

        # Create solver and return it:
        upnames= dict( (upnames.index(name),name) for name in upnames )
        names= mpnames + extraparnames
        names= dict( (names.index(name),name) for name in names )
        solver= clsq.clsqSolver( data+extrapars, covm, upar, avgConstrFun,
                                 uparnames=upnames, mparnames=names,
                                 ndof=ndata-len(upar) )

        return solver
Example #7
def Branchingratios( opt="m" ):

    from ConstrainedFit import clsq

    data= [ 0.265, 0.28, 0.37, 0.166, 0.42, 0.5, 0.20, 0.16, 
            0.72, 0.6, 0.37, 0.64, 0.45, 0.028, 10.0, 7.5 ]
    errors= [ 0.014, 0.05, 0.06, 0.013, 0.15, 0.2, 0.08, 0.08,
              0.15, 0.4, 0.16, 0.40, 0.45, 0.009, 5.0, 2.5 ]
    # Error scale factor a la Blobel lecture:
    if "e" in opt:
        print "Apply scaling *2.8 of error[13]"
        errors[13]= errors[13]*2.8
    covm= clsq.covmFromErrors( errors )

    upar= [ 0.33, 0.36, 0.16, 0.09, 0.055 ]
    upnames= { 0: "B1", 1: "B2", 2: "B3", 3: "B4", 4: "B5" }

    def brConstrFun( mpar, upar ):
        constraints= []
        x= []
        for i in range( 5 ):
            x.append( upar[i] )
        for i in range( 5, 21 ):
            x.append( mpar[i-5] )

        constraints.append( x[0]+x[1]+x[2]+x[3]+x[4]-1.0 )
        constraints.append( x[3]-x[5]*x[0] )
        constraints.append( x[3]-x[6]*x[0] )
        constraints.append( x[3]-x[7]*x[0] )
        
        constraints.append( x[3]-(x[1]+x[2])*x[8] )
        constraints.append( x[3]-(x[1]+x[2])*x[9] )
        constraints.append( x[3]-(x[1]+x[2])*x[10] )
        constraints.append( x[3]-(x[1]+x[2])*x[11] )
        constraints.append( x[3]-(x[1]+x[2])*x[12] )
        
        constraints.append( x[1]-(x[1]+x[2])*x[13] )
        constraints.append( x[1]-(x[1]+x[2])*x[14] )
        
        constraints.append( x[0]-(x[1]+x[2])*x[15] )
        constraints.append( x[0]-(x[1]+x[2])*x[16] )
        
        constraints.append( 3.0*x[4]-x[0]*x[17] )
        
        constraints.append( x[3]-x[18] )
        
        constraints.append( (x[1]+x[2])-x[4]*x[19] )
        constraints.append( (x[1]+x[2])-x[4]*x[20] )

        return constraints

    solver= clsq.clsqSolver( data, covm, upar, brConstrFun, epsilon=0.00001,
                             uparnames=upnames )
    print "Constraints before solution"
    print solver.getConstraints()
    lBlobel= False
    if "b" in opt:
        lBlobel= True
    solver.solve( lBlobel=lBlobel )
    lcov= False
    lcorr= False
    if "corr" in opt:
        lcov= True
        lcorr= True

    ca= clsq.clsqAnalysis( solver )
    ca.printResults( cov=lcov, corr=lcorr )

    if "m" in opt:
        _doMinosAll( solver, "u" )
    if "cont" in opt:
        _doProfile2d( solver, ipar1=0, type1="u", ipar2=1, type2="u" )

    return
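
In brConstrFun, x[0..4] are the five unmeasured branching fractions and x[5..20] are the 16 measurements; besides the normalisation B1+...+B5 = 1, each constraint ties one measured ratio to the fractions (the second constraint, for instance, implies that data[0] measures B4/B1). A short sketch making the first two constraints explicit at the start values:

# Sketch only: the first two constraints evaluated at the start values.
upar= [ 0.33, 0.36, 0.16, 0.09, 0.055 ]    # B1..B5 start values
data0= 0.265                               # first measurement, ~ B4/B1

print( sum( upar ) - 1.0 )                 # normalisation: B1+...+B5 = 1
print( upar[3] - data0*upar[0] )           # ratio constraint: B4 - data0*B1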
Example #8
def StraightLine( opt="" ):

    from ConstrainedFit import clsq
    from numpy import matrix, zeros

    # Data, errors and correlations:
    xdata= [ 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0 ]
    ydata= [ 3.0, 2.5, 3.0, 5.0, 7.0, 5.5, 7.5 ]
    xerrs= [ 0.5, 0.3, 0.3, 0.5, 0.5, 0.3, 0.3 ]
    yerrs= [ 0.7, 1.0, 0.5, 0.7, 0.7, 1.0, 0.7 ]
    xyrho= [ -0.25, 0.5, 0.5, -0.25, 0.25, 0.95, -0.25 ]
    #xyrho= [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]
    covm= matrix( zeros( (14,14) ) )
    data= []
    npoints= len(xdata)
    for i in range( npoints ):
        subcovm= matrix( [ [ xerrs[i]**2, xyrho[i]*xerrs[i]*yerrs[i] ], 
                           [ xyrho[i]*xerrs[i]*yerrs[i], yerrs[i]**2 ] ] )
        covm[2*i:2*i+2,2*i:2*i+2]= subcovm
        data.append( xdata[i] )
        data.append( ydata[i] )
    print( covm )
    print( data )

    # Fit parameters for straight line:
    upar= [ 1.0, 0.5 ]
    upnames= { 0: "a", 1: "b" }
    #upar= [ 0.0, 1.0, 1.0 ]
    #upnames= { 0: "a", 1: "b", 2: "c" }

    # Constraint function forces y_i = a + b*x_i for every
    # pair of measurements x_i, y_i:
    def straightlineConstraints( mpar, upar ):
        constraints= []
        for i in range( npoints ):
            constraints.append( upar[0] + upar[1]*mpar[2*i] 
                                # + upar[2]*mpar[2*i]**2
                                - mpar[2*i+1] )
        return constraints

    # Setup the solver and solve:
    solver= clsq.clsqSolver( data, covm, upar, straightlineConstraints,
                             uparnames=upnames )
    print "Constraints before solution"
    print solver.getConstraints()
    lBlobel= False
    lCorr= False
    if "b" in opt:
        lBlobel= True
    if "corr" in opt:
        lCorr= True
    solver.solve( lBlobel=lBlobel )
    ca= clsq.clsqAnalysis( solver )
    ca.printResults( corr=lCorr )

    if "m" in opt:
        _doMinosAll( solver, "u" )

    global tg, lell, tf, tt, canvc, canvp
    from ROOT import TGraph, TF1, TText, TCanvas

    if "cont" in opt:
        canvc= TCanvas( "canv", "Chi^2 Contours", 600, 600 )
        _doProfile2d( solver, ipar1=0, type1="u", ipar2=1, type2="u" )


    # Plot:
    from array import array
    xarr= array( "f", xdata )
    yarr= array( "f", ydata )
    tg= TGraph( npoints, xarr, yarr )
    tg.SetMarkerStyle( 20 )
    tg.SetMinimum( 0.0 )
    tg.SetMaximum( 9.0 )
    tg.SetTitle( "straight line 2D fit" )
    xa= tg.GetXaxis()
    ya= tg.GetYaxis()
    xa.SetTitle( "X" )
    ya.SetTitle( "Y" )
    canvp= TCanvas( "canp", "Straight line 2D fit", 600, 400 )
    tg.Draw( "ap" )
    lell= []
    for i in range( npoints ):
        te= _makeEllipse( xdata[i], ydata[i], xerrs[i], yerrs[i], xyrho[i] )
        lell.append( te )
        te.Draw( "s" )
    solution= solver.getUpars()
    tf= TF1( "tf", "[0]+[1]*x", 0.0, 15.0 )
    for i in range( len(upar) ):
        tf.SetParameter( i, solution[i] )
        tf.SetParName( i, upnames[i] )
    tf.Draw( "same" )
    tt= TText( 1, 8, "y= a + b*x" )
    tt.Draw( "same" )

    return
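
Because each (x_i, y_i) pair is correlated, the covariance matrix is assembled from 2x2 blocks along the diagonal, and the data vector interleaves the coordinates as [ x_0, y_0, x_1, y_1, ... ]. A quick sketch of the first block, with a check that it stays positive definite for |rho| < 1:

# Sketch only: the 2x2 covariance block of the first data point.
import numpy as np

xerr, yerr, rho= 0.5, 0.7, -0.25
sub= np.array( [ [ xerr**2,        rho*xerr*yerr ],
                 [ rho*xerr*yerr,  yerr**2       ] ] )
print( sub )
print( "eigenvalues:", np.linalg.eigvalsh( sub ) )   # both > 0 for |rho| < 1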
Example #9
def GaussLikelihood( opt="" ):

    from ConstrainedFit import clhood, clsq
    from scipy.stats import norm
    from math import log

    # Data and errors:
    xabs= [ 1.0, 2.0, 3.0, 4.0, 5.0 ]
    data= [ 1.1, 1.9, 2.9, 4.1, 5.1 ]
    errors= [ 0.1, 0.1, 0.1, 0.1, 0.1 ]

    # Linear function (straight line) parameters:
    upar= [ 0.0, 1.0 ]
    upnames= { 0: "a", 1: "b" }

    # Likelihood is sum of log(Gauss) for each data point:
    def lfun( mpar ):
        result= 0.0
        for datum, parval, error in zip( data, mpar, errors ):
            parval= parval.item()
            result-= log( norm.pdf( datum, parval, error ) )
            # result+= 0.5*((datum-parval)/error)**2
        return result

    # Constraints force linear function for each data point:
    def constrFun( mpar, upar ):
        constraints= []
        for xval, parval in zip( xabs, mpar ):
            constraints.append( upar[0] + upar[1]*xval - parval )
        return constraints

    # Configure options:
    lBlobel=False
    lPrint= False
    lCorr= False
    if "b" in opt:
        lBlobel= True
    if "p" in opt:
        lPrint= True
    if "c" in opt:
        lCorr= True

    # Solution using constrained log(likelihood) minimisation:
    print "\nMax likelihood constrained fit"
    solver= clhood.clhoodSolver( data, upar, lfun, constrFun, uparnames=upnames )
    print "Constraints before solution"
    print solver.getConstraints()
    solver.solve( lBlobel=lBlobel, lpr=lPrint )
    ca= clsq.clsqAnalysis( solver )
    ca.printResults( corr=lCorr )

    # Solution using constrained least squares:
    print "\nLeast squares constrained fit"
    covm= clsq.covmFromErrors( errors )
    solver= clsq.clsqSolver( data, covm, upar, constrFun, uparnames=upnames )
    print "Constraints before solution"
    print solver.getConstraints()
    solver.solve( lBlobel=lBlobel, lpr=lPrint )
    ca= clsq.clsqAnalysis( solver )
    ca.printResults( corr=lCorr )

    return
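
The commented-out chi^2 line in lfun differs from the -log(Gauss) term only by the constant log(sigma*sqrt(2*pi)), which is why the constrained likelihood and constrained least-squares fits in this example should give the same solution. A minimal numerical check of that identity:

# Sketch only: -log(Gauss) equals 0.5*((x-mu)/sigma)**2 plus a constant.
from math import log, sqrt, pi
from scipy.stats import norm

x, mu, sigma= 1.1, 1.0, 0.1
nll= -log( norm.pdf( x, mu, sigma ) )
chi2half= 0.5*( (x-mu)/sigma )**2
print( nll, chi2half + log( sigma*sqrt( 2.0*pi ) ) )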
Example #10
def Branchingratios( opt="m" ):

    from ConstrainedFit import clsq

    data= [ 0.265, 0.28, 0.37, 0.166, 0.42, 0.5, 0.20, 0.16,
            0.72, 0.6, 0.37, 0.64, 0.45, 0.028, 10.0, 7.5 ]
    errors= [ 0.014, 0.05, 0.06, 0.013, 0.15, 0.2, 0.08, 0.08,
              0.15, 0.4, 0.16, 0.40, 0.45, 0.009, 5.0, 2.5 ]
    # Error scale factor a la Blobel lecture:
    if "e" in opt:
        print( "Apply scaling *2.8 of error[13]" )
        errors[13]= errors[13]*2.8
    covm= clsq.covmFromErrors( errors )

    # PDG values as start values, last number is 5.5%, not 0.55 as in br.txt
    upar= [ 0.33, 0.36, 0.16, 0.09, 0.055 ]
    upnames= { 0: "B1", 1: "B2", 2: "B3", 3: "B4", 4: "B5" }

    def brConstrFun( mpar, upar ):
        constraints= []
        x= []
        for i in range( 5 ):
            x.append( upar[i] )
        for i in range( 5, 21 ):
            x.append( mpar[i-5] )

        constraints.append( x[0]+x[1]+x[2]+x[3]+x[4]-1.0 )
        constraints.append( x[3]-x[5]*x[0] )
        constraints.append( x[3]-x[6]*x[0] )
        constraints.append( x[3]-x[7]*x[0] )

        constraints.append( x[3]-(x[1]+x[2])*x[8] )
        constraints.append( x[3]-(x[1]+x[2])*x[9] )
        constraints.append( x[3]-(x[1]+x[2])*x[10] )
        constraints.append( x[3]-(x[1]+x[2])*x[11] )
        constraints.append( x[3]-(x[1]+x[2])*x[12] )

        constraints.append( x[1]-(x[1]+x[2])*x[13] )
        constraints.append( x[1]-(x[1]+x[2])*x[14] )

        constraints.append( x[0]-(x[1]+x[2])*x[15] )
        constraints.append( x[0]-(x[1]+x[2])*x[16] )

        constraints.append( 3.0*x[4]-x[0]*x[17] )

        constraints.append( x[3]-x[18] )

        constraints.append( (x[1]+x[2])-x[4]*x[19] )
        constraints.append( (x[1]+x[2])-x[4]*x[20] )

        return constraints

    solver= clsq.clsqSolver( data, covm, upar, brConstrFun, epsilon=0.00001,
                             uparnames=upnames )
    print( "Constraints before solution" )
    print( solver.getConstraints() )
    lBlobel= False
    if "b" in opt:
        lBlobel= True
    lResidual= False
    if "r" in opt:
        lResidual= True
    solver.solve( lBlobel=lBlobel, lResidual=lResidual )
    lcov= False
    lcorr= False
    if "corr" in opt:
        lcov= True
        lcorr= True

    ca= clsq.clsqAnalysis( solver )
    ca.printResults( cov=lcov, corr=lcorr )

    if "m" in opt:
        _doMinosAll( solver, "u" )
    if "cont" in opt:
        _doProfile2d( solver, ipar1=0, type1="u", ipar2=1, type2="u" )

    return
Example #11
def StraightLine( opt="" ):

    from ConstrainedFit import clsq
    from numpy import matrix, zeros

    # Data, errors and correlations:
    xdata= [ 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0 ]
    ydata= [ 3.0, 2.5, 3.0, 5.0, 7.0, 5.5, 7.5 ]
    xerrs= [ 0.5, 0.3, 0.3, 0.5, 0.5, 0.3, 0.3 ]
    yerrs= [ 0.7, 1.0, 0.5, 0.7, 0.7, 1.0, 0.7 ]
    xyrho= [ -0.25, 0.5, 0.5, -0.25, 0.25, 0.95, -0.25 ]
    #xyrho= [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]
    covm= matrix( zeros( (14,14) ) )
    data= []
    npoints= len(xdata)
    for i in range( npoints ):
        subcovm= matrix( [ [ xerrs[i]**2, xyrho[i]*xerrs[i]*yerrs[i] ],
                           [ xyrho[i]*xerrs[i]*yerrs[i], yerrs[i]**2 ] ] )
        covm[2*i:2*i+2,2*i:2*i+2]= subcovm
        data.append( xdata[i] )
        data.append( ydata[i] )
    print( covm )
    print( data )

    # Fit parameters for straight line:
    upar= [ 1.0, 0.5 ]
    upnames= { 0: "a", 1: "b" }
    # or parabola, see possible mpar[.]**2 term in constraints
    #upar= [ 0.0, 1.0, 1.0 ]
    #upnames= { 0: "a", 1: "b", 2: "c" }

    # Constraint function forces y_i = a + b*x_i for every
    # pair of measurements x_i, y_i:
    def straightlineConstraints( mpar, upar ):
        constraints= []
        for i in range( npoints ):
            constraints.append( upar[0] + upar[1]*mpar[2*i]
                                # + upar[2]*mpar[2*i]**2
                                - mpar[2*i+1] )
        return constraints

    # Setup the solver and solve:
    solver= clsq.clsqSolver( data, covm, upar, straightlineConstraints,
                             uparnames=upnames )
    print( "Constraints before solution" )
    print( solver.getConstraints() )
    lBlobel= False
    lCorr= False
    if "b" in opt:
        lBlobel= True
    if "corr" in opt:
        lCorr= True
    solver.solve( lBlobel=lBlobel )
    ca= clsq.clsqAnalysis( solver )
    ca.printResults( corr=lCorr )

    if "m" in opt:
        _doMinosAll( solver, "u" )

    global tg, lell, tf, tt, canvc, canvp
    from ROOT import TGraph, TF1, TText, TCanvas

    if "cont" in opt:
        canvc= TCanvas( "canv", "Chi^2 Contours", 600, 600 )
        _doProfile2d( solver, ipar1=0, type1="u", ipar2=1, type2="u" )

    # Plot:
    from array import array
    xarr= array( "f", xdata )
    yarr= array( "f", ydata )
    tg= TGraph( npoints, xarr, yarr )
    tg.SetMarkerStyle( 20 )
    tg.SetMinimum( 0.0 )
    tg.SetMaximum( 9.0 )
    tg.SetTitle( "straight line 2D fit" )
    xa= tg.GetXaxis()
    ya= tg.GetYaxis()
    xa.SetTitle( "X" )
    ya.SetTitle( "Y" )
    canvp= TCanvas( "canp", "Straight line 2D fit", 600, 400 )
    tg.Draw( "ap" )
    lell= []
    for i in range( npoints ):
        te= _makeEllipse( xdata[i], ydata[i], xerrs[i], yerrs[i], xyrho[i] )
        lell.append( te )
        te.Draw( "s" )
    solution= solver.getUpars()
    tf= TF1( "tf", "[0]+[1]*x", 0.0, 15.0 )
    for i in range( len(upar) ):
        tf.SetParameter( i, solution[i] )
        tf.SetParName( i, upnames[i] )
    tf.Draw( "same" )
    tt= TText( 1, 8, "y= a + b*x" )
    tt.Draw( "same" )

    return
Example #12
def GaussLikelihood( opt="" ):

    from ConstrainedFit import clhood, clsq
    from scipy.stats import norm
    from math import log

    # Data and errors:
    xabs= [ 1.0, 2.0, 3.0, 4.0, 5.0 ]
    data= [ 1.1, 1.9, 2.9, 4.1, 5.1 ]
    errors= [ 0.1, 0.1, 0.1, 0.1, 0.1 ]

    # Linear function (straight line) parameters:
    upar= [ 0.0, 1.0 ]
    upnames= { 0: "a", 1: "b" }

    # Likelihood is sum of log(Gauss) for each data point:
    def lfun( mpar ):
        result= 0.0
        for datum, parval, error in zip( data, mpar, errors ):
            parval= parval.item()
            result-= log( norm.pdf( datum, parval, error ) )
            # result+= 0.5*((datum-parval)/error)**2
        return result

    # Constraints force linear function for each data point:
    def constrFun( mpar, upar ):
        constraints= []
        for xval, parval in zip( xabs, mpar ):
            constraints.append( upar[0] + upar[1]*xval - parval )
        return constraints

    # Configure options:
    lBlobel=False
    lPrint= False
    lCorr= False
    if "b" in opt:
        lBlobel= True
    if "p" in opt:
        lPrint= True
    if "c" in opt:
        lCorr= True

    # Solution using constrained log(likelihood) minimisation:
    print( "\nMax likelihood constrained fit" )
    solver= clhood.clhoodSolver( data, upar, lfun, constrFun, uparnames=upnames )
    print( "Constraints before solution" )
    print( solver.getConstraints() )
    solver.solve( lBlobel=lBlobel, lpr=lPrint )
    ca= clsq.clsqAnalysis( solver )
    ca.printResults( corr=lCorr )

    # Solution using constrained least squares:
    print( "\nLeast squares constrained fit" )
    covm= clsq.covmFromErrors( errors )
    solver= clsq.clsqSolver( data, covm, upar, constrFun, uparnames=upnames )
    print( "Constraints before solution" )
    print( solver.getConstraints() )
    solver.solve( lBlobel=lBlobel, lpr=lPrint )
    ca= clsq.clsqAnalysis( solver )
    ca.printResults( corr=lCorr )

    return