Example #1
def LinearFit():

    from ConstrainedFit import clsq

    xabs= [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0 ]
    data= [ 1.1, 1.9, 2.9, 4.1, 5.1, 6.1, 6.9, 7.9, 9.1 ]
    errors= [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ]
    covm= clsq.covmFromErrors( errors )

    upar= [ 0.1, 1.1 ]
    upnames= { 0: "a", 1: "b" }

    def linearConstrFun( mpar, upar, xv ):
        constraints= []
        for mparval, xval in zip( mpar, xv ):
            constraints.append( upar[0]+upar[1]*xval - mparval )
        return constraints

    solver= clsq.clsqSolver( data, covm, upar, linearConstrFun, 
                             uparnames=upnames, args=(xabs,) )
    print "Constraints before solution"
    print solver.getConstraints()
    solver.solve()
    ca= clsq.clsqAnalysis( solver )
    ca.printResults()

    return
Example #2
def LinearFit():

    from ConstrainedFit import clsq

    xabs= [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0 ]
    data= [ 1.1, 1.9, 2.9, 4.1, 5.1, 6.1, 6.9, 7.9, 9.1 ]
    errors= [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ]
    covm= clsq.covmFromErrors( errors )

    upar= [ 0.1, 1.1 ]
    upnames= { 0: "a", 1: "b" }

    def linearConstrFun( mpar, upar, xv ):
        constraints= []
        for mparval, xval in zip( mpar, xv ):
            constraints.append( upar[0]+upar[1]*xval - mparval )
        return constraints

    solver= clsq.clsqSolver( data, covm, upar, linearConstrFun,
                             uparnames=upnames, args=(xabs,) )
    print( "Constraints before solution" )
    print( solver.getConstraints() )
    solver.solve()
    ca= clsq.clsqAnalysis( solver )
    ca.printResults()

    return
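
For orientation, a minimal cross-check sketch (assuming numpy is available; it is not part of ConstrainedFit): the same straight line y = a + b*x fitted with ordinary weighted least squares. With all errors equal, the slope and intercept should agree with the constrained fit above.

import numpy as np

xabs= [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0 ]
data= [ 1.1, 1.9, 2.9, 4.1, 5.1, 6.1, 6.9, 7.9, 9.1 ]
errors= [ 0.1 ]*9

# np.polyfit takes weights proportional to 1/sigma and returns coefficients
# highest power first, i.e. [ b, a ] for y = a + b*x
b, a= np.polyfit( xabs, data, 1, w=[ 1.0/e for e in errors ] )
print( "a =", a, ", b =", b )
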
Example #3
def Triangle( opt="" ):

    from ConstrainedFit import clsq
    from math import sqrt, tan

    data= [ 10.0, 7.0, 9.0, 1.0 ]
    errors= [ 0.05, 0.2, 0.2, 0.02 ]
    covm= clsq.covmFromErrors( errors )
    mpnames= { 0: "a", 1: "b", 2: "c", 3: "gamma" }

    upar= [ 30.0 ]
    upnames= { 0: "A" }

    def triangleConstrFun( mpar, upar ):
        a= mpar[0]
        b= mpar[1]
        c= mpar[2]
        gamma= mpar[3]
        aa= upar[0]
        p= (a+b+c)/2.0
        s= sqrt( p*(p-a)*(p-b)*(p-c) )
        return [ tan(gamma/2.0)-s/(p*(p-c)), aa-s ]

    solver= clsq.clsqSolver( data, covm, upar, triangleConstrFun,
                             uparnames=upnames, mparnames=mpnames )

    print( "Constraints before solution" )
    print( solver.getConstraints() )
    lBlobel= False
    lCorr= False
    lResidual= True
    if "b" in opt:
        lBlobel= True
    if "corr" in opt:
        lCorr= True
    if "r" in opt:
        lResidual= False
    solver.solve( lBlobel=lBlobel, lResidual=lResidual )
    ca= clsq.clsqAnalysis( solver )
    ca.printResults( corr=lCorr )

    if "m" in opt:
        _doMinosAll( solver )

    if "cont" in opt:
        _doProfile2d( solver, ipar1=0, type1="u", ipar2=1, type2="m" )

    print( "Profile A" )
    par= clsq.createClsqPar( 0, "u", solver )
    results= ca.profile( par )
    print( results )

    return solver
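
A small standalone sanity check of the constraint function above (my own sketch, not part of the example): p is the semi-perimeter, s the area from Heron's formula, the first constraint is the tangent half-angle relation tan(gamma/2) = s/(p*(p-c)) for the angle gamma opposite side c, and the second ties the unmeasured area A to s. For an exact 3-4-5 right triangle both constraints vanish:

from math import sqrt, tan, pi

a, b, c, gamma, A= 3.0, 4.0, 5.0, pi/2.0, 6.0   # right triangle, gamma opposite c, area 6
p= (a+b+c)/2.0                                  # semi-perimeter
s= sqrt( p*(p-a)*(p-b)*(p-c) )                  # Heron's formula for the area
print( tan(gamma/2.0)-s/(p*(p-c)), A-s )        # both ~ 0.0
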
Example #4
def Triangle( opt="" ):

    from ConstrainedFit import clsq
    from math import sqrt, tan

    data= [ 10.0, 7.0, 9.0, 1.0 ]
    errors= [ 0.05, 0.2, 0.2, 0.02 ]
    covm= clsq.covmFromErrors( errors )
    mpnames= { 0: "a", 1: "b", 2: "c", 3: "gamma" }

    upar= [ 30.0 ]
    upnames= { 0: "A" }

    def triangleConstrFun( mpar, upar ):
        a= mpar[0]
        b= mpar[1]
        c= mpar[2]
        gamma= mpar[3]
        aa= upar[0]
        p= (a+b+c)/2.0
        s= sqrt( p*(p-a)*(p-b)*(p-c) )
        return [ tan(gamma/2.0)-s/(p*(p-c)), aa-s ]

    solver= clsq.clsqSolver( data, covm, upar, triangleConstrFun,
                             uparnames=upnames, mparnames=mpnames )
    print "Constraints before solution"
    print solver.getConstraints()
    lBlobel= False
    lCorr= False
    if "b" in opt:
        lBlobel= True
    if "corr" in opt:
        lCorr= True
    solver.solve( lBlobel=lBlobel )
    ca= clsq.clsqAnalysis( solver )
    ca.printResults( corr=lCorr )

    if "m" in opt:
        _doMinosAll( solver )

    if "cont" in opt:
        _doProfile2d( solver, ipar1=0, type1="u", ipar2=1, type2="m" )

    print "Profile A"
    par= clsq.createClsqPar( 0, "u", solver )
    results= ca.profile( par )
    print( results )

    return solver
Example #5
def Branchingratios( opt="m" ):

    from ConstrainedFit import clsq

    data= [ 0.265, 0.28, 0.37, 0.166, 0.42, 0.5, 0.20, 0.16, 
            0.72, 0.6, 0.37, 0.64, 0.45, 0.028, 10.0, 7.5 ]
    errors= [ 0.014, 0.05, 0.06, 0.013, 0.15, 0.2, 0.08, 0.08,
              0.15, 0.4, 0.16, 0.40, 0.45, 0.009, 5.0, 2.5 ]
    # Error scale factor a la Blobel lecture:
    if "e" in opt:
        print "Apply scaling *2.8 of error[13]"
        errors[13]= errors[13]*2.8
    covm= clsq.covmFromErrors( errors )

    upar= [ 0.33, 0.36, 0.16, 0.09, 0.055 ]
    upnames= { 0: "B1", 1: "B2", 2: "B3", 3: "B4", 4: "B5" }

    def brConstrFun( mpar, upar ):
        constraints= []
        x= []
        for i in range( 5 ):
            x.append( upar[i] )
        for i in range( 5, 21 ):
            x.append( mpar[i-5] )

        constraints.append( x[0]+x[1]+x[2]+x[3]+x[4]-1.0 )
        constraints.append( x[3]-x[5]*x[0] )
        constraints.append( x[3]-x[6]*x[0] )
        constraints.append( x[3]-x[7]*x[0] )
        
        constraints.append( x[3]-(x[1]+x[2])*x[8] )
        constraints.append( x[3]-(x[1]+x[2])*x[9] )
        constraints.append( x[3]-(x[1]+x[2])*x[10] )
        constraints.append( x[3]-(x[1]+x[2])*x[11] )
        constraints.append( x[3]-(x[1]+x[2])*x[12] )
        
        constraints.append( x[1]-(x[1]+x[2])*x[13] )
        constraints.append( x[1]-(x[1]+x[2])*x[14] )
        
        constraints.append( x[0]-(x[1]+x[2])*x[15] )
        constraints.append( x[0]-(x[1]+x[2])*x[16] )
        
        constraints.append( 3.0*x[4]-x[0]*x[17] )
        
        constraints.append( x[3]-x[18] )
        
        constraints.append( (x[1]+x[2])-x[4]*x[19] )
        constraints.append( (x[1]+x[2])-x[4]*x[20] )

        return constraints

    solver= clsq.clsqSolver( data, covm, upar, brConstrFun, epsilon=0.00001,
                             uparnames=upnames )
    print "Constraints before solution"
    print solver.getConstraints()
    lBlobel= False
    if "b" in opt:
        lBlobel= True
    solver.solve( lBlobel=lBlobel )
    lcov= False
    lcorr= False
    if "corr" in opt:
        lcov= True
        lcorr= True

    ca= clsq.clsqAnalysis( solver )
    ca.printResults( cov=lcov, corr=lcorr )

    if "m" in opt:
        _doMinosAll( solver, "u" )
    if "cont" in opt:
        _doProfile2d( solver, ipar1=0, type1="u", ipar2=1, type2="u" )

    return
Example #6
def GaussLikelihood( opt="" ):

    from ConstrainedFit import clhood, clsq
    from scipy.stats import norm
    from math import log

    # Data and errors:
    xabs= [ 1.0, 2.0, 3.0, 4.0, 5.0 ]
    data= [ 1.1, 1.9, 2.9, 4.1, 5.1 ]
    errors= [ 0.1, 0.1, 0.1, 0.1, 0.1 ]

    # Linear function (straight line) parameters:
    upar= [ 0.0, 1.0 ]
    upnames= { 0: "a", 1: "b" }

    # Likelihood is sum of log(Gauss) for each data point:
    def lfun( mpar ):
        result= 0.0
        for datum, parval, error in zip( data, mpar, errors ):
            parval= parval.item()  # extract a plain Python float from the numpy element
            result-= log( norm.pdf( datum, parval, error ) )
            # result+= 0.5*((datum-parval)/error)**2
        return result

    # Constraints force linear function for each data point:
    def constrFun( mpar, upar ):
        constraints= []
        for xval, parval in zip( xabs, mpar ):
            constraints.append( upar[0] + upar[1]*xval - parval )
        return constraints

    # Configure options:
    lBlobel= False
    lPrint= False
    lCorr= False
    if "b" in opt:
        lBlobel= True
    if "p" in opt:
        lPrint= True
    if "c" in opt:
        lCorr= True

    # Solution using constrained log(likelihood) minimisation:
    print "\nMax likelihood constrained fit"
    solver= clhood.clhoodSolver( data, upar, lfun, constrFun, uparnames=upnames )
    print "Constraints before solution"
    print solver.getConstraints()
    solver.solve( lBlobel=lBlobel, lpr=lPrint )
    ca= clsq.clsqAnalysis( solver )
    ca.printResults( corr=lCorr )

    # Solution using constrained least squares:
    print "\nLeast squares constrained fit"
    covm= clsq.covmFromErrors( errors )
    solver= clsq.clsqSolver( data, covm, upar, constrFun, uparnames=upnames )
    print "Constraints before solution"
    print solver.getConstraints()
    solver.solve( lBlobel=lBlobel, lpr=lPrint )
    ca= clsq.clsqAnalysis( solver )
    ca.printResults( corr=lCorr )

    return
Example #7
def Branchingratios( opt="m" ):

    from ConstrainedFit import clsq

    data= [ 0.265, 0.28, 0.37, 0.166, 0.42, 0.5, 0.20, 0.16,
            0.72, 0.6, 0.37, 0.64, 0.45, 0.028, 10.0, 7.5 ]
    errors= [ 0.014, 0.05, 0.06, 0.013, 0.15, 0.2, 0.08, 0.08,
              0.15, 0.4, 0.16, 0.40, 0.45, 0.009, 5.0, 2.5 ]
    # Error scale factor a la Blobel lecture:
    if "e" in opt:
        print( "Apply scaling *2.8 of error[13]" )
        errors[13]= errors[13]*2.8
    covm= clsq.covmFromErrors( errors )

    # PDG values as start values, last number is 5.5%, not 0.55 as in br.txt
    upar= [ 0.33, 0.36, 0.16, 0.09, 0.055 ]
    upnames= { 0: "B1", 1: "B2", 2: "B3", 3: "B4", 4: "B5" }

    def brConstrFun( mpar, upar ):
        constraints= []
        x= []
        for i in range( 5 ):
            x.append( upar[i] )
        for i in range( 5, 21 ):
            x.append( mpar[i-5] )

        constraints.append( x[0]+x[1]+x[2]+x[3]+x[4]-1.0 )
        constraints.append( x[3]-x[5]*x[0] )
        constraints.append( x[3]-x[6]*x[0] )
        constraints.append( x[3]-x[7]*x[0] )

        constraints.append( x[3]-(x[1]+x[2])*x[8] )
        constraints.append( x[3]-(x[1]+x[2])*x[9] )
        constraints.append( x[3]-(x[1]+x[2])*x[10] )
        constraints.append( x[3]-(x[1]+x[2])*x[11] )
        constraints.append( x[3]-(x[1]+x[2])*x[12] )

        constraints.append( x[1]-(x[1]+x[2])*x[13] )
        constraints.append( x[1]-(x[1]+x[2])*x[14] )

        constraints.append( x[0]-(x[1]+x[2])*x[15] )
        constraints.append( x[0]-(x[1]+x[2])*x[16] )

        constraints.append( 3.0*x[4]-x[0]*x[17] )

        constraints.append( x[3]-x[18] )

        constraints.append( (x[1]+x[2])-x[4]*x[19] )
        constraints.append( (x[1]+x[2])-x[4]*x[20] )

        return constraints

    solver= clsq.clsqSolver( data, covm, upar, brConstrFun, epsilon=0.00001,
                             uparnames=upnames )
    print( "Constraints before solution" )
    print( solver.getConstraints() )
    lBlobel= False
    if "b" in opt:
        lBlobel= True
    lResidual= False
    if "r" in opt:
        lResidual= True
    solver.solve( lBlobel=lBlobel, lResidual=lResidual )
    lcov= False
    lcorr= False
    if "corr" in opt:
        lcov= True
        lcorr= True

    ca= clsq.clsqAnalysis( solver )
    ca.printResults( cov=lcov, corr=lcorr )

    if "m" in opt:
        _doMinosAll( solver, "u" )
    if "cont" in opt:
        _doProfile2d( solver, ipar1=0, type1="u", ipar2=1, type2="u" )

    return
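
To see what the constraints encode, a small standalone illustration (a sketch, not part of the example): the first constraint is unitarity of the five branching fractions, and each remaining one ties a measured quantity to a combination of them, e.g. the first measurement enters as the ratio B4/B1. Evaluated at the start values, neither is exactly zero before the fit:

upar= [ 0.33, 0.36, 0.16, 0.09, 0.055 ]   # B1..B5 start values from above
r0= 0.265                                 # first measured value, taken at its data value
print( sum( upar )-1.0 )                  # unitarity:   ~ -0.005
print( upar[3]-r0*upar[0] )               # B4 - r0*B1:  ~  0.0026
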
Example #8
def GaussLikelihood( opt="" ):

    from ConstrainedFit import clhood, clsq
    from scipy.stats import norm
    from math import log

    # Data and errors:
    xabs= [ 1.0, 2.0, 3.0, 4.0, 5.0 ]
    data= [ 1.1, 1.9, 2.9, 4.1, 5.1 ]
    errors= [ 0.1, 0.1, 0.1, 0.1, 0.1 ]

    # Linear function (straight line) parameters:
    upar= [ 0.0, 1.0 ]
    upnames= { 0: "a", 1: "b" }

    # Likelihood is sum of log(Gauss) for each data point:
    def lfun( mpar ):
        result= 0.0
        for datum, parval, error in zip( data, mpar, errors ):
            parval= parval.item()  # extract a plain Python float from the numpy element
            result-= log( norm.pdf( datum, parval, error ) )
            # result+= 0.5*((datum-parval)/error)**2
        return result

    # Constraints force linear function for each data point:
    def constrFun( mpar, upar ):
        constraints= []
        for xval, parval in zip( xabs, mpar ):
            constraints.append( upar[0] + upar[1]*xval - parval )
        return constraints

    # Configure options:
    lBlobel= False
    lPrint= False
    lCorr= False
    if "b" in opt:
        lBlobel= True
    if "p" in opt:
        lPrint= True
    if "c" in opt:
        lCorr= True

    # Solution using constrained log(likelihood) minimisation:
    print( "\nMax likelihood constrained fit" )
    solver= clhood.clhoodSolver( data, upar, lfun, constrFun, uparnames=upnames )
    print( "Constraints before solution" )
    print( solver.getConstraints() )
    solver.solve( lBlobel=lBlobel, lpr=lPrint )
    ca= clsq.clsqAnalysis( solver )
    ca.printResults( corr=lCorr )

    # Solution using constrained least squares:
    print( "\nLeast squares constrained fit" )
    covm= clsq.covmFromErrors( errors )
    solver= clsq.clsqSolver( data, covm, upar, constrFun, uparnames=upnames )
    print( "Constraints before solution" )
    print( solver.getConstraints() )
    solver.solve( lBlobel=lBlobel, lpr=lPrint )
    ca= clsq.clsqAnalysis( solver )
    ca.printResults( corr=lCorr )

    return
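
A quick numerical note (my own sketch) on why the two fits above agree: for a Gaussian, -log(pdf) equals the half chi-square term plus a constant that does not depend on the fitted parameter, so minimising the negative log likelihood and the least squares objective picks out the same solution. Checked for a single data point:

from scipy.stats import norm
from math import log, pi, sqrt

datum, mu, sigma= 1.1, 1.0, 0.1
nll= -log( norm.pdf( datum, mu, sigma ) )        # negative log likelihood term
chi2half= 0.5*((datum-mu)/sigma)**2              # half chi-square term
const= log( sigma*sqrt( 2.0*pi ) )               # parameter-independent constant
print( nll, chi2half+const )                     # the two agree to rounding
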