Example #1
def PoissonLikelihood( opt="" ):
    
    from ConstrainedFit import clhood
    from ConstrainedFit import clsq
    from scipy.special import gammaln
    from scipy.stats import poisson
    from math import log

    # Data, two counts, errors not needed(!):
    data= [ 9.0, 16.0 ]
    # errors= [ 3.0, 4.0 ]
    mpnames= { 0: "count 1", 1: "count 2" }

    # Fit variable is parameter of poisson distribution:
    upar= [ 12.0 ]
    upnames= { 0: "mu" }

    # Likelihood is sum of log(poisson) for each data point:
    def lfun( mpar ):
        result= 0.0
        for datum, parval in zip( data, mpar ):
            parval= parval.item()
            result-= log( poisson.pmf( datum, parval ) )
            # Calculated log(poisson):
            # result-= datum*log( parval ) - gammaln( datum+1.0 ) - parval
        return result

    # Constraints force poisson distribution with same parameter
    # for every data point:
    def constrFun( mpar, upar ):
        return [ mpar[0] - upar[0], 
                 mpar[1] - upar[0] ]

    solver= clhood.clhoodSolver( data, upar, lfun, constrFun, 
                                 uparnames=upnames, mparnames=mpnames )
    print "Constraints before solution"
    print solver.getConstraints()
    lBlobel=False
    lPrint= False
    lCorr= False
    if "b" in opt:
        lBlobel= True
    if "p" in opt:
        lPrint= True
    if "c" in opt:
        lCorr= True
    solver.solve( lBlobel=lBlobel, lpr=lPrint )

    ca= clsq.clsqAnalysis( solver )
    ca.printResults( corr=lCorr )

    return
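In the option string, "b" appears to select the Blobel-style solution scheme (passed as lBlobel to solver.solve), "p" turns on the solver printout (lpr), and "c" adds the correlation matrix to the result printout. A minimal driver sketch, assuming the ConstrainedFit package is importable, might be:

# Hypothetical call: Blobel scheme plus correlation printout.
PoissonLikelihood( opt="bc" )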
Example #2
def GaussLikelihood( opt="" ):

    from ConstrainedFit import clhood, clsq
    from scipy.stats import norm
    from math import log

    # Data and errors:
    xabs= [ 1.0, 2.0, 3.0, 4.0, 5.0 ]
    data= [ 1.1, 1.9, 2.9, 4.1, 5.1 ]
    errors= [ 0.1, 0.1, 0.1, 0.1, 0.1 ]

    # Linear function (straight line) parameters:
    upar= [ 0.0, 1.0 ]
    upnames= { 0: "a", 1: "b" }

    # Likelihood is sum of log(Gauss) for each data point:
    def lfun( mpar ):
        result= 0.0
        for datum, parval, error in zip( data, mpar, errors ):
            parval= parval.item()
            result-= log( norm.pdf( datum, parval, error ) )
            # result+= 0.5*((datum-parval)/error)**2
        return result

    # Constraints force linear function for each data point:
    def constrFun( mpar, upar ):
        constraints= []
        for xval, parval in zip( xabs, mpar ):
            constraints.append( upar[0] + upar[1]*xval - parval )
        return constraints

    # Configure options:
    lBlobel=False
    lPrint= False
    lCorr= False
    if "b" in opt:
        lBlobel= True
    if "p" in opt:
        lPrint= True
    if "c" in opt:
        lCorr= True

    # Solution using constrained log(likelihood) minimisation:
    print( "\nMax likelihood constrained fit" )
    solver= clhood.clhoodSolver( data, upar, lfun, constrFun, uparnames=upnames )
    print( "Constraints before solution" )
    print( solver.getConstraints() )
    solver.solve( lBlobel=lBlobel, lpr=lPrint )
    ca= clsq.clsqAnalysis( solver )
    ca.printResults( corr=lCorr )

    # Solution using constrained least squares:
    print( "\nLeast squares constrained fit" )
    covm= clsq.covmFromErrors( errors )
    solver= clsq.clsqSolver( data, covm, upar, constrFun, uparnames=upnames )
    print( "Constraints before solution" )
    print( solver.getConstraints() )
    solver.solve( lBlobel=lBlobel, lpr=lPrint )
    ca= clsq.clsqAnalysis( solver )
    ca.printResults( corr=lCorr )

    return
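Since the constraints pin each measured point to the straight line a + b*x and the errors are independent Gaussians, both solutions above should agree with an ordinary weighted line fit. A quick cross-check sketch using only numpy (an illustration, not part of ConstrainedFit) could be:

import numpy as np

xabs= np.array( [ 1.0, 2.0, 3.0, 4.0, 5.0 ] )
data= np.array( [ 1.1, 1.9, 2.9, 4.1, 5.1 ] )
errors= np.array( [ 0.1, 0.1, 0.1, 0.1, 0.1 ] )

# polyfit takes weights w ~ 1/sigma and returns coefficients with the
# highest power first, i.e. [ b, a ] for a degree-1 polynomial:
b, a= np.polyfit( xabs, data, 1, w=1.0/errors )
print( "a =", a, " b =", b )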
Example #3
def PoissonLikelihood( opt="" ):

    from ConstrainedFit import clhood
    from ConstrainedFit import clsq
    from scipy.special import gammaln
    from scipy.stats import poisson
    from math import log

    # Data, two counts, errors not needed(!):
    data= [ 9.0, 16.0 ]
    # errors= [ 3.0, 4.0 ]
    mpnames= { 0: "count 1", 1: "count 2" }

    # Fit variable is parameter of poisson distribution:
    upar= [ 12.0 ]
    upnames= { 0: "mu" }

    # Likelihood is sum of log(poisson) for each data point:
    def lfun( mpar ):
        result= 0.0
        for datum, parval in zip( data, mpar ):
            parval= parval.item()
            result-= poisson.logpmf( datum, parval )
            # Calculated log(poisson):
            # result-= datum*log( parval ) - gammaln( datum+1.0 ) - parval
        return result

    # Constraints force poisson distribution with same parameter
    # for every data point:
    def constrFun( mpar, upar ):
        return [ mpar[0] - upar[0],
                 mpar[1] - upar[0] ]

    solver= clhood.clhoodSolver( data, upar, lfun, constrFun,
                                 uparnames=upnames, mparnames=mpnames )
    print( "Constraints before solution" )
    print( solver.getConstraints() )
    lBlobel=False
    lPrint= False
    lCorr= False
    if "b" in opt:
        lBlobel= True
    if "p" in opt:
        lPrint= True
    if "c" in opt:
        lCorr= True
    solver.solve( lBlobel=lBlobel, lpr=lPrint )
    ca= clsq.clsqAnalysis( solver )
    ca.printResults( corr=lCorr )

    # Minuit fit of the two-count likelihood; it should reproduce
    # the maximum-likelihood solution above.
    from AverageTools import minuitSolver
    def fcn( n, grad, fval, par, ipar ):
        mu= par[0]
        lsum= 0.0
        for datum in data:
            lsum-= poisson.logpmf( datum, mu )
            # Calculated log(poisson):
            # lsum-= datum*log( mu ) - gammaln( datum+1.0 ) - mu
        fval.value= lsum
        return
    par= [ 12.0 ]
    parerr= [ 1.0 ]
    parname= [ "mu" ]
    ndof= 1
    solver= minuitSolver.minuitSolver( fcn, par, parerr, parname, ndof )
    # ERRDEF 0.5: parameter errors correspond to a change of 0.5 in -log(L):
    solver.minuitCommand( "SET ERRDEF 0.5" )
    solver.solve()
    solver.printResults()

    return
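For two counts n1 = 9 and n2 = 16 forced to share one Poisson parameter, the likelihood is maximised analytically at mu = (n1 + n2)/2 = 12.5 with an uncertainty of sqrt(mu/2) = 2.5, which is what the constrained fit and the Minuit fit above should both report. A stand-alone hand-check, assuming only scipy, might look like:

from scipy.stats import poisson
from scipy.optimize import minimize_scalar

data= [ 9.0, 16.0 ]

# Negative log-likelihood of a common Poisson parameter mu:
def nll( mu ):
    return -sum( poisson.logpmf( n, mu ) for n in data )

res= minimize_scalar( nll, bounds=( 1.0, 30.0 ), method="bounded" )
print( "mu =", res.x )   # expect (9+16)/2 = 12.5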