Example 1
def weightavgLF(x):
    # See Mathematica notebook "g-2_comparison.nb" for discussion of the two
    # methods shown here. Assumes module-level globals: PM, deltaamu, damu
    # (lists of equal length), damuexp, and the likelihood-function module LFs.
    #L = 0   #for weighted sum
    LogL = 0   #for weighted average
    for PMi, deltaamui, damui in zip(PM, deltaamu, damu):
        #Add the pieces of the likelihood function together
        #L += exp(LFs.lognormallike(x, deltaamui, sqrt(damuexp**2+damui**2)*1e-10))  #This assumes no uncertainty in the computed MSSM value, or that the SM uncertainty dominates.
        #LogL = log(L)
        #Multiply separate likelihood contributions together (don't need PMi contributions in this case)
        LogL += LFs.lognormallike(x, deltaamui, sqrt(damuexp**2+damui**2)*1e-10)  #This assumes no uncertainty in the computed MSSM value, or that the SM uncertainty dominates.
    return LogL   #return the log-likelihood value
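
LFs.lognormallike itself is not shown in these examples. A minimal sketch consistent with the (x, mean, sigma) call signature used above, assuming it returns a Gaussian log-density (whether the real version keeps the normalisation constant is an assumption):

from math import log, pi

def lognormallike(x, mean, sigma):
    """Gaussian log-likelihood (sketch of the assumed LFs.lognormallike)."""
    # log N(x | mean, sigma^2); the constant term is kept for completeness
    return -0.5*log(2*pi*sigma**2) - 0.5*((x - mean)/sigma)**2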
Example 2
        def globlikefunc(obsdict):
            """Compute log likelihoods and global log likelihood
            Output format:
            likedict = {likename: (logL, uselike),...}
            """
            likedict = {}   #reset likelihood dictionary
            
            #Extract individual observables dictionaries from container dictionary
            specdict = obsdict['spectrum']
            decadict = obsdict['decay']
            if usemicrOmegas: darkdict = obsdict['darkmatter']
            
            #===========================================================
            # EFFECTIVE PRIOR
            #===========================================================
            #likedict['CCRprior'] = (CCRprior(specdict['BQ'],specdict['tanbQ'],specdict['muQ']), useCCRprior)
            
            #===============================================================
            #   DEFINE LIKELIHOOD CONTRIBUTIONS
            #===============================================================
            #-----------MAIN CONTRIBUTORS TO LIKELIHOOD-----------------
            
            # delta(a_mu)
            likedict['deltaamu'] = (LFs.lognormallike(x=specdict['deltaamu'], 
                mean=33.53e-10, sigma=8.24e-10), usedeltaamu)
                #1106.1315v1, Table 10, Solution B (most conservative)
            
            #=====Electroweak precision, RELIC DENSITY and (g-2)_mu=============
            if usemicrOmegas:
                #Dark matter relic density
                likedict['omegah2'] = (omegalikefunc(darkdict['omegah2'], \
                    0.1186, sqrt((0.0031)**2+(0.10*0.1186)**2)), useomegah2)         
                    # 2013 Planck data release (http://arxiv.org/abs/1303.5076)
                    # From table 2. Three numbers are relevant:
                    # Best fit, 68% confidence interval
                    # 0.12029,     0.1196 +/- 0.0031  : Planck temp. data
                    # 0.11805,     0.1186 +/- 0.0031  :    + Planck lensing data
                    # 0.12038,     0.1199 +/- 0.0027  :    + WMAP low multipole
                    #                                 :      polarisation data
                    # Given our somewhat arbitrary 10% extra theory uncertainty,
                    # it doesn't matter which we choose (they are all within 2%
                    # of each other). Even the WMAP result (0.1123 +/- 0.0035) is
                    # only about 6% different from the central Planck result. So
                    # let's just use the "Planck only" data.
                    #
                    # old WMAP reference:
                    # 2010, 1001.4538v2, page 3 table 1 (WMAP + BAO + H0 mean)
                    # theory (second component) uncertainty stolen from 
                    # 1107.1715, need proper source.
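                # omegalikefunc is defined elsewhere in this module; given the
                # (value, mean, sigma) call above, a minimal sketch of the
                # assumed form (it may instead be one-sided, treating the
                # measurement as an upper bound) is:
                #     def omegalikefunc(x, mean, sigma):
                #         return LFs.lognormallike(x, mean, sigma)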

            
            #=====Higgs constraints=============================================
            # We first need to find out which of the NMSSM Higgses is SM-like
            # Check the reduced couplings? Use those which are closest on 
            # average to 1? Crude, but good enough for first quick scan.
            
            Higgses = ['H1','H2','H3','A1','A2'] # Must match blockmap
            # Compute summed squared difference of couplings from 1
            rgsqdiff = {}
            for Higgs in Higgses:
                rgsqdiff[Higgs] = \
                    (specdict['rg_{0}_u'.format(Higgs)] - 1)**2 \
                  + (specdict['rg_{0}_d'.format(Higgs)] - 1)**2 \
                  + (specdict['rg_{0}_WZ'.format(Higgs)] - 1)**2 \
                  + (specdict['rg_{0}_g'.format(Higgs)] - 1)**2 \
                  + (specdict['rg_{0}_a'.format(Higgs)] - 1)**2
            # Get key of the rgsqdiff entry containing the min squared difference
            mostSMlike = min(rgsqdiff, key=rgsqdiff.get)
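            # Illustration of the selection above: min() with key=rgsqdiff.get
            # returns the dict KEY whose VALUE is smallest, e.g.
            #     d = {'H1': 0.02, 'H2': 1.3}
            #     min(d, key=d.get)   # -> 'H1'
            # i.e. the Higgs whose reduced couplings sit closest to the SM value 1.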
                
            likedict['mh_SMlike'] = (LFs.lognormallike(\
                x=specdict['M{0}'.format(mostSMlike)], \
                mean=126., sigma=sqrt(1.**2)), useLHCHiggs) 
                # Just a rough guess at the measured mass of the SM-like Higgs.
                # Replace this with a rigorous likelihood involving decay rates.
            
            #print mostSMlike, specdict['M{0}'.format(mostSMlike)], \
            #    rgsqdiff[mostSMlike]
                
            #===============Flavour constraints=========================
            # Branching ratios:
            # Switch on/off using "useBdecays" config option.
            # "Theory error" taken to be the larger of the upper/lower errors
            # returned by nmspec. ->UPDATE: now using LFs.logdoublenormal,
            # models upper and lower errors by separate Gaussians.
            
            # BF(b->s gamma) (i.e. B_u/B_d -> X_s gamma)
            bsgexperr2 = (0.26e-4)**2+(0.09e-4)**2
            bsguperr2 = bsgexperr2+(specdict['bsgmo+eth']-specdict['bsgmo'])**2
            bsglwerr2 = bsgexperr2+(specdict['bsgmo-eth']-specdict['bsgmo'])**2              
            likedict['bsgmo'] = (LFs.logdoublenormal(x=specdict['bsgmo'],
                mean=3.55e-4, sigmaP=sqrt(bsguperr2), sigmaM=sqrt(bsglwerr2)),
                useBdecays)
            # HFAG, arXiv:1010.1589 [hep-ex], Table 129 (Average)
            # 0.09e-4 contribution added in accordance with newer HFAG edition:
            # HFAG, arXiv:1207.1158 [hep-ex], pg 203. (i.e. basically unchanged)
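            # Assumed form of LFs.logdoublenormal (a split normal with
            # different widths above/below the mean; the normalisation
            # shown is an assumption):
            #     logdoublenormal(x, mean, sigmaP, sigmaM) =
            #         log(2/(sqrt(2*pi)*(sigmaP + sigmaM)))
            #         - 0.5*((x - mean)/sigmaP)**2    for x >= mean
            #         - 0.5*((x - mean)/sigmaM)**2    for x <  mean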

            # BR(B+ -> tau+ + nu_tau)
            btaunuexperr2 = (0.3e-4)**2
            btaunuuperr2 = btaunuexperr2+(specdict['B+taunu+eth']-specdict['B+taunu'])**2
            btaunulwerr2 = btaunuexperr2+(specdict['B+taunu-eth']-specdict['B+taunu'])**2              
            likedict['B+taunu'] = (LFs.logdoublenormal(x=specdict['B+taunu'],
                mean=1.67e-4, sigmaP=sqrt(btaunuuperr2), sigmaM=sqrt(btaunulwerr2)),
                useBdecays)
            # HFAG 1010.1589v3 (updated 6 Sep 2011), retrieved 12 Oct 2011
            # Table 127, pg 180
            # HFAG, arXiv:1207.1158 [hep-ex], pg 204. table 144
            # (basically unchanged from the 2010 data: smaller uncertainty
            # (0.39 -> 0.3), mean unchanged.)
            
            # BR(Bs -> mu+ mu-)
            # See comments where BsmumuLogLcurve is created from data files
            likedict['bmumu'] = (Bsmumulikefunc(specdict['bmumu']), useBdecays)
            #Folding theory error into this properly would be hard... need to
            #convolve it in for every point. Can't think of a better way.
            #Otherwise need to settle on something constant that can be
            #computed at the beginning of the run.
            #specdict['bmumu+eth']
            #specdict['bmumu-eth'] 
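            #A plausible sketch of Bsmumulikefunc, assuming the digitized
            #curve is loaded at startup into arrays BRgrid/logLgrid
            #(hypothetical names) sorted by branching ratio:
            #    def Bsmumulikefunc(BR):
            #        return np.interp(BR, BRgrid, logLgrid)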
            
            
            
            #--------Effective prior factor-------------------------------------
            likedict['logJew'] = (\
                                logJew(specdict['lambda_Qsusy'],
                                       specdict['kappa_Qsusy'],
                                       specdict['Alambda_Qsusy'],
                                       specdict['Akappa_Qsusy'],
                                       specdict['mueff_Qsusy']/
                                           specdict['lambda_Qsusy'], # <S>=mueff/lambda
                                       specdict['TanB'],
                                       specdict['MHu^2_Qsusy'],
                                       specdict['MHd^2_Qsusy'],
                                       specdict['MZ'],
                                       specdict['g`_Qsusy'],
                                       specdict['g2_Qsusy'])                    
                    , False)
            likedict['logJrge'] = (\
                               logJrge(specdict['mueff_Qsusy']/
                                           specdict['lambda_Qsusy'], # <S>=mueff/lambda
                                       specdict['TanB'],
                                       specdict['MHu^2_Qsusy'],
                                       specdict['MHd^2_Qsusy'],
                                       specdict['lambda_Qsusy'],
                                       specdict['kappa_Qsusy'],
                                       specdict['yt_Qsusy'],
                                       specdict['g`_Qsusy'],
                                       specdict['g2_Qsusy'],
                                       specdict['g3_Qsusy'],
                                       specdict['lambda_QGUT'],
                                       specdict['kappa_QGUT'],
                                       specdict['yt_QGUT'],
                                       specdict['g`_QGUT'],
                                       specdict['g2_QGUT'],
                                       specdict['g3_QGUT'])                    
                    , False)
                    
            # Total combined effective log prior factor (for easy removal during
            # analysis)
            effprior = 0.
            if useJew:  effprior += likedict['logJew'][0]
            if useJrge: effprior += likedict['logJrge'][0]
            likedict['effprior'] = (effprior, True)
            
            #===============GLOBAL LOG LIKELIHOOD=======================
            LogL = sum( logl for logl,uselike in likedict.itervalues() if uselike )
            return LogL, likedict
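
The (logL, uselike) tuples documented in the docstring make it easy to recompute the global log likelihood offline with a different subset of constraints switched on. A minimal sketch with hypothetical values:

likedict = {'deltaamu': (-0.7, True), 'omegah2': (-3.2, False)}
LogL = sum(logl for logl, uselike in likedict.itervalues() if uselike)
# LogL == -0.7; itervalues() as in the Python 2 code above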
Example 3
        def globlikefunc(obsdict):
            """Compute log likelihoods and global log likelihood
            Output format:
            likedict = {likename: (logL, uselike),...}
            """
            likedict = {}  #reset likelihood dictionary

            #Extract individual observables dictionaries from container dictionary
            specdict = obsdict['spectrum']
            if usemicrOmegas: darkdict = obsdict['darkmatter']
            if useSuperISO: flavdict = obsdict['flavour']

            #NO HDECAY STUFF IN HERE
            #if useHDecay:     decadict = obsdict['decay']

            #===========================================================
            # EFFECTIVE PRIOR
            #===========================================================
            likedict['CCRprior'] = (CCRprior(specdict['BQ'], specdict['tanbQ'],
                                             specdict['muQ']), useCCRprior)

            #===============================================================
            #   DEFINE LIKELIHOOD CONTRIBUTIONS
            #===============================================================
            #Note: some things left commented in old pysusy2 format because
            #they contain useful information and I can't be bothered
            #reformatting it.
            #-----------STANDARD MODEL DATA ------------
            # this is used to constrain the nuisance parameters
            # -NB-REMOVED -> USING GAUSSIAN PRIORS INSTEAD.
            #ialphaemL = Observable(slhafile=setupobjects[SPECTRUMfile], block="SMINPUTS",
            #    index=1, average=127.918, sigma=0.018,likefunc=LFs.lognormallike)
            #    #Jun 2009 hep-ph/0906.0957v2, they reference PDG 2008, but I can't find the value myself.
            #alphasL = Observable(slhafile=setupobjects[SPECTRUMfile], block="SMINPUTS",
            #    index=3, average=0.1184, sigma=0.0007,likefunc=LFs.lognormallike)
            #    #PDG 2010 pg 101 - Physical Constants
            #MZL = Observable(slhafile=setupobjects[SPECTRUMfile], block="SMINPUTS",
            #    index=4, average=91.1876, sigma=0.0021,likefunc=LFs.lognormallike)
            #    #PDG 2010 pg 101 - Physical Constants
            #MtopL = Observable(slhafile=setupobjects[SPECTRUMfile], block="SMINPUTS",
            #    index=6, average=173.3, sigma=1.1,likefunc=LFs.lognormallike)
            #    #1007.3178, tevatron, 19 Jul 2010
            #MbotL = Observable(slhafile=setupobjects[SPECTRUMfile], block="SMINPUTS",
            #    index=5, average=4.19, sigma=0.18,likefunc=LFs.lognormallike)
            #    #PDG 2010 quark summary - http://pdg.lbl.gov/2010/tables/rpp2010-sum-quarks.pdf (NOTE - lower uncertainty is 0.06 not 0.18, altered distribution to make it symmetric for simplicity)

            likedict['MW'] = (LFs.lognormallike(
                x=specdict['MW'], mean=80.399,
                sigma=sqrt(0.023**2 + 0.015**2)), useMW)
            #PDG 2010, 2011 and partial 2012 update
            #from
            #http://lepewwg.web.cern.ch/LEPEWWG/ - extracted Apr 8 2011 (Jul 2010 average value)
            #theory (second component) uncertainty stolen from 1107.1715, need proper source (they
            #do not give one)
            #-----------MAIN CONTRIBUTORS TO LIKELIHOOD-----------------

            #=====Electroweak precision, RELIC DENSITY and (g-2)_mu========
            if usemicrOmegas:
                # delta rho parameter, describes MSSM corrections to electroweak observables
                likedict['deltarho'] = (LFs.lognormallike(
                    x=darkdict['deltarho'], mean=0.0008, sigma=0.0017),
                                        usedeltarho)
                #PDG Standard Model Review: Electroweak model and constraints on new physics, pg 33 eq 10.47.
                #Taking the larger of the 1 sigma confidence interval values
                #(likelihood function is actually highly asymmetric, PDG gives
                #1 sigma: 1.0008 +0.0017,-0.0007
                #2 sigma: 1.0004 +0.0029,-0.0011
                #We are ignoring these complexities. The 0.0017 sigma should be quite conservative,
                #and the contributions seem to be positive only, so the odd lower sigmas are essentially
                #irrelevant anyway, in the CMSSM at least. I do not know whether other MSSM models differ;
                #I assume probably not.
                #In ISAJET have sin**2(thetaw), consider replacing deltarho with this. Values in different schemes
                #in same part of pdg, pg 30, Table 10.8.
                #Dark matter relic density
                likedict['omegah2'] = (omegalikefunc(
                    darkdict['omegah2'], 0.1123,
                    sqrt((0.0035)**2 + (0.10 * 0.1123)**2)), useomegah2)
                #Jan 2010, 1001.4538v2, page 3 table 1 (WMAP + BAO + H0 mean)
                #theory (second component) uncertainty stolen from 1107.1715, need proper source
                #Anomalous muon magnetic moment
                likedict['deltaamu'] = (LFs.lognormallike(
                    x=darkdict['deltaamu'], mean=33.53e-10, sigma=8.24e-10),
                                        usedeltaamu)
                #1106.1315v1, Table 10, Solution B (most conservative)
                #=====FLAVOUR OBSERVABLES FROM DARKMATTERfile======
                # Note: Ditching these for now. Don't use them and they just make the output confusing.
                # micrOmegas computes some of these essentially 'for free', so we may
                # as well record them for comparison.
                # NOTE: See equivalent flavour file observables definitions for references.
                #
                #bsgmoM = Observable(slhafile=setupobjects[DARKMATTERfile], block="CONSTRAINTS",
                #index=2, average=3.55e-4, sigma=sqrt((0.26e-4)**2+(0.30e-4)**2),likefunc=LFs.lognormallike, uselike=False)
                #
                #bmumuM = Observable(slhafile=setupobjects[DARKMATTERfile], block="CONSTRAINTS",
                #index=3, average=4.3e-8, sigma=0.14*4.3e-8,likefunc=CMSLHCbBsmumulikefunc(CLscurve,minBR,maxBR), uselike=False)
                #
                #obsorder += ['bsgmoM','bmumuM']

                #=====DARK MATTER DIRECT DETECTION========
                # LSP-nucleon cross sections
                # Note: the uncertainty in the LSP-proton SI cross-section is based on
                # uncertainties in the hadronic scalar coefficients, currently hardcoded
                # into micromegas. These uncertainties are propagated through a modified
                # version of micromegas alongside the amplitude calculation.
                # LSP-proton SI cross-section
                likedict['sigmaLSPpSI'] = (xenonlikefunc(
                    darkdict['sigmaLSPpSI'], specdict['Mneut1'],
                    darkdict['dsigmaLSPpSI']), useDMdirect)
                #likelihood function built from information in fig 5 of 1104.2549, 100 days of Xenon100 data (Apr 2011)
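                #A plausible sketch of xenonlikefunc: interpolate the digitized
                #XENON100 exclusion curve to get the cross-section limit at this
                #neutralino mass, then apply an upper-limit likelihood smeared by
                #the cross-section uncertainty (array names hypothetical):
                #    def xenonlikefunc(sigmaSI, Mneut1, dsigmaSI):
                #        limit = np.interp(Mneut1, massgrid, sigmalimitgrid)
                #        return LFs.logerfupper(x=sigmaSI, limit=limit, sigma=dsigmaSI)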
            #=====FLAVOUR OBSERVABLES=============
            if useSuperISO:
                #Branching ratios
                # BF(b->s gamma) - USING SUPERISO VALUE FOR LIKELIHOOD. Micromegas value recorded for comparison
                likedict['bsgmo'] = (LFs.lognormallike(
                    x=flavdict['bsgmo'], mean=b2sg[0], sigma=b2sg[1]), True)

                # BF(Bs -> mu+mu-) - USING SUPERISO VALUE FOR LIKELIHOOD. Micromegas value recorded for comparison
                #likedict['bmumu'] = (Bsmumulikefunc(flavdict['bmumu']), True)  #previous fancy version, using simplified likelihood for now
                likedict['bmumu'] = (LFs.logerfupper(
                    x=flavdict['bmumu'], limit=b2mumu[0], sigma=b2mumu[1]), True)

                #might as well leave the other limits in unenforced

                # BF(B_u -> tau nu) / BF(B_u -> tau nu)_SM
                # ButaunuButaunuSM = MultiIndexObservable(setupobjects[LOWENERGYfile],LOWENERGYblock,(521,2,0,2,-15,16),
                #        average=1.28, sigma=0.38,likefunc=LFs.lognormallike)
                #    #copying 1107.1715, need to find proper source.

                #REPLACING THE ABOVE RATIO, it is more straightforward to impose constraints directly on the branching ratio itself
                #BF(B_u -> tau nu)
                likedict['Butaunu'] = (LFs.lognormallike(
                    x=flavdict['Butaunu'], mean=1.67e-4, sigma=0.39e-4), False)
                #Heavy Flavour Averaging Group (HFAG)
                #1010.1589v3 (updated 6 Sep 2011), retrieved 12 Oct 2011
                #Table 127, pg 180
                # Delta0(B->K* gamma) (hope this is the same as Delta0-, seems like it might be)
                # (isospin asymmetry)
                likedict['Delta0'] = (LFs.lognormallike(
                    x=flavdict['Delta0'],
                    mean=0.029,
                    sigma=sqrt(0.029**2 + 0.019**2 + 0.018**2)), False)
                # BaBAR, 2008 - 0808.1915, pg 17. No theory error included, pieces are different aspects of experimental error.
                # BR(B+->D0 tau nu)/BR(B+-> D0 e nu)
                likedict['BDtaunuBDenu'] = (LFs.lognormallike(
                    x=flavdict['BDtaunuBDenu'], mean=0.416, sigma=0.128),
                                            False)
                # BaBAR, 2008 - 0709.1698, pg 7, Table 1 (R value). No theory error included.
                # R_l23: involves helicity suppressed K_l2 decays. Equals 1 in SM.
                likedict['Rl23'] = (LFs.lognormallike(
                    x=flavdict['Rl23'], mean=1.004, sigma=0.007), False)
                # FlaviaNet Working Group on Kaon Decays
                # 0801.1817v1, pg 29, Eq. 4.19. No theory error included (meaning SuperISO error, as for other observables)
                # BR(D_s->tau nu)
                likedict['Dstaunu'] = (LFs.lognormallike(
                    x=flavdict['Dstaunu'],
                    mean=0.0538,
                    sigma=sqrt((0.0032)**2 + (0.002)**2)), False)
                #Heavy Flavour Averaging Group (HFAG)
                #1010.1589v3 (updated 6 Sep 2011), retrieved 12 Oct 2011
                #Figure 68, pg 225. Theory error stolen from 1107.1715, need to find proper source.
                # BR(D_s->mu nu)
                likedict['Dsmunu'] = (LFs.lognormallike(
                    x=flavdict['Dsmunu'],
                    mean=0.00581,
                    sigma=sqrt((0.00043)**2 + (0.0002)**2)), False)
                #Heavy Flavour Averaging Group (HFAG)
                #1010.1589v3 (updated 6 Sep 2011), retrieved 14 Oct 2011
                #Figure 67, pg 224. Theory error stolen from 1107.1715, need to find proper source.
            #=================Direct sparticle search limits=============================
            nmass = np.abs(
                specdict['Mneut1']
            )  #get abs because spectrum generator returns negative values sometimes
            smass = np.abs(specdict['MseL'])
            likedict['MseL'] = (LFs.logsteplower(
                x=smass,
                limit=99 if nmass < 84 else 96 if smass - nmass > 6 else 73),
                                useDSL)  #Phys. Lett. B544 p73 (2002)
            smass = np.abs(specdict['MsmuL'])
            likedict['MsmuL'] = (LFs.logsteplower(
                x=smass, limit=94.4 if smass - nmass > 6 else 73),
                                 useDSL)  #Phys. Lett. B544 p73 (2002)
            smass = np.abs(specdict['Mstau1'])
            likedict['Mstau1'] = (LFs.logsteplower(
                x=smass, limit=86 if smass - nmass > 8 else 73),
                                  useDSL)  #Phys. Lett. B544 p73 (2002)
            smass = np.abs(specdict['MesnuL'])
            likedict['MesnuL'] = (LFs.logsteplower(
                x=smass, limit=43 if smass - nmass < 10 else 94),
                                  useDSL)  #Phys. Lett. B544 p73 (2002)
            smass = np.abs(specdict['Mchar1'])
            likedict['Mchar1'] = (
                LFs.logsteplower(
                    x=smass, limit=97.1 if (smass - nmass) > 3 else 45), useDSL
            )  #45 GeV limit from PDG 2010, unconditional limit from invisible Z width, other limit from Eur. Phys. J. C31 p421 (2003)
            #NOTE: USING SAME LIMITS FOR L AND R SQUARKS, CHECK THAT THIS IS FINE.
            smass = np.abs(specdict['Mstop1'])
            likedict['Mstop1'] = (
                LFs.logsteplower(
                    x=smass, limit=95 if smass - nmass > 8 else 63), useDSL
            )  #? Damn don't seem to have written down where this comes from. Need to find out.
            smass = np.abs(
                specdict['Msbot1']
            )  #NOTE: stop2 heavier than stop1 by definition so only need to bother applying limit to stop1. (1 and 2 are the mass eigenstates, mixtures of L and R interaction eigenstates). Same for sbot2
            likedict['Msbot1'] = (LFs.logsteplower(
                x=smass, limit=93 if smass - nmass > 8 else 63), useDSL)
            #NOTE: USING SAME LIMITS FOR REST OF SQUARKS; CHECK THIS, SEE 'squarklikefunc' ABOVE.
            #likelihood function defined above, usage: squarklikefunc(smassIN,nmassIN) (smass - THIS sparticle mass, nmass - neutralino 1 mass)
            #NOTE: L and R states are same as 1 and 2 states for other squarks because off diagonal terms in the mixing matrices are
            #negligible. This is not true for the third generation squarks which is why we are sure to label them 1 and 2 above (1=lightest)
            for Msquark in [
                    'MsupL', 'MsupR', 'MsdownL', 'MsdownR', 'MsstrangeL',
                    'MsstrangeR', 'MscharmL', 'MscharmR'
            ]:
                likedict[Msquark] = (squarklikefunc(specdict[Msquark], nmass),
                                     useDSL)
            likedict['Mgluino'] = (
                LFs.logerflower(x=specdict['Mgluino'], limit=289,
                                sigma=15), useDSL
            )  #from 0809.3792 - not well checked, probably need to update with LHC data.
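            #Assumed forms of the limit likelihoods used above:
            #  logsteplower(x, limit): 0.0 if x >= limit else -inf (in practice a
            #      large negative number); a hard lower bound.
            #  logerflower(x, limit, sigma): log(0.5*erfc((limit - x)/(sqrt(2)*sigma)));
            #      a lower bound smeared by a Gaussian of width sigma.
            #  logerfupper (used for bmumu above) is the mirror image for upper bounds.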

            # ---------------NOTE: LEP HIGGS MASS LIMIT-------------------
            # Using digitized LEP limit from hep-ph/0603247 (2006), fig 3a:
            # the digitized curve gives the m_h bound appropriate to the g_ZZh
            # coupling for each model point, with an estimated 3 GeV sigma on
            # the m_h returned by SoftSusy.
            # Need the following observables to be extracted to compute Higgs likelihood function:
            # alpha,tanbetaQ    #RGE running of tanb is slow so tanbQ should be approximately the EW tanb value
            # Likelihood function for this is written above (needs to return a lower limit erf likelihood depending on g_ZZh):
            #higgs=lightest higgs mass;alpha=higgs scalar mixing angle;tanbeta=tan(higgs VEV ratio)
            mhiggs = np.abs(specdict['Mh0'])
            #likedict['Mh0'] = (LFs.logerflower(x=mhiggs, limit=higgslimitfunc(specdict['alpha'],specdict['tanbQ'],mHlimitcurve), sigma=3), useHiggs)

            #now using LHC measured higgs mass
            #NOTE! SET TO TRUE! IGNORES WHATEVER IS SPECIFIED IN CONFIG FILE!
            likedict['Mh0'] = (LFs.lognormallike(
                x=mhiggs, mean=mh[0], sigma=mh[1]), True)

            #No fancy HDecay stuff in here.

            #===============GLOBAL LOG LIKELIHOOD=======================
            LogL = sum(
                logl for logl, uselike in likedict.itervalues() if uselike)
            return LogL, likedict