def Calclims():
    # We now work with the event distributions as a function of S1
    mlist = np.logspace(1, 3.9, 100)  # GeV
    Bdipolenorm = 1e-10

    b_dict = load_bkgs()
    obsT = np.ones_like(s1means) * 35636.  # exposure per S1 bin (XENON1T-like)
    obsT2 = np.ones_like(s1means) * 35636. * 100  # 100x exposure ("Xenon100T")
    B = [b_dict[k] / obsT for k in bkgs]

    # T: 1% systematic uncertainty on each of the six background components
    SF = sf.Swordfish(B, T=[0.01, 0.01, 0.01, 0.01, 0.01, 0.01], E=obsT)
    SF2 = sf.Swordfish(B, T=[0.01, 0.01, 0.01, 0.01, 0.01, 0.01], E=obsT2)

    ULlist_Xenon1T_milli = []
    ULlist_Xenon100T_milli = []

    for m in mlist:
        dRdS = np.array(
            dRdS1(s1means, m, mu_x=Bdipolenorm, m_dipole=True) * s1width)
        UL1T = SF.upperlimit(dRdS, 0.1)
        UL100T = SF2.upperlimit(dRdS, 0.1)
        ULlist_Xenon1T_milli.append(UL1T * Bdipolenorm**2)
        ULlist_Xenon100T_milli.append(UL100T * Bdipolenorm**2)

    ULlist_Xenon1T_milli = np.sqrt(np.array(ULlist_Xenon1T_milli))
    ULlist_Xenon100T_milli = np.sqrt(np.array(ULlist_Xenon100T_milli))
    return (interp1d(mlist, ULlist_Xenon1T_milli),
            interp1d(mlist, ULlist_Xenon100T_milli))
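
A minimal usage sketch (not part of the original example; it assumes the module-level s1means, s1width, bkgs, load_bkgs and dRdS1 are in scope): the two returned interpolators give the limit as a function of mass.

lim1T, lim100T = Calclims()
print(lim1T(100.), lim100T(100.))  # limits at m = 100 GeV
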
def plt_lims():
    mlist = np.logspace(1, 4., 100)  # GeV
    norm01 = 9.756e-10
    cp1 = np.zeros(11)
    cp1[0] = norm01
    cn1 = cp1

    b_dict = load_bkgs()
    obsT = np.ones_like(s1means) * 35636.
    obsT2 = np.ones_like(s1means) * 35636. * 100
    B = [b_dict[k] / obsT for k in bkgs]

    SF = sf.Swordfish(B, T=[0.01, 0.01, 0.01, 0.01, 0.01, 0.01], E=obsT)
    SF2 = sf.Swordfish(B, T=[0.01, 0.01, 0.01, 0.01, 0.01, 0.01], E=obsT2)

    ULlist_Xenon1T = []
    ULlist_Xenon100T = []

    for m in mlist:
        dRdS = np.array(dRdS1(s1means, m, cp1, cn1) * s1width)
        UL1T = SF.upperlimit(dRdS, 0.1)
        UL100T = SF2.upperlimit(dRdS, 0.1)
        ULlist_Xenon1T.append(UL1T * norm01**2)
        ULlist_Xenon100T.append(UL100T * norm01**2)

    ULlist_Xenon1T = np.sqrt(np.array(ULlist_Xenon1T))
    ULlist_Xenon100T = np.sqrt(np.array(ULlist_Xenon100T))

    mp = 0.938  # GeV
    mu = mlist * mp / (mlist + mp)

    # (hbar c = 1.98e-14 GeV cm)^2 converts the GeV^-2 cross section to cm^2
    sig_SI_X1T = (ULlist_Xenon1T)**2 * (mu**2 / np.pi) * (1.98e-14**2)
    sig_SI_X100T = (ULlist_Xenon100T)**2 * (mu**2 / np.pi) * (1.98e-14**2)

    plt.loglog(mlist, sig_SI_X1T, label=r"UL XENON1T (2017)")
    plt.loglog(mlist, sig_SI_X100T, label=r"UL XENONnT")
    plt.scatter(mlist[np.argmax(ULlist_Xenon1T)], np.max(ULlist_Xenon1T))
    temp = interp1d(mlist, sig_SI_X1T)
    return temp(10**3.9)
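
A usage note (an assumption, since the calls are not shown in this snippet): plt_lims labels its curves but never calls plt.legend() or plt.show(), so a caller would typically finish with:

val = plt_lims()
plt.legend()
plt.show()
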
Example no. 3
def getSwordfish(self, E=None, ignore_cov=False):
    if E is not None and not isinstance(E, float):
        E = E.flatten()
    B = self.__call__().flatten()
    K = self.cov() if not ignore_cov else None
    SF = sf.Swordfish([B], K=K, E=E)
    return SF
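
A minimal usage sketch (the model object and exposure map below are placeholders; getSwordfish only assumes that self provides __call__() for the expected background and cov() for its covariance):

SF = model.getSwordfish(E=exposure_map)
SF_nocov = model.getSwordfish(ignore_cov=True)  # skip the covariance matrix
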
def Calclims():
    # We now work with the event distributions as a function of S1
    norm01 = 9.756e-10
    mlist = np.logspace(1, 3.9, 100) # GeV
    cp = np.zeros(11)
    cp[0] = norm01
    cn = cp
    ULlist = []

    b_dict = load_bkgs()
    obsT = np.ones_like(s1means)*35636.
    obsT2 = np.ones_like(s1means)*35636.*100
    B = [b_dict[k] / obsT for k in bkgs]

    SF = sf.Swordfish(B, T=[0.01,0.01,0.01,0.01,0.01,0.01], E=obsT)
    SF2 = sf.Swordfish(B, T=[0.01,0.01,0.01,0.01,0.01,0.01], E=obsT2)

    ULlist_Xenon100T = []

    for m in mlist:
        dRdS = dRdS1(s1means, m, cp, cn)*s1width
        UL = SF.upperlimit(dRdS, 0.1)
        UL100 = SF2.upperlimit(dRdS, 0.1)
        ULlist_Xenon100T.append(UL100*norm01**2)
        ULlist.append(UL*norm01**2)

    ULlist = np.array(np.sqrt(ULlist))
    ULlist_Xenon100T = np.array(np.sqrt(ULlist_Xenon100T))
    # plt.loglog(mlist, ULlist)
    # plt.loglog(mlist, ULlist_Xenon100T)
    # plt.show()
    return interp1d(mlist, ULlist), interp1d(mlist, ULlist_Xenon100T)
Example no. 5
def lnL():
    x = np.linspace(1, 10, 20)
    flux = [x**2.4, x / 2]
    noise = np.ones_like(flux[0])
    exposure = np.ones_like(flux[0]) * 0.1
    systematics = np.ones((20, 20)) * 1.
    #systematics = None
    s = sf.Swordfish(flux, noise, systematics, exposure)
    #syst = noise.copy()
    #syst[1] *= 2.000
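    # Assumption about the semantics of the call below: profile_lnL evaluates
    # the profile log-likelihood at the observed parameters (first argument)
    # for the tested parameters (second argument), profiling over the
    # components flagged True in free_thetas (here theta_2 only).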
    print(s.profile_lnL(np.array([0.25, 1.0]),
                        np.array([0.23, 1.0]),
                        free_thetas=np.array([False, True])))
Example no. 6
def test_MW_dSph():
    nside = 32

    def plot_harp(h, filename):
        m = h.get_healpix(128)
        vmax = np.log10(m).max()
        vmin = vmax - 2
        hp.mollview(np.log10(m), nest=True, min=vmin, max=vmax)
        plt.savefig(filename)

    dims = ()

    # Signal definition
    spec = 1.
    MW = harp.HARPix(dims=dims).add_iso(2).add_singularity((0, 0),
                                                           0.1,
                                                           2.,
                                                           n=10)
    MW.add_func(lambda d: spec / (1 + d)**2, mode='dist', center=(0, 0))
    pos = (50, 40)
    #    dSph = harp.HARPix(dims = dims).add_singularity(pos, 0.1, 20, n = 10)
    #    dSph.add_func(lambda d: 0.1*spec/(.1+d)**2, mode = 'dist', center=pos)
    sig = MW  #+ dSph
    sig.data += .001  # EGBG
    plot_harp(sig, 'sig.eps')

    # Background definition
    bg = harp.HARPix(dims=dims).add_iso(64)
    bg.add_func(lambda l, b: 1 / (b + 1)**2)
    plot_harp(bg, 'bg.eps')

    # Covariance matrix definition
    cov = harpix_Sigma(sig)
    cov.add_systematics(err=bg * 0.1, sigma=10., Sigma=None, nside=0)

    # Set up swordfish
    fluxes = [sig.data.flatten()]
    noise = bg.get_formatted_like(sig).data.flatten()
    systematics = cov
    exposure = np.ones_like(noise) * 0.001
    m = sf.Swordfish(fluxes, noise, systematics, exposure, solver='direct')

    F = m.effectiveinfoflux(0)
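    # effectiveinfoflux(0) presumably returns the (effective) Fisher-information
    # flux of signal component 0, which is mapped back onto the HARPix grid below.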
    f = harp.HARPix.from_data(sig, F)
    plot_harp(f, 'test.eps')
Example no. 7
def test():
    H = harp.HARPix().add_iso(4).add_disc((80, 0), 40, 16).add_disc((-80, 0),
                                                                    40, 16)
    H.data += 1.

    # Covariance matrix definition
    cov = harpix_Sigma(H)
    flat = harp.HARPix().add_iso(16, fill=1.)
    cov.add_systematics(err=flat * .1, sigma=3, Sigma=None, nside=128)

    # Set up swordfish
    fluxes = [H.get_data(mul_sr=True)]
    noise = fluxes[0]
    expmap = noise * 0. + 1e5
    systematics = cov
    m = sf.Swordfish(fluxes, noise, systematics, expmap, solver='cg')

    F = m.effectiveinfoflux(0)
    f = harp.HARPix.from_data(H, F, div_sr=True)
    print(min(f.data), max(f.data))

    m = f.get_healpix(128)
    hp.mollview(m, nest=True)
    plt.savefig("MW.eps")


def Euclideanizemilli(nsamples=100000):
    Limit1, Limit2 = Calclims()
    b_dict = load_bkgs()
    obsT = np.ones_like(s1means) * 35636. * 100
    B = [b_dict[k] / obsT for k in bkgs]

    couplings = []
    ESXe = []
    ESAr = []
    NXe = []
    NAr = []
    NuisanceES = []
    ms = np.logspace(1, 3.9, nsamples // 1000)

    sigmav_mean = 156.0
    err_sigv = 13.
    vesc_mean = 533.0
    err_vesc = 54.
    vlag_mean = 242.0
    err_vlag = 10.

    from random import randint
    sigmav = np.random.uniform(sigmav_mean - (2. * err_sigv),
                               sigmav_mean + (2. * err_sigv), int(nsamples))
    vesc = np.random.uniform(vesc_mean - (2. * err_vesc),
                             vesc_mean + (2. * err_vesc), int(nsamples))
    vlag = np.random.uniform(vlag_mean - (2. * err_vlag),
                             vlag_mean + (2. * err_vlag), int(nsamples))

    # Pin a random ~20% subset of the samples to the mean halo parameters
    # (presumably so the fiducial halo model is guaranteed to appear in the grid)
    random_points = np.unique(
        [randint(0, vlag.shape[0] - 1) for _ in range(int(nsamples * 0.2))])
    sigmav[random_points] = sigmav_mean
    vesc[random_points] = vesc_mean
    vlag[random_points] = vlag_mean

    for m in tqdm(ms, desc="Euclideanizing Xenon-nT"):
        i = 0
        coupling_temp = np.logspace(np.log10(Limit1(m)), np.log10(Limit2(m)),
                                    nsamples // 100)
        for mu_x in coupling_temp:
            couplings.append([m, 0., mu_x])
            # dRdS1 with Nevents=True returns the spectrum and the signal count
            dRdS, Nsig = dRdS1(s1means,
                               m,
                               mu_x=mu_x,
                               m_dipole=True,
                               Nevents=True,
                               sigmav=sigmav[i],
                               vesc=vesc[i],
                               vlag=vlag[i])

            SF = sf.Swordfish(B,
                              T=[0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
                              E=obsT)
            NXe.append(Nsig * obsT[0])
            ES_temp = SF.euclideanizedsignal(dRdS * s1width)

            NuisanceES_temp = [(sigmav[i] - sigmav_mean) / err_sigv]
            NuisanceES_temp.append((vesc[i] - vesc_mean) / err_vesc)
            NuisanceES_temp.append((vlag[i] - vlag_mean) / err_vlag)
            NuisanceES.append(NuisanceES_temp)
            ESXe.append(ES_temp)
            i += 1

    couplings = np.array(couplings)

    obsT = np.ones_like(ER_c) * 1422.
    obsT2 = np.ones_like(ER_c) * 1422. * 100. * 50.
    b = np.zeros_like(ER_c) + 0.1 / obsT2 / len(ER_c)
    SFAr = sf.Swordfish([b], T=[0.01], E=obsT2)
    eff_temp = load_efficiency()

    for i, m in enumerate(tqdm(couplings[:, 0], desc="Euclideanizing DS20k")):
        mu_x = couplings[i, 2]

        dRdE = dRdEAr(m,
                      ER_c,
                      eff_temp,
                      mu_x=mu_x,
                      m_dipole=True,
                      sigmav=sigmav[i],
                      vesc=vesc[i],
                      vlag=vlag[i])
        ES_temp = SFAr.euclideanizedsignal(dRdE * ER_width)
        ESAr.append(ES_temp)


    #######################

    ESXe = np.array(ESXe)
    ESAr = np.array(ESAr)
    NXe = np.array(NXe)
    NuisanceES = np.array(NuisanceES)

    # Output to new hdf5
    outfile = "../hdf5/Xenon100T_DS20k_gridscanBdipole_HaloTrue.hdf5"
    hf = h5py.File(outfile, 'w')
    hf.create_dataset('ESAr', data=ESAr)
    hf.create_dataset('ESXe', data=ESXe)
    hf.create_dataset('NuisanceES', data=NuisanceES)
    hf.create_dataset('c', data=couplings)
    hf.create_dataset('NXe', data=NXe)
    # hf.create_dataset('NAr', data=NAr)
    hf.close()
    return None
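
A short readback sketch (not in the original): the path and dataset names below are exactly those written by Euclideanizemilli above.

def read_euclideanized(path="../hdf5/Xenon100T_DS20k_gridscanBdipole_HaloTrue.hdf5"):
    with h5py.File(path, 'r') as hf:
        ESXe = hf['ESXe'][()]
        ESAr = hf['ESAr'][()]
        couplings = hf['c'][()]  # columns: (m, 0., mu_x)
        nuisance = hf['NuisanceES'][()]
    return ESXe, ESAr, couplings, nuisance
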
Example no. 9
def DD(m_DM, UL=True):
    """ This routine will try to recreate the limits from 1708.08571 for the CRESST-III Experiment, a future version of CRESST"""
    c_kms = c * 1.e-3  # in km s^-1
    GeV_kg = 1.8e-27
    sGeV = 1 / 6.58e-25
    mGeV = 1 / 0.197e-15
    kgs = sGeV / GeV_kg  # = kg * s in natural units
    h = 4.135667662e-15  # eV s
    h_MeVs = h / 1.e6
    m_DM *= 1.e3  # conversion to MeV
    rho_0 = 0.3  # GeV/cm3
    rho_0 *= 1e3 * 1e2**3 / mGeV**3 * 1e3**3  # GeV/cm3 --> MeV^4
    xi_T = 1.  # FIXME: Need real values
    m_T = 68. * 1.e3  # MeV FIXME: Need real values, currently set to Germanium
    m_med = 10.  # MeV
    muT = m_DM * m_T / (m_DM + m_T)

    # Define energy range in MeV
    E = np.linspace(0.1 * 1e-3, 2. * 1e-3, num=19)
    Ewidth = E[1] - E[0]
    Emeans = E[0:-1] + Ewidth / 2.

    def eta_F():
        v, gave = np.loadtxt("DD_files/gave.dat", unpack=True, dtype=float)
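        # gave.dat presumably tabulates the velocity integral eta(vmin)
        # (the mean inverse speed), already in natural units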
        f = interp1d(v, gave)  # Natural units
        return f

    def dRdE(E_R):
        """Return differential recoil rate in 1/s/keV/kg."""
        # g is the parameter we wish to set a limit on so is left out
        # Form factor taken from eq4.4 of http://pa.brown.edu/articles/Lewin_Smith_DM_Review.pdf
        # TODO: Z,A,theta dependence missing
        # FIXME: Should correct for realistic form factor
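        # The rate implemented below is, in natural units (before the kg*s
        # conversion):
        #   dR/dE_R = rho_0 * xi_T * g^2 * F_T(E_R)^2 * eta(vmin(E_R))
        #             / (2 pi m_DM (2 m_T E_R + m_med^2)^2)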
        F_T = lambda E_R: np.sqrt(np.exp(-(1. / 3.) * (E_R**2.)))
        vmin = lambda E_R: np.sqrt(m_T * E_R / 2 / (muT**2.))
        eta = eta_F()
        g2 = 1.  # unit coupling; the limit is set on the overall normalization g
        signal = rho_0 * xi_T * g2 * F_T(E_R)**2 * eta(
            vmin(E_R)) / 2. / np.pi / m_DM / ((2 * m_T * E_R + m_med**2)**2)
        signal *= kgs  # 1/MeV -->  1/s/MeV/kg
        return signal

    def ScatterProb(q, E1, E2):
        # CRESST definitions
        # Energy resolution is set by a Gaussian at 20 eV
        # FIXME: Check energy resolution implementation
        # var = lambda x: np.exp(((x-muCRESST)**2.)/2./(sigmaCRESST**2))/2./np.pi
        E_R = q**2. / 2. / m_T
        # var = 20.*1.e-6 # MeV
        var = 1.  # MeV
        # print erf((E1-E_R)/np.sqrt(2)/var)
        prob = (erf((E2 - E_R) / np.sqrt(2) / var) - erf(
            (E1 - E_R) / np.sqrt(2) / var)) / 2.
        # prob = 1.
        # print "Probability", prob
        return prob

    Eth = 100 * 1e-6  # threshold energy [MeV]
    Vesc = 544. / c_kms
    Vobs = 232. / c_kms
    qmin = np.sqrt(2. * m_T * Eth)  # MeV
    qmax = 2. * muT * (Vesc + Vobs)  # MeV
    E_Rmin = qmin**2. / 2. / m_T
    E_Rmax = qmax**2. / 2. / m_T

    if qmin > qmax:
        print "Skip m_DM [GeV]", m_DM * 1e-3
        return None


    # Alternative: integrate the scattering probability times dRdE over recoils
    # sig = np.zeros(len(Emeans))
    # sig_dif = lambda ER, x1, x2: ScatterProb(ER, x1, x2) * dRdE(ER)
    # for i in range(len(Emeans)):
    #     sig[i] = quad(sig_dif, E_Rmin, E_Rmax, args=(E[i], E[i+1]))[0]

    sig = dRdE(Emeans) * Ewidth

    # print dRdE_A, qdiff
    # print Emeans, sig
    # plt.loglog(Emeans, sig)
    # plt.xlabel(r'$E_R (MeV)$')
    # plt.ylabel(r'$dR/dE_R$')
    # plt.show()
    # quit()

    ################################### Background definitions

    # Backgrounds
    # For CRESST-III we assume 1000 kg days of exposure with an energy threshold of 100 eV. There are 19 bins between 0.1 and 2 keV.

    # Background level assumed to be 3.5e-2 keV^-1 kg^-1 day^-1
    #bkg = np.zeros(len(E))
    #bkg += 3.5
    bkg = np.ones_like(sig) * 3.5e-2 * (Ewidth * 1e3) / (3600. * 24)

    # Covariance matrix for energy spectrum uncertainty (ad hoc)
    # Sigma = get_sigma(Emeans, lambda x, y: np.exp(-(x-y)**2/2/(x*y)/0.5**2))

    # Set up the swordfish
    unc = 0.01  # 1% bkg uncertainty
    corr_length = 1  # correlation length of the bkg uncertainty (deg)

    ################################### Exposure Goes here
    obsT = np.ones_like(sig) * 100 * 3600  # 100 hours of observation in s

    if UL:
        systematics = None
        m = sf.Swordfish(sig,
                         bkg,
                         systematics,
                         obsT,
                         solver='cg',
                         verbose=True)

        # Calculate upper limits with effective counts method
        ec = sf.EffectiveCounts(m)
        UL = ec.upperlimit(0.05, 0, gaussian=True)
        s, b = ec.effectivecounts(0, 1.)

        print "DM mass [GeV]:", m_DM * 1e-3
        print "Total signal counts (theta = 1):", ec.counts(0, 1.0)
        print "Eff.  signal counts (theta = 1):", s
        print "Eff.  bkg counts (theta = 1)   :", b
        print "Upper limit on theta           :", UL

        #F = m.effectiveinfoflux(0)
        #f = harp.HARPix.from_data(sig, F, div_sr = True)
        #m = f.get_healpix(512, idxs=(3,))
        #hp.mollview(m, nest = True)
        #plt.savefig('test.eps')
        return UL
    else:
        # NOTE: fluxes, noise, systematics, and exposure are never defined in
        # this branch; they would have to come from a get_model_input call such
        # as the one sketched here (dsig and expo are likewise undefined):
        # fluxes, noise, systematics, exposure = get_model_input(
        #     [sig, dsig], bkg,
        #     [dict(err=bkg * unc, sigma=corr_length, Sigma=None, nside=0)], expo)
        m = sf.Swordfish(fluxes,
                         noise,
                         systematics,
                         exposure,
                         solver='cg',
                         verbose=False)
        F = m.fishermatrix()
        return F
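
A brief scan sketch (illustrative, not from the source): collect DD upper limits over a mass grid, skipping masses where the routine bails out and returns None.

masses = np.logspace(-1., 1., 20)  # GeV
uls = [(m, DD(m, UL=True)) for m in masses]
uls = [(m, ul) for m, ul in uls if ul is not None]
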
Example no. 10
def CTA(m_DM, UL=True, syst_flag=True, Tobs=100.):
    # Parameters
    E = Logbins(1.0, 3.0, 50)  # 50 log10 bins from 10 GeV to 1 TeV
    unc = 0.01  # 1% bkg uncertainty
    corr_length = 1  # correlation length of the bkg uncertainty (deg)
    Sigma = get_sigma(E.means, lambda x, y: np.exp(-(x - y)**2 / 2 /
                                                   (x * y) / 0.5**2))
    sv0 = 1e-26

    # Get J-value map
    J = get_Jmap()

    # Define signal spectrum
    t = sf.func_to_templates(lambda x, y: get_sig_spec(x * sv0, y, E),
                             [1., m_DM],
                             dx=[.01, m_DM * 0.01])
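    # Assumption about func_to_templates: it returns one template per model
    # parameter, the finite-difference derivative of the flux with step dx;
    # since the flux is linear in x, t[0] is the signal spectrum at x = 1 and
    # t[1] the gradient with respect to the DM mass.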

    # Get signal maps
    S = J.expand(t[0])
    dS = J.expand(t[1])

    # Get background (instr.)
    B = get_instr_bkg(E)  # FIXME?

    #    plt.loglog(E.means, B.get_integral()/4./np.pi)
    #    plt.loglog(E.means, S.get_integral()/4./np.pi)
    #    plt.show()
    #    quit()

    # Get exposure
    expo = get_exposure(E, Tobs)
    flux = [
        S,
    ] if UL else [dS, S]
    fluxes, noise, systematics, exposure = get_model_input(
        flux, B, [dict(err=B * unc, sigma=corr_length, Sigma=Sigma, nside=0)],
        expo)
    if not syst_flag:
        systematics = None
    m = sf.Swordfish(fluxes,
                     noise,
                     systematics,
                     exposure,
                     solver='direct',
                     verbose=False)

    if UL:
        # Calculate upper limits with effective counts method
        ec = sf.EffectiveCounts(m)
        x_UL = ec.upperlimit(0.05, 0, gaussian=True)
        sv_UL = x_UL * sv0
        s, b = ec.effectivecounts(0, 1.)
        #        print "Total signal counts (theta = 1):", ec.counts(0, 1.0)
        #        print "Eff.  signal counts (theta = 1):", s
        #        print "Eff.  bkg counts (theta = 1)   :", b
        #        print "Upper limit on theta           :", sv_UL

        return sv_UL
    else:
        F = m.fishermatrix()  # w.r.t. (x,m)
        F[1, 1] /= sv0**2
        F[0, 1] /= sv0
        F[1, 0] /= sv0
        return F
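
A brief usage sketch (illustrative): compare the CTA <sigma v> upper limits with and without background systematics over a mass grid.

masses = np.logspace(1.5, 3.5, 10)  # GeV
sv_syst = [CTA(m, UL=True, syst_flag=True) for m in masses]
sv_stat = [CTA(m, UL=True, syst_flag=False) for m in masses]
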
Example no. 11
def MW_dSph():
    nside = 32

    bg_hpx, exposure_hpx = get_BG()

    dims = ()

    J = get_Jmap()

    Jex = get_extragalactic()

    J = J + Jex

    # Signal definition
    spec = 1.
    dSph = harp.HARPix(dims=dims).add_iso(1, fill=1.)
    pos_list = [(50, 40), (-20, 30), (80, -80), (42, -15)]
    Jlist = [3e21, 3e21, 6e21, 2e21]
    for J0, pos in zip(Jlist, pos_list):
        dSph = dSph.add_singularity(pos, 0.1, 5, n=10)
        #dSph = harp.HARPix(dims = dims).add_disc(pos, 10, 62).add_disc((50,-40), 10, 32)
        # bind J0 at definition time to avoid late-binding in the loop closure
        dSph.add_func(lambda d, J0=J0: J0 / (.1 + d)**2, mode='dist', center=pos)
    sig = J + dSph
    #sig.data += 1e21 # EGBG
    plot_harp(sig, 'sig.eps', maxfac=1.0)

    # Background definition
    #bg = harp.HARPix(dims = dims).add_iso(64)
    bg = harp.HARPix.from_healpix(bg_hpx, nest=False)
    plot_harp(bg, 'bg.eps', maxfac=0.01)

    exposure = harp.HARPix.from_healpix(exposure_hpx, nest=False)

    # Covariance matrix definition
    cov = harpix_Sigma(sig)
    bg_flat = deepcopy(bg)
    bg_flat.data *= 0
    bg_flat.data += 1e-8
    #    cov.add_systematics(err = bg_flat*0.1, sigma =  .25, Sigma = None, nside =64)
    #    cov.add_systematics(err = bg_flat*0.1, sigma =  .5, Sigma = None, nside =16)
    #    cov.add_systematics(err = bg_flat*0.1, sigma =  1., Sigma = None, nside =16)
    #    cov.add_systematics(err = bg_flat*0.1, sigma =  2., Sigma = None, nside =64)
    #    cov.add_systematics(err = bg_flat*0.1, sigma =  3., Sigma = None, nside =64)
    #    cov.add_systematics(err = bg_flat*0.1, sigma =  4., Sigma = None, nside =64)
    #    cov.add_systematics(err = bg_flat*0.1, sigma =  7., Sigma = None, nside =64)
    cov.add_systematics(err=bg_flat * 0.1, sigma=10., Sigma=None, nside=64)
    cov.add_systematics(err=bg_flat * 0.3, sigma=1e10, Sigma=None, nside=8)
    cov.add_systematics(err=bg_flat * 0.1, sigma=20., Sigma=None, nside=64)
    #cov.add_systematics(err = bg_flat*0.1, sigma = 25., Sigma = None, nside = 1)
    #cov.add_systematics(err = bg_flat*0.1, sigma = 30., Sigma = None, nside = 1)
    #cov.add_systematics(err = bg_flat*0.1, sigma = 35., Sigma = None, nside = 1)
    #cov.add_systematics(err = bg_flat*0.1, sigma = 40., Sigma = None, nside = 1)

    # Set up swordfish
    fluxes = [sig.get_data(mul_sr=True)]
    noise = bg.get_formatted_like(sig).get_data(mul_sr=True)
    expmap = exposure.get_formatted_like(sig).get_data() * 1e-4
    systematics = cov
    #systematics = None
    m = sf.Swordfish(fluxes, noise, systematics, expmap, solver='cg')

    F = m.effectiveinfoflux(0)
    f = harp.HARPix.from_data(sig, F, div_sr=True)
    plot_harp(f, 'MW.eps', maxfac=0.1)
Example no. 12
def load_bkgs():
    b = dict()
    for name in bkgs:
        S1, temp = np.loadtxt("../DD_files/" + name + ".txt", unpack=True)
        interp = interp1d(S1, temp, bounds_error=False, fill_value=0.0)
        b[name] = interp(s1means)
    return b

mlist = np.logspace(1, 3.9, 100) # GeV
b_dict = load_bkgs()
obsT = np.ones_like(s1means)*35636.
obsT2 = np.ones_like(s1means)*35636.*100
B = [b_dict[bkgs[0]]/obsT, b_dict[bkgs[1]]/obsT, b_dict[bkgs[2]]/obsT,
    b_dict[bkgs[3]]/obsT, b_dict[bkgs[4]]/obsT, b_dict[bkgs[5]]/obsT]

SF1 = sf.Swordfish(B, T=[0.01,0.01,0.01,0.01,0.01,0.01], E=obsT)
SF2 = sf.Swordfish(B, T=[0.01,0.01,0.01,0.01,0.01,0.01], E=obsT2)


def genO1(Euclideanize=False):
    norm01 = 9.756e-10
    cp = np.zeros([11])
    cp[0] = norm01
    cn = cp
    ULlist = []
    ULlist_Xenon100T = []

    for i, m in enumerate(mlist):
        dRdS = dRdS1(s1means, m, cp, cn)*s1width
        UL1 = SF1.upperlimit(dRdS, 0.1)
        ULlist.append(UL1*norm01**2)
Example no. 13
    eff2 = np.append(eff2 / 100., eff2_extra)
    return interp1d(eff1, eff2)


def dRdE(m, E, c, eff):
    s = eff(E) * DMU.dRdE_NREFT(E, m, c, c, "Ar40") + 1.e-30
    return s


eff_temp = load_eff()

obsT = np.ones_like(ER_c) * 1422.
obsT2 = np.ones_like(ER_c) * 1422. * 100. * 50.
#BJK: Changed this from obsT to obsT2
b = np.zeros_like(ER_c) + 0.1 / obsT2 / len(ER_c)
SF2 = sf.Swordfish([b], T=[0.01], E=obsT2)

#print(obsT2)
#print(b)
##################

root01 = h5py.File('../hdf5/Xenon100T_gridscan01_Euclideanized_dRdS1.hdf5', 'r')
couplings01 = np.array(root01['c'])
ES01 = np.array(root01['ES'])
mass01 = np.array(root01['mass'])

ESAr = []

for i in tqdm(range(0, len(mass01)), desc="Euclideanizing"):
    cp = couplings01[i, :11]
    signal = dRdE(mass01[i], ER_c, cp, eff_temp) * ER_width
Example no. 14
def main():
    E = np.linspace(0, 11, 100)

    m = np.linspace(2, 10, 20)  # mass (peak position)
    n = np.linspace(1, 10, 20)  # flux strength (normalization)
    I = np.zeros(
        (len(n), len(m), 2, 2))  # Fisher metric (cartesian coordinates)
    UL = np.zeros(len(m))

    for i, m0 in enumerate(m):
        for j, n0 in enumerate(n):
            sigma = 0.7
            flux = sf.func_to_templates(
                lambda m, n: 1 / sigma * n * np.exp(-0.5 *
                                                    (m - E)**2 / sigma**2),
                [m0, n0])
            noise = E**2 + 10
            exposure = np.ones_like(E) * 1.0
            mysf = sf.Swordfish(flux, noise, None, exposure)
            I[j, i] = mysf.fishermatrix()

        flux = sf.func_to_templates(
            lambda n: 1 / sigma * n * np.exp(-0.5 * (m0 - E)**2 / sigma**2),
            [1])
        mysf = sf.Swordfish(flux, noise, None, exposure)
        ef = sf.EffectiveCounts(mysf)
        UL[i] = ef.upperlimit(0.95, 0)

    tf = mp.TensorField(m, n, I)
    #    tf.quiver()
    vf1, vf2 = tf.get_VectorFields()

    mask = lambda x, y: y > np.interp(x, m, UL)

    lines = vf1.get_streamlines([5, 3], Nmax=200, mask=mask)
    for line in lines:
        plt.plot(line.T[0], line.T[1], '0.5', lw=1.0)

    lines = vf2.get_streamlines([5, 3], Nmax=200, mask=mask)
    for line in lines:
        plt.plot(line.T[0], line.T[1], '0.5', lw=1.0)

    contour = tf.get_contour([8, 4.0], 1, Npoints=328)
    plt.plot(contour.T[0], contour.T[1], 'b')
    contour = tf.get_contour([8, 4.0], 2, Npoints=328)
    plt.plot(contour.T[0], contour.T[1], 'b--')
    contour = tf.get_contour([8, 4.0], 3, Npoints=1000)
    plt.plot(contour.T[0], contour.T[1], 'b:')

    contour = tf.get_contour([4, 7.0], 1, Npoints=128)
    plt.plot(contour.T[0], contour.T[1], 'b')
    contour = tf.get_contour([4, 7.0], 2, Npoints=128)
    plt.plot(contour.T[0], contour.T[1], 'b--')
    contour = tf.get_contour([4, 7.0], 3, Npoints=128)
    plt.plot(contour.T[0], contour.T[1], 'b:')

    plt.plot(m, UL, 'r')
    plt.xlim([2, 10])
    plt.ylim([0, 10])

    plt.savefig('test.eps')
Example no. 15
def load_bkgs():
    b = dict()
    for name in bkgs:
        S1, temp = np.loadtxt("../DD_files/" + name + ".txt", unpack=True)
        interp = interp1d(S1, temp, bounds_error=False, fill_value=0.0)
        b[name] = interp(s1means)
    return b


bkgs = ['acc', 'Anom', 'ElectronRecoil', 'n', 'Neutrino', 'Wall']
b_dict = load_bkgs()
obsT = np.ones_like(s1means) * 35636.
b = np.array(b_dict[bkgs[0]] / obsT)
B = [
    b_dict[bkgs[0]] / obsT, b_dict[bkgs[1]] / obsT, b_dict[bkgs[2]] / obsT,
    b_dict[bkgs[3]] / obsT, b_dict[bkgs[4]] / obsT, b_dict[bkgs[5]] / obsT
]

SF = sf.Swordfish(B, T=[0.01, 0.01, 0.01, 0.01, 0.01, 0.01], E=obsT)


def Volumes(Argon=True):
    if Argon:
        filename = '../hdf5/Xenon_DS_250000_'
    else:
        filename = '../hdf5/Xenon100T_'

    R_01 = []
    R_011 = []
    mlist = np.logspace(1, 4, 100)

    # Calculates the number of signal events in XENONnT for each point

    for m in mlist: