# Example 1
import h5py
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from random import randint
from scipy.interpolate import interp1d
from scipy.stats import binned_statistic
from tqdm import tqdm

# `sf` (providing SignalHandler), `dRdS1` and `s1means` are assumed to come from the
# project's own modules and are not defined here.
# The concrete line styles below are an assumption; any matplotlib styles will do.
linecycler = cycle(["-", "--", "-.", ":"])
def main():
    # Quick self-test: estimate the signal-space volume and pick benchmark points
    # for 10000 random 2D points in a 20 x 20 box, then plot both sets.
    N = 10000
    P = np.random.random((N, 2)) * 20
    X = P
    SH = sf.SignalHandler(P, X)
    V = SH.volume(estimate_dim=False)
    b = SH.get_benchmarks()
    print(V, len(b))
    plt.scatter(P[:, 0], P[:, 1])
    plt.scatter(P[b, 0], P[b, 1])
    plt.show()
def operatordiscrim(operator=11, both=False):
    # Load the O1 grid scan (Xe and Ar euclideanized signals plus halo-nuisance terms).
    root01 = h5py.File('../hdf5/Xenon100T_DS20k_gridscan01_HaloTrue.hdf5', 'r')
    couplings01 = np.array(root01['c'])

    ES01Xe = np.array(root01['ESXe'])
    ES01Ar = np.array(root01['ESAr'])
    NuisanceES = np.array(root01['NuisanceES'])
    mass01 = np.array(root01['mass'])

    # Prepend the mass so that column 0 is the mass and column n holds the coupling used for operator n.
    c01 = np.zeros([couplings01.shape[0], couplings01.shape[1]+1])
    c01[:,0] = mass01
    c01[:,1:] = couplings01

    ##################

    # Load the grid scan for the operator that O1 is tested against (O11 by default).
    root011 = h5py.File('../hdf5/Xenon100T_DS20k_gridscan0'+str(operator)+'_HaloTrue.hdf5', 'r')
    couplings011 = np.array(root011['c'])

    ES011Xe = np.array(root011['ESXe'])
    ES011Ar = np.array(root011['ESAr'])
    NuisanceES11 = np.array(root011['NuisanceES'])
    mass011 = np.array(root011['mass'])

    c011 = np.zeros([couplings011.shape[0], couplings011.shape[1]+1])
    c011[:,0] = mass011
    c011[:,1:] = couplings011

    # Euclideanized-signal vectors: Xe only and Xe + Ar, each with the halo-nuisance terms appended.
    ESTmp = np.append(ES01Xe, ES01Ar, axis=1)
    ESHaloUnXe = np.append(ES01Xe, NuisanceES, axis=1)
    ESHaloUnXeAr = np.append(ESTmp, NuisanceES, axis=1)
    
    ESTmp11 = np.append(ES011Xe, ES011Ar, axis=1)
    ESHaloUnXe11 = np.append(ES011Xe, NuisanceES11, axis=1)
    ESHaloUnXeAr11 = np.append(ESTmp11, NuisanceES11, axis=1)

    cXe = np.vstack((c01, c011))
    ESXe = np.vstack((ESHaloUnXe, ESHaloUnXe11))

    cXeAr = np.vstack((c01, c011))
    ESXeAr = np.vstack((ESHaloUnXeAr, ESHaloUnXeAr11))


    shXe = sf.SignalHandler(cXe, ESXe)
    shXeAr = sf.SignalHandler(cXeAr, ESXeAr)


    discrimination = []

    # Score each benchmark: it gets +1 from the Xe-only handler and +1 from the Xe + Ar
    # handler whenever no point with a non-zero coupling of the tested operator falls
    # inside its 2-sigma region, i.e. whenever the operator can be discriminated there.
    for i in tqdm(range(len(c01[:,0]))):
        P0 = c01[i,:]
        pp, el_ind = shXe.query_region(P0, 2.0, return_indices=True)
        if sum(pp[:,operator] > 0.0) > 0:
            discrimination.append(0.)
        else:
            discrimination.append(1.)

    # Repeat with the combined Xe + Ar handler, so the final score is 0, 1 or 2.
    for i in tqdm(range(len(discrimination))):
        P0 = c01[i,:]
        pp, el_ind = shXeAr.query_region(P0, 2.0, return_indices=True)
        if sum(pp[:,operator] > 0.0) > 0:
            discrimination[i] += 0.
        else:
            discrimination[i] += 1.

    # print discrimination,c01[:,0], c01[:,1]
    from scipy.stats import binned_statistic
    discrimination = np.array(discrimination)
    percentile = lambda x: np.percentile(x, 5)
    if both:
        line, bins, _ = binned_statistic(c01[discrimination==2,0], c01[discrimination==2,1], percentile, bins=np.logspace(1,4,num=40))
        bin_c = bins[:-1] + np.diff(bins)
        mp = 0.938 # GeV
        mu = bin_c*mp/(bin_c + mp)
        linesig = (line)**2 * (mu**2/np.pi) * (1.98e-14**2)
        plt.plot(bin_c, linesig, ls=next(linecycler), label="O" + str(operator) + " - Xe")

    line, bins, _ = binned_statistic(c01[np.logical_or(discrimination==1, discrimination==2), 0],
                                     c01[np.logical_or(discrimination==1, discrimination==2), 1],
                                     percentile, bins=np.logspace(1, 4, num=40))
    bin_c = bins[:-1] + np.diff(bins)
    mp = 0.938 # GeV
    mu = bin_c*mp/(bin_c + mp)
    linesig = (line)**2 * (mu**2/np.pi) * (1.98e-14**2)
    plt.plot(bin_c, linesig, ls=next(linecycler), label="O" + str(operator) + " - Xe + Ar")
    return None
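
# The conversion from a dimensionless coupling to a DM-proton cross section,
# sigma = c^2 * mu_p^2 / pi * (hbar*c)^2 with hbar*c = 1.98e-14 GeV cm, appears inline
# several times above and below (the `linesig` computations). The helper here is an
# illustrative sketch, not part of the original code, that factors it out.
def coupling_to_sigma(c, m_dm, mp=0.938):
    """Cross section in cm^2 for coupling `c` and DM mass `m_dm` in GeV (hypothetical helper)."""
    mu = m_dm * mp / (m_dm + mp)                   # DM-proton reduced mass [GeV]
    return c**2 * (mu**2 / np.pi) * (1.98e-14)**2  # (hbar*c)^2 converts GeV^-2 to cm^2
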
def modeldiscrim(limit, millicharge = True):
    # Load the O1 grid scan used as the reference signal.
    root01 = h5py.File('../hdf5/Xenon100T_DS20k_gridscan01_HaloTrue.hdf5', 'r')
    couplings01 = np.array(root01['c'])

    ES01Xe = np.array(root01['ESXe'])
    ES01Ar = np.array(root01['ESAr'])
    NuisanceES = np.array(root01['NuisanceES'])
    mass01 = np.array(root01['mass'])

    c01 = np.zeros([couplings01.shape[0], couplings01.shape[1]+1])
    c01[:,0] = mass01
    c01[:,1:] = couplings01

    ##################

    if millicharge:
        rootmodel = h5py.File('../hdf5/Xenon100T_DS20k_gridscanmillicharge_HaloTrue.hdf5', 'r')
    else:
        rootmodel = h5py.File('../hdf5/Xenon100T_DS20k_gridscanBdipole_HaloTrue.hdf5', 'r')

    # The model scan stores mass plus two couplings; embed it in the wider coupling basis used for O1.
    cmodel = np.array(rootmodel['c'])
    cmodel_01 = np.zeros([cmodel.shape[0], couplings01.shape[1]+1])
    cmodel_01[:,:3] = cmodel
    ESmodelXe = np.array(rootmodel['ESXe'])
    ESmodelAr = np.array(rootmodel['ESAr'])
    NuisanceESmodel = np.array(rootmodel['NuisanceES'])

    ESTmp = np.append(ES01Xe, ES01Ar, axis=1)
    ESHaloUnXe = np.append(ES01Xe, NuisanceES, axis=1)
    ESHaloUnXeAr = np.append(ESTmp, NuisanceES, axis=1)
    
    ESTmpmodel = np.append(ESmodelXe, ESmodelAr, axis=1)
    ESHaloUnXemodel = np.append(ESmodelXe, NuisanceESmodel, axis=1)
    ESHaloUnXeArmodel = np.append(ESTmpmodel, NuisanceESmodel, axis=1)

    cXe = np.vstack((c01, cmodel_01))
    ESXe = np.vstack((ESHaloUnXe, ESHaloUnXemodel))

    cXeAr = np.vstack((c01, cmodel_01))
    ESXeAr = np.vstack((ESHaloUnXeAr, ESHaloUnXeArmodel))

    shXe = sf.SignalHandler(cXe, ESXe)
    shXeAr = sf.SignalHandler(cXeAr, ESXeAr)

    discrimination = []
    # Column index of the model coupling in the embedded basis: 1 = millicharge, 2 = magnetic dipole.
    if millicharge:
        a = 1
    else:
        a = 2

    for i in tqdm(range(len(c01[:,0]))):
        P0 = c01[i,:]
        pp, el_ind = shXe.query_region(P0, 2.0, return_indices = True)
        if sum(pp[:,a] > 0.0) > 0:
            discrimination.append(0.)
        else:
            discrimination.append(1.)

    for i in tqdm(range(len(discrimination))):
        P0 = c01[i,:]
        pp, el_ind = shXeAr.query_region(P0, 2.0, return_indices = True)
        if sum(pp[:,a] > 0.0) > 0:
            discrimination[i] += 0.
        else:
            discrimination[i] += 1.

    from scipy.stats import binned_statistic
    discrimination = np.array(discrimination)
    percentile = lambda x: np.percentile(x, 10)
    line, bins, _ = binned_statistic(c01[np.logical_or(discrimination==1, discrimination==2), 0],
                                     c01[np.logical_or(discrimination==1, discrimination==2), 1],
                                     percentile, bins=np.logspace(1., 4, num=40))

    bin_c = bins[:-1] + np.diff(bins)
    mp = 0.938 # GeV
    mu = bin_c*mp/(bin_c + mp)
    linesig = (line)**2 * (mu**2/np.pi) * (1.98e-14**2)

    if millicharge:
        plt.plot(bin_c, linesig, ls=next(linecycler), label="Millicharge - Xe + Ar")
    else:
        # Zero out points that lie above the supplied exclusion limit.
        linesig[linesig > limit(bin_c)] = 0.0
        plt.plot(bin_c, linesig, ls=next(linecycler), label="Magnetic Dipole - Xe + Ar")

    return None
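
# Hypothetical usage of modeldiscrim() (not part of the original code): `limit` must be
# a callable returning the existing cross-section limit at a given mass, e.g. an
# interpolation of a tabulated curve. The file name and column layout are placeholders.
def _example_modeldiscrim_usage(limit_file='limits/magnetic_dipole_limit.txt'):
    # assumed columns: mass [GeV], sigma [cm^2]
    limit_data = np.loadtxt(limit_file)
    limit = interp1d(limit_data[:, 0], limit_data[:, 1],
                     bounds_error=False, fill_value=np.inf)
    modeldiscrim(limit, millicharge=False)  # magnetic dipole curve, masked above `limit`
    modeldiscrim(limit, millicharge=True)   # `limit` is not used in the millicharge branch
    plt.legend()
    plt.show()
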
def massdiscrimmodels(matching, millicharge=True):
    mp = 0.938  # proton mass in GeV
    if millicharge:
        root01 = h5py.File(
            '../hdf5/Xenon100T_DS20k_gridscanmillicharge_HaloTrue.hdf5', 'r')
    else:
        root01 = h5py.File(
            '../hdf5/Xenon100T_DS20k_gridscanBdipole_HaloTrue.hdf5', 'r')
    couplings01 = np.array(root01['c'])

    ES01Xe = np.array(root01['ESXe'])
    ES01Ar = np.array(root01['ESAr'])
    NuisanceES = np.array(root01['NuisanceES'])
    # Points whose halo-nuisance component vanishes, i.e. no halo uncertainties.
    noHaloUn = NuisanceES[:, 0] == 0.
    c01 = couplings01

    # Convert the couplings (column 1: millicharge, column 2: magnetic dipole) to cross sections in cm^2.
    mu1 = c01[:, 0] * mp / (c01[:, 0] + mp)
    c01[:, 1] = (c01[:, 1])**2 * (mu1**2 / np.pi) * (1.98e-14**2)
    c01[:, 2] = (c01[:, 2])**2 * (mu1**2 / np.pi) * (1.98e-14**2)

    ES01XeNoHaloUn = ES01Xe[noHaloUn]
    ES01ArNoHaloUn = ES01Ar[noHaloUn]
    c01NoHaloUn = c01[noHaloUn, :]

    ESXeAr = np.append(ES01XeNoHaloUn, ES01ArNoHaloUn, axis=1)
    ESTmp = np.append(ES01Xe, ES01Ar, axis=1)
    ESHaloUn = np.append(ESTmp, NuisanceES, axis=1)

    shXe = sf.SignalHandler(c01NoHaloUn, ES01XeNoHaloUn)
    shXeAr = sf.SignalHandler(c01NoHaloUn, ESXeAr)
    shHaloUn = sf.SignalHandler(c01, ESHaloUn)

    sigma_list_Xe = []
    sigma_list_XeAr = []
    sigma_list_HaloUn = []
    m_listXe = []
    m_listXeAr = []
    m_listHaloUn = []

    # Keep the points whose 2-sigma region reaches the largest mass in the grid,
    # i.e. whose mass cannot be bounded from above.
    for i in tqdm(range(len(c01[:, 0]))):
        P0 = c01[i, :]
        pp, el_ind = shHaloUn.query_region(P0,
                                           sigma=2.0,
                                           d=1,
                                           return_indices=True)
        if pp.size == 0.0:
            continue
        if np.max(pp[:, 0]) == np.max(c01[:, 0]):
            m_listHaloUn.append(c01[i, 0])
            if millicharge:
                sigma_list_HaloUn.append(c01[i, 1])
            else:
                sigma_list_HaloUn.append(c01[i, 2])

    ##### Filter results

    sigma_list_Xe = np.array(sigma_list_Xe)
    sigma_list_XeAr = np.array(sigma_list_XeAr)
    sigma_list_HaloUn = np.array(sigma_list_HaloUn)

    m_listXe = np.array(m_listXe)
    m_listXeAr = np.array(m_listXeAr)
    m_listHaloUn = np.array(m_listHaloUn)

    percentile = lambda x: np.percentile(x, 99.9)
    line, bins, _ = binned_statistic(m_listHaloUn,
                                     sigma_list_HaloUn,
                                     percentile,
                                     bins=np.logspace(1, 3.9, num=40))
    bin_c = bins[:-1] + np.diff(bins)
    # Normalise the curve so that its maximum equals `matching`.
    scale = matching / np.nanmax(line)
    line *= scale

    if millicharge:
        plt.loglog(bin_c,
                   line,
                   ls=next(linecycler),
                   label=r"Millicharge - Xe + Ar")
    else:
        plt.loglog(bin_c,
                   line,
                   ls=next(linecycler),
                   label=r"Magnetic Dipole - Xe + Ar")
def massdiscrim(matching, operator=1, Uncertainties=True, Xe=False):
    if Xe:
        Uncertainties = False
    mp = 0.938  # proton mass in GeV

    root01 = h5py.File('../hdf5/Xenon100T_DS20k_gridscan0' + str(operator) +
                       '_HaloTrue.hdf5', 'r')
    couplings01 = np.array(root01['c'])

    ES01Xe = np.array(root01['ESXe'])
    ES01Ar = np.array(root01['ESAr'])
    NuisanceES = np.array(root01['NuisanceES'])
    # Points whose halo-nuisance component vanishes, i.e. no halo uncertainties.
    noHaloUn = NuisanceES[:, 0] == 0.
    mass01 = np.array(root01['mass'])

    # Convert selected couplings (columns 0, 3 and 10) to cross sections in cm^2.
    mu1 = mass01 * mp / (mass01 + mp)
    couplings01[:, 0] = (couplings01[:, 0])**2 * (mu1**2 / np.pi) * (1.98e-14**2)
    couplings01[:, 10] = (couplings01[:, 10])**2 * (mu1**2 / np.pi) * (1.98e-14**2)
    couplings01[:, 3] = (couplings01[:, 3])**2 * (mu1**2 / np.pi) * (1.98e-14**2)

    c01 = np.zeros([couplings01.shape[0], couplings01.shape[1] + 1])
    c01[:, 0] = mass01

    # mu1 = mass01*mp/(mass01 + mp)
    # for i in range(couplings01.shape[1]):
    #     couplings01[:,i] = (couplings01[:,i])**2 * (mu1**2/np.pi) * (1.98e-14**2)
    c01[:, 1:] = couplings01
    ES01XeNoHaloUn = ES01Xe[noHaloUn]
    ES01ArNoHaloUn = ES01Ar[noHaloUn]
    c01NoHaloUn = c01[noHaloUn, :]

    ESXeAr = np.append(ES01XeNoHaloUn, ES01ArNoHaloUn, axis=1)
    ESTmp = np.append(ES01Xe, ES01Ar, axis=1)
    ESHaloUn = np.append(ESTmp, NuisanceES, axis=1)
    # ESHaloUn = ESTmp

    shXe = sf.SignalHandler(c01NoHaloUn, ES01XeNoHaloUn)
    shXeAr = sf.SignalHandler(c01NoHaloUn, ESXeAr)
    shHaloUn = sf.SignalHandler(c01, ESHaloUn)

    sigma_list_Xe = []
    sigma_list_XeAr = []
    sigma_list_HaloUn = []
    m_listXe = []
    m_listXeAr = []
    m_listHaloUn = []

    if Xe:
        for i in tqdm(range(len(c01NoHaloUn[:, 0]))):
            P0 = c01NoHaloUn[i, :]
            pp, el_ind = shXe.query_region(P0,
                                           sigma=2.0,
                                           d=1,
                                           return_indices=True)
            if pp.size == 0.0:
                continue
            if np.max(pp[:, 0]) == np.max(mass01):
                m_listXe.append(c01NoHaloUn[i, 0])
                sigma_list_Xe.append(c01NoHaloUn[i, operator])

    if not Uncertainties:
        for i in tqdm(range(len(c01NoHaloUn[:, 0]))):
            P0 = c01NoHaloUn[i, :]
            pp, el_ind = shXeAr.query_region(P0,
                                             sigma=2.0,
                                             d=1,
                                             return_indices=True)
            if pp.size == 0.0:
                continue
            if np.max(pp[:, 0]) == np.max(mass01):
                m_listXeAr.append(c01NoHaloUn[i, 0])
                sigma_list_XeAr.append(c01NoHaloUn[i, operator])

    for i in tqdm(range(len(c01[:, 0]))):
        P0 = c01[i, :]
        pp, el_ind = shHaloUn.query_region(P0,
                                           sigma=2.0,
                                           d=1,
                                           return_indices=True)
        if pp.size == 0.0:
            continue
        if np.max(pp[:, 0]) == np.max(mass01):
            m_listHaloUn.append(c01[i, 0])
            sigma_list_HaloUn.append(c01[i, operator])

    ##### Filter results

    sigma_list_Xe = np.array(sigma_list_Xe)
    sigma_list_XeAr = np.array(sigma_list_XeAr)
    sigma_list_HaloUn = np.array(sigma_list_HaloUn)

    m_listXe = np.array(m_listXe)
    m_listXeAr = np.array(m_listXeAr)
    m_listHaloUn = np.array(m_listHaloUn)

    # mlist_temp1 = np.unique(m_listXe)
    # mlist_temp2 = np.unique(m_listXeAr)
    # mlist_temp3 = np.unique(m_listHaloUn)

    # sigma_discrimXe = np.zeros_like(mlist_temp1)
    # sigma_discrimXeAr = np.zeros_like(mlist_temp2)
    # sigma_discrimHaloUn = np.zeros_like(mlist_temp3)

    percentile = lambda x: np.percentile(x, 99.9)
    if Xe:
        line, bins, _ = binned_statistic(m_listXe,
                                         sigma_list_Xe,
                                         percentile,
                                         bins=np.logspace(1, 3.9, num=40))
        bin_c = bins[:-1] + np.diff(bins)
        if operator != 1:
            scale = matching / np.nanmax(line)
            line *= scale
        plt.plot(bin_c,
                 line,
                 ls=next(linecycler),
                 label=r"O" + str(operator) + r" - Xe w/o Halo Uncertainties")

    if not Uncertainties:
        line, bins, _ = binned_statistic(m_listXeAr,
                                         sigma_list_XeAr,
                                         percentile,
                                         bins=np.logspace(1, 3.9, num=40))
        bin_c = bins[:-1] + np.diff(bins)
        if operator != 1:
            scale = matching / np.nanmax(line)
            line *= scale
        plt.plot(bin_c,
                 line,
                 ls=next(linecycler),
                 label=r"O" + str(operator) +
                 r" - Xe + Ar w/o Halo Uncertainties")

    line, bins, _ = binned_statistic(m_listHaloUn,
                                     sigma_list_HaloUn,
                                     percentile,
                                     bins=np.logspace(1, 3.9, num=40))
    bin_c = bins[:-1] + np.diff(bins)
    if operator != 1:
        scale = matching / np.nanmax(line)
        line *= scale
    plt.plot(bin_c,
             line,
             ls=next(linecycler),
             label=r"O" + str(operator) + r" - Xe + Ar")
# Example 6 (uses the module-level imports and definitions from Example 1)
def Volumes(Argon=True):
    # Choose the Xe + Ar (DarkSide) euclideanized-signal files or the Xe-only ones.
    if Argon:
        filename = '../hdf5/Xenon_DS_250000_'
    else:
        filename = '../hdf5/Xenon100T_'

    R_01 = []
    R_011 = []
    mlist = np.logspace(1, 4, 100)

    # Calculates the number of signal events in XENONnT for each point

    for m in mlist:
        c_01 = np.zeros(11)
        c_01[0] = 1.
        c_011 = np.zeros(11)
        c_011[10] = 1.
        s1, N1 = dRdS1(s1means, m, c_01, c_01, Nevents=True)
        s11, N11 = dRdS1(s1means, m, c_011, c_011, Nevents=True)
        R_01.append(N1)
        R_011.append(N11)

    R_01 = interp1d(mlist, R_01)
    R_011 = interp1d(mlist, R_011)

    # Simply load the files, not using all points since stable results are found with less

    root = h5py.File(filename + 'gridscan01011_Euclideanized_dRdS1.hdf5', 'r')
    from random import randint
    couplings01011 = np.array(root['c'])
    random_points1 = np.unique(
        [randint(0, couplings01011.shape[0] - 1) for _ in range(50000)])
    ES01011 = np.array(root['ES'])[random_points1]
    mass01011 = np.array(root['mass'])

    c01011 = np.zeros([len(random_points1), couplings01011.shape[1] + 1])
    c01011[:, 0] = mass01011[random_points1]
    c01011[:, 1:] = couplings01011[random_points1]

    ###############

    root01 = h5py.File(filename + 'gridscan01_Euclideanized_dRdS1.hdf5', 'r')
    couplings01 = np.array(root01['c'])
    random_points2 = np.unique(
        [randint(0, couplings01.shape[0] - 1) for _ in range(50000)])
    ES01 = np.array(root01['ES'])[random_points2]
    mass01 = np.array(root01['mass'])

    c01 = np.zeros([len(random_points2), couplings01.shape[1] + 1])
    c01[:, 0] = mass01[random_points2]
    c01[:, 1:] = couplings01[random_points2]

    ##################

    root011 = h5py.File(filename + 'gridscan011_Euclideanized_dRdS1.hdf5', 'r')
    couplings011 = np.array(root011['c'])
    random_points3 = np.unique(
        [randint(0, couplings011.shape[0] - 1) for _ in range(50000)])
    ES011 = np.array(root011['ES'])[random_points3]
    mass011 = np.array(root011['mass'])

    c011 = np.zeros([len(random_points3), couplings011.shape[1] + 1])
    c011[:, 0] = mass011[random_points3]
    c011[:, 1:] = couplings011[random_points3]

    c = np.vstack((c01011, c01, c011))
    ES = np.vstack((ES01011, ES01, ES011))

    # Expected number of signal events for each point (quadratic in the O1 and O11 couplings).
    obsT2 = np.ones_like(s1means) * 35636. * 100
    Events = (c[:, 1]**2 * R_01(c[:, 0]) +
              c[:, 11]**2 * R_011(c[:, 0])) * obsT2[0]

    bin_edges = np.linspace(Events.min(), Events.max(), num=8)
    for i in range(bin_edges.size - 1):
        points = np.logical_and(Events > bin_edges[i],
                                Events < bin_edges[i + 1])
        print(bin_edges[i], bin_edges[i + 1])
        print("Number of points in bin = ", sum(points))

    # Generate the Signal Handler object and find masks for the different sections of the Venn diagrams

    sh = sf.SignalHandler(c, ES)

    c01mask = np.zeros(len(c[:, 0]), dtype=bool)
    c01mask[c[:, 11] == 0.0] = True
    c01mask = sh.shell(c01mask)

    c011mask = np.zeros(len(c[:, 0]), dtype=bool)
    c011mask[c[:, 1] == 0.0] = True
    c011mask = sh.shell(c011mask)

    cmixmask = np.zeros(len(c[:, 0]), dtype=bool)
    cmixmask[np.logical_or(c[:, 1] == 0.0, c[:, 11] == 0.0)] = True
    cmixmask = sh.shell(cmixmask)

    Volall, wall = sh.volume(d=3, sigma=2., return_individual=True)
    Vol01, w01 = sh.volume(d=3, sigma=2., mask=c01mask, return_individual=True)
    Vol011, w011 = sh.volume(d=3,
                             sigma=2.,
                             mask=c011mask,
                             return_individual=True)
    Volmix, wmix = sh.volume(d=3,
                             sigma=2.,
                             mask=cmixmask,
                             return_individual=True)

    plotarray = []
    bin_edges = np.linspace(Events.min(), Events.max(), num=25)
    for i in range(bin_edges.size - 1):
        pointsall = np.logical_and(Events > bin_edges[i],
                                   Events < bin_edges[i + 1])
        points01 = np.logical_and(Events[c01mask] > bin_edges[i],
                                  Events[c01mask] < bin_edges[i + 1])
        points011 = np.logical_and(Events[c011mask] > bin_edges[i],
                                   Events[c011mask] < bin_edges[i + 1])
        pointsmix = np.logical_and(Events[cmixmask] > bin_edges[i],
                                   Events[cmixmask] < bin_edges[i + 1])

        Vall = sum(wall[pointsall])
        V01 = sum(w01[points01])
        V011 = sum(w011[points011])
        Vmix = sum(wmix[pointsmix])
        V01and011 = V01 + V011 - Vmix

        l_temp = [bin_edges[i], bin_edges[i + 1], V01, V011, Vall, V01and011]
        plotarray.append(l_temp)

    plotarray = np.array(plotarray)
    if Argon:
        np.savetxt("../Notebooks/venn_array_Xe+Ar", plotarray)
    else:
        np.savetxt("venn_array_Xe", plotarray)
    return None
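
# Illustrative follow-up (not part of the original code): each row of the saved array is
# [bin_low, bin_high, V01, V011, Vall, V01and011], so the binned volumes can be inspected
# like this. The default path mirrors the Argon=True branch above.
def _example_plot_venn_array(path="../Notebooks/venn_array_Xe+Ar"):
    venn = np.loadtxt(path)
    bin_lo, bin_hi, V01, V011, Vall, V01and011 = venn.T
    plt.step(bin_lo, V01, where='post', label='V01')
    plt.step(bin_lo, V011, where='post', label='V011')
    plt.step(bin_lo, V01and011, where='post', label='V01and011')
    plt.xlabel('Expected signal events')
    plt.ylabel('Volume')
    plt.legend()
    plt.show()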