Example #1
    def __init__(self, reco_energy_edges, reco_czenith_edges, depo_energy_edges, true_czenith_edges):
        """
        Expects the energies in eV, but works in GeV
        """


        self._ereco = bhist([np.array(reco_energy_edges)*(1e-9)])
        self._edepo = bhist([np.array(depo_energy_edges)*(1e-9)])
        self._zreco = bhist([reco_czenith_edges]) # these two are in cos(Zenith)
        self._ztrue = bhist([true_czenith_edges])

        # these are now filled with the values of the probability DENSITY functions for each angle/energy combo 
        # TODO right now there is no assumed covariance ... this should be improved 
        self._energy_odds_array = np.array([[ get_odds_energy(deposited, reconstructed) for reconstructed in self.reco_energy_centers] for deposited in self.depo_energy_centers])
#        self._angle_odds_array = np.array([[ get_odds_angle(true, reconstructed) for reconstructed in self.reco_czenith_centers] for true in self.true_czenith_centers]) 
        self._angle_odds_array = np.array([[[ get_odds_angle(true, reconstructed, deposited) for reconstructed in self.reco_czenith_centers] for deposited in self.depo_energy_centers]for true in self.true_czenith_centers])

        # Avoid equating two floats. Look for a sufficiently small difference! 
        max_diff = 1e-12

        # normalize these things! 
        # so for each energy deposited... the sum of (PDF*width evaluated at each reco bin) should add up to 1. 
        for depo in range(len(self._energy_odds_array)):
            self._energy_odds_array[depo] *= 1./sum(self._energy_odds_array[depo]*self.reco_energy_widths)
            assert(abs(1-sum(self._energy_odds_array[depo]*self.reco_energy_widths)) <= max_diff)
    
        for true in range(len(self._angle_odds_array)):
            # for each of the possible true values
            for deposited in range(len(self._angle_odds_array[true])):
                # and each of the possible energies deposited
                self._angle_odds_array[true][deposited] *= 1./sum(self._angle_odds_array[true][deposited]*self.reco_czenith_widths)
                assert(abs(1-sum(self._angle_odds_array[true][deposited]*self.reco_czenith_widths)) <= max_diff)
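The two normalization loops enforce that every slice of the odds arrays integrates to one over the reconstruction bins. Below is a minimal, self-contained sketch of the same idea using plain NumPy midpoints and widths rather than bhist; the Gaussian shape is only an illustrative stand-in for get_odds_energy.
import numpy as np

# example reco-energy bin edges (GeV), arithmetic midpoints, and widths
edges = np.logspace(0, 2, 11)
centers = 0.5 * (edges[:-1] + edges[1:])
widths = np.diff(edges)

# an unnormalized shape standing in for the PDF values at the bin centers
pdf = np.exp(-0.5 * ((np.log10(centers) - 1.0) / 0.3) ** 2)
pdf *= 1. / np.sum(pdf * widths)   # normalize as a density over these bins

max_diff = 1e-12
assert abs(1. - np.sum(pdf * widths)) <= max_diff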
Example #2
    def reload_null(self):
        """
        This function reloads the null flux 
        """

        sp = SterileParams(0., 0., 0., 0.)

        if self.ui.recoBox.isChecked():
            which = config["recon_flux"] + ".dat"
        else:
            which = config["nu_flux_downsize"] + ".dat"

        with open(gen_filename(config["datapath"], which, sp), 'rb') as f:
            all_data = pickle.load(f)

        if self.ui.recoBox.isChecked():
            self.e_reco = np.array(bhist([all_data["e_reco"]]).centers)
            self.a_reco = np.array(bhist([all_data["a_reco"]]).centers)
        else:
            self.e_reco = np.array(all_data["e_true"])
            self.a_reco = np.array(all_data["a_true"])

        all_data = all_data["flux"]

        self.flux_null = np.zeros(shape=(len(self.e_reco), len(self.a_reco)))
        for key in all_data.keys():
            if self.check_key(key):
                if self.ui.recoBox.isChecked():
                    self.flux_null += np.array(all_data[key])
                else:
                    self.flux_null += self.apply_xs(np.array(all_data[key]), key)
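The pickled file is apparently a dictionary holding bin-edge arrays and a per-channel flux dictionary. The following hypothetical round-trip mirrors only that layout; the file name and the "Mu_nu_CC" key are illustrative placeholders, not taken from the repository.
import pickle
import numpy as np

e_edges = np.logspace(2, 7, 21)
a_edges = np.linspace(-1.0, 0.2, 11)
flux = {"Mu_nu_CC": np.random.rand(len(e_edges), len(a_edges))}

# write and read back a dictionary with the same structure as the real files
with open("example_flux.dat", "wb") as f:
    pickle.dump({"e_true": e_edges, "a_true": a_edges, "flux": flux}, f)

with open("example_flux.dat", "rb") as f:
    all_data = pickle.load(f)

flux_null = np.zeros(shape=(len(all_data["e_true"]), len(all_data["a_true"])))
for key in all_data["flux"].keys():
    flux_null += np.array(all_data["flux"][key])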
Example #3
null = SterileParams(0., 0., 0., 0.)
#ster = SterileParams(0., 0.1609, 0, 4.47)
ster = SterileParams(0., 0.1609, 0.2205, 4.47)

raw_null = gen_filename(config["datapath"], config["nu_flux"] + ".dat", null)
raw_ster = gen_filename(config["datapath"], config["nu_flux"] + ".dat", ster)

n_bins = 40

true_e_edges, depo_e_edges, null_flux, czenith_edges, errs = generate_singly_diff_fluxes(
    n_bins, datafile=raw_null)
true_e_edges, depo_e_edges, ster_flux, czenith_edges, errs = generate_singly_diff_fluxes(
    n_bins, datafile=raw_ster)

true_e_widths = np.array(bhist([true_e_edges]).widths)

czeniths = np.array(bhist([czenith_edges]).centers)
energies = np.array(bhist([depo_e_edges]).centers)

keys = [str(key) for key in null_flux.keys()]

# the flux is saved as [deposited e][true e][angle]
# we want to sum over the event (true) energies, so we will write the result into a new shape
# the new shape keeps only the deposited-energy and cos(zenith) dimensions
base_shape = np.shape(null_flux[keys[0]])
new_shape = (base_shape[0], base_shape[2])

just_one = False
keep_key = "Tau"
Example #4
    choice_e, unused = get_loc( 1e4, depos)

    print("Error: {}".format(get_ang_error(1e4)))

    #ang_odds = np.array([[dataobj.get_czenith_reco_odds(true,reco, choice_e) for true in range(len(ang)-1)] for reco in range(len(ang)-1)])
    #ang_odds = np.array([ ang_odds[it]/sum(ang_odds[it]) for it in range(len(ang_odds))])

    #ang_odds=np.log10(np.ma.masked_where(ang_odds<=1e-10,ang_odds))
   
    choice_a, unused = get_loc(-0.99, ang)

    plt.figure(2)
    ang_odds = np.array([dataobj.get_czenith_reco_odds( choice_a, reco, choice_e) for reco in range(len(ang)-1)])
    print("Sum: {}".format(sum(ang_odds)))

    plt.plot( bhist([ang]).centers, ang_odds)
    #me = plt.pcolormesh(ang,ang,ang_odds,cmap='gist_yarg')
    plt.xlabel("Reconstructed Angle", size=14)
    plt.ylabel("Probability Density",size=14)
    #cbar = plt.colorbar(me)
    #cbar.set_label("Prob Density")
    plt.savefig("fig_angtruereco.png",dpi=400)
    plt.show()
    
    #fluxes = np.array([(1.0 if (true<0.05 and true>-0.05) else 0.0) for true in a_true_centers])
    fluxes = np.array([(1.0 if true<-0.9 else 0.0) for true in a_true_centers])
    fluxes /= sum(fluxes*a_true_widths)

    n_e = 4
    chosen_energies = np.logspace(1, 5, n_e) 
    i_choice_e = [ get_loc(energy, depos)[0] for energy in chosen_energies ]
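get_loc appears to return the index of the bin containing a value (its second return value is unused here). Below is a hypothetical stand-in built on np.searchsorted, under that assumption; it is not the repository's implementation.
import numpy as np

def get_loc_sketch(value, edges):
    """Hypothetical stand-in for get_loc: index of the bin in `edges` that
    contains `value`, plus the fractional position within that bin."""
    edges = np.asarray(edges)
    if value < edges[0] or value > edges[-1]:
        raise ValueError("{} is outside [{}, {}]".format(value, edges[0], edges[-1]))
    index = min(int(np.searchsorted(edges, value, side="right")) - 1, len(edges) - 2)
    frac = (value - edges[index]) / (edges[index + 1] - edges[index])
    return index, frac

depos = np.logspace(1, 7, 61)        # example deposited-energy bin edges
choice_e, _ = get_loc_sketch(1e4, depos)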
Example #5
    decay_spectra += BrA1 * TauDecayToA1(Etau, Enu, P)
    decay_spectra += BrHad * TauDecayToHadrons(Etau, Enu, P)
    return decay_spectra


def TauDecayToAll(Etau, Enu, P=1):
    decay_spectra = 0
    decay_spectra += TauDecayToAllHadrons(Etau, Enu, P)
    decay_spectra += 2 * BrLepton * TauLeptonDecay(Etau, Enu, P)
    return decay_spectra


# integrate.quad( function, min, max)

if False:
    # print( integrate.quad( lambda z:TauDecayToAll(1.0, z), 0, 1) )

    from cascade.utils import bhist
    zs = np.logspace(-8, 0, 1000)
    cens = bhist([zs]).centers

    dz = np.array([TauDecayToAll(1.0, z) for z in cens])
    wids = bhist([zs]).widths

    import matplotlib
    matplotlib.use('TkAgg')
    import matplotlib.pyplot as plt

    plt.plot(cens, dz * wids)
    plt.show()
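The guarded block above approximates the integral that integrate.quad would compute by summing the integrand at bin centers times bin widths. A self-contained comparison for a known integrand, using plain arithmetic midpoints rather than whatever bhist computes for centers:
import numpy as np
from scipy import integrate

f = lambda z: 3.0 * z ** 2          # integrates to exactly 1 on [0, 1]

zs = np.logspace(-8, 0, 1000)
cens = 0.5 * (zs[:-1] + zs[1:])
wids = np.diff(zs)

binned = np.sum(np.array([f(z) for z in cens]) * wids)
exact, _ = integrate.quad(f, 0.0, 1.0)
print(binned, exact)                # the two should agree to several decimal places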
Example #6
        flux = {}
        for key in data.get_keys(just_casc=(not tracks),just_tracks=tracks):
            flux[key] = np.array(data.fluxes[key])
        return (np.array(data.energies), np.array(data.angles), flux)

    return( all_data["e_true"], all_data["a_true"], all_data["flux"] )

null = SterileParams(0., 0., 0., 0.)
ster = SterileParams(0., 0.1609, 0.2205, 4.47)
#ster = SterileParams(0., 0.1609, 0.0, 4.47)
print("Loading files from {}".format(config['datapath']))
e_true, a_true, flux_null    = _load_flux(null, False)
e_true, a_true, flux_sterile = _load_flux(ster, False)

keys = flux_null.keys()
centers = bhist([e_true]).centers


for key in keys:
    flav = get_flavor(key)
    neut = get_neut(key)
    curr = get_curr(key) 

    for i_energy in range(len(centers)):
        xs = get_xs(centers[i_energy], flav, neut, curr)

        pcent = 1.0
        
        if False: #("Tau" in key) and ("CC" in key):
            pcent = 0.51
Example #7
    with open(name, 'rb') as f:
        all_data = pickle.load(f)

    e_reco = all_data["e_true"]
    a_reco = all_data["a_true"]
    flux = all_data["flux"]

    return (e_reco, a_reco, flux)


e_reco, a_reco, flux = _load_flux(
    gen_filename(config["datapath"], config["nu_flux_downsize"] + ".dat",
                 SterileParams()))

energies = bhist([e_reco]).centers
a_widths = bhist([a_reco]).widths
angles = bhist([a_reco]).centers

keys = list(flux.keys())


def is_track(key):

    curr = key.split("_")[2].lower()
    if "nc" == curr:
        return (False)
    elif "cc" == curr:
        flavor = key.split("_")[0].lower()
        if flavor == "mu":
            return (True)
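The split("_") indexing suggests keys of the form <Flavor>_<nu|nuBar>_<CC|NC>. Here is a hypothetical, self-contained version of the same classification; the branch for non-muon charged-current keys is an assumption, since the snippet is cut off before is_track finishes.
def is_track_sketch(key):
    # keys are assumed to look like "<Flavor>_<nu|nuBar>_<CC|NC>"
    flavor, _, curr = key.split("_")
    if curr.lower() == "nc":
        return False                    # neutral-current events are cascades
    return flavor.lower() == "mu"       # only muon charged-current events leave tracks

print(is_track_sketch("Mu_nu_CC"))      # True
print(is_track_sketch("Tau_nuBar_NC"))  # False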
Example #8
    return (e_reco, a_reco, error)


e_reco, a_reco, kflux = _load_flux(
    gen_filename(config["datapath"], config["recon_flux"] + ".dat", 0.1339,
                 0.0, 1.3))

e_reco, a_reco, kerror = _load_error(
    gen_filename(config["datapath"], config["flux_error"] + ".dat", 0.1339,
                 0.0, 1.3))

flux = sum(kflux.values())
error = sum(kerror.values())

angle_widths = bhist([np.arccos(a_reco)]).widths
angle_centers = bhist([np.arccos(a_reco)]).centers
energy_centers = np.array(bhist([e_reco]).centers)
# first dim of flux is for energy
# flux[energy][angle]

print(np.shape(flux))

summed_flux = np.array([
    sum(flux[i_energy] * angle_widths)
    for i_energy in range(len(energy_centers))
])
summed_error = np.array([
    sum(error[i_energy] * angle_widths)
    for i_energy in range(len(energy_centers))
])
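The two list comprehensions are a sum over the angular axis weighted by the angular bin widths. With stand-in arrays of the same [energy][angle] layout, the same result comes from a single vectorized sum:
import numpy as np

example_flux = np.random.rand(20, 10)     # [energy][angle] stand-in
example_widths = np.random.rand(10)

loop_version = np.array([
    sum(example_flux[i_energy] * example_widths)
    for i_energy in range(example_flux.shape[0])
])
vector_version = np.sum(example_flux * example_widths, axis=1)
assert np.allclose(loop_version, vector_version)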
Example #9
null_total = np.zeros(shape = np.shape(flux_null[ex]))
sterile_total = np.zeros(shape = np.shape(flux_null[ex]))

just_nubar = False

keep_key = "Tau"
for key in flux_null.keys():
    if just_nubar and (keep_key not in key):
        print("Skip {}".format(key))
        continue

    null_total += flux_null[key]
    sterile_total += flux_sterile[key]

ratio = sterile_total / null_total

energies = np.array(bhist([e_reco]).centers)
czeniths = np.array(bhist([a_reco]).centers)

cf = plt.pcolormesh(czeniths, energies/(1e9), ratio, cmap=cm.coolwarm, vmin=1.0-width, vmax=1.0+width)
plt.yscale('log')
plt.ylabel("Reco Energy [GeV]", size=14)
plt.xlabel(r"Reco $\cos\theta$",size=14)
if just_nubar:
    plt.title("Only Looking at: {}".format(keep_key),size=14)
cbar = plt.colorbar()  # cf, ticks=ticker.LogLocator()
cbar.set_label(r"Sterile Flux / Null Flux")
plt.savefig("flux_ratio" +("_{}".format(keep_key) if just_nubar else "")+"_muonD.png",dpi=400)
plt.show()

Example #10
just_nubar = True
keep_key = "Tau"

null = SterileParams(0., 0., 0., 0.)
ster = SterileParams(0., 0.1609, 0.2296, 4.47)

e_true, a_true, flux_null = _load_flux(
    "/home/benito/software/data/cascade/poly_sib/downsized_raw_det_flux_0.0_0.0_0.0_0.0.dat"
)
e_true, a_true, flux_tau = _load_flux(
    "/home/benito/software/data/cascade/poly_sib/downsized_raw_det_flux_0.0_0.0_0.0_0.0Copy of .dat"
)

keys = flux_null.keys()
centers = bhist([e_true]).centers

for key in keys:
    flav = get_flavor(key)
    neut = get_neut(key)
    curr = get_curr(key)

    for i_energy in range(len(centers)):
        xs = get_xs(centers[i_energy], flav, neut, curr)

        if "nubar" in key.lower():
            P = -1
        else:
            P = 1

        pcent = 1.0
Example #11
    plt.legend()
    print("saving 'wow.png'")
    plt.savefig("wow_{:.2f}.png".format(glob_angle), dpi=400)

savefile = ".analysis_level.dat"

if mode == 8 or do_all:
    if load_stored and os.path.exists(savefile):
        event, cascade, nuflux, angle_edges = _load_data(glob_angle)
    else:
        event, cascade, nuflux, angle_edges = generate_singly_diff_fluxes(
            n_bins)

    from_muon, from_not = sep_by_flavor(nuflux)

    event_energies = np.array(bhist([event]).centers)
    cascade_energies = np.array(bhist([cascade]).centers)

    from_muon = np.ma.masked_where(from_muon <= 0, from_muon)
    from_not = np.ma.masked_where(from_not <= 0, from_not)

    plt.figure()
    levels = np.logspace(-50, -33, 10)
    print("Max of muon: {}".format(np.max(from_muon)))
    cf = plt.contourf(event_energies / const.GeV,
                      cascade_energies / const.GeV,
                      from_muon,
                      cmap=cm.coolwarm,
                      locator=ticker.LogLocator(),
                      levels=levels)
    plt.xscale('log')
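The load_stored and os.path.exists(savefile) branch above is a load-or-regenerate cache. A generic sketch of that pattern follows; the cache path and the expensive_compute placeholder are illustrative, not part of the repository.
import os
import pickle

cache_path = ".analysis_level_example.dat"   # illustrative cache file

def expensive_compute():
    # placeholder for generate_singly_diff_fluxes or similar heavy work
    return {"value": 42}

if os.path.exists(cache_path):
    with open(cache_path, "rb") as f:
        result = pickle.load(f)
else:
    result = expensive_compute()
    with open(cache_path, "wb") as f:
        pickle.dump(result, f)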
Example #12
def do_for_key(event_edges, e_deposited_edges, key, data, angles):
    """
    This function takes the desired bin edges for the event energies and deposited energies, along with the dictionary key corresponding to a specific combination of flavor, current, and neutrino type.

    It builds up the 2D flux array (singly differential), which it then returns along with a similar array of flux uncertainties.
    """
    evt = bhist([event_edges])
    cas = bhist([e_deposited_edges])

    event_energies = evt.centers
    event_widths = evt.widths
    deposited_energies = cas.centers
    e_deposited_widths = cas.widths

    flav = key.split("_")[0]
    curr = key.split("_")[2]
    neut = key.split("_")[1]

    ang_list = bhist([angles]).centers

    #flux = bhist((e_deposited_edges, event_edges, angles))
    #err = bhist((e_deposited_edges, event_edges, angles))

    flux = np.zeros( shape=(len(e_deposited_edges)-1, len(event_edges)-1, len(angles)-1))
    err = np.zeros( shape=(len(e_deposited_edges)-1, len(event_edges)-1, len(angles)-1))

    for i_a in range(len(ang_list)):
        angle = ang_list[i_a]

        # knowing the deposited (cascade) energy alone doesn't pin down the event energy,
        # so we loop over both, take flux*differential_xs at each bin combination, and multiply by the deposited-energy bin width to get the same units as in the CC case
        for evt_bin in range(len(event_energies)):
            for dep_bin in range(len(deposited_energies)):
                deposited_energy = deposited_energies[dep_bin] #in hadronic shower
                lepton_energy = event_energies[evt_bin] - deposited_energies[dep_bin]

                if deposited_energy>event_energies[evt_bin]:
                    continue

                if curr=="CC":
                    if flav=="E":
                        scale = 1.0
                    elif flav=="Tau":
                        scale = 0.51 # this was manually calculated as the best value, regardless of the actual tau energy 
                    else:
                        continue

                    # in the charged-current case, some of the lepton's energy gets deposited too:
                    # all of the electron's energy, and about half of the tau's
                    deposited_energy += scale*lepton_energy

                    try:
                        adj_dep_bin = get_loc( deposited_energy, e_deposited_edges )[0]
                    except ValueError:
                        continue
                else:
                    adj_dep_bin = dep_bin

                # we'll have nowhere to put these, so let's just skip this
                if deposited_energy < min(e_deposited_edges):
                    continue
                if deposited_energy > max(e_deposited_edges):
                    continue
        
                xs = get_diff_xs(event_energies[evt_bin], get_flavor(key), get_neut(key), get_curr(key), lepton_energy,0.0)*e_deposited_widths[dep_bin]*event_widths[evt_bin]

                amount = data.get_flux(event_energies[evt_bin], key, angle=angle)*xs*event_widths[evt_bin]
                amount_err = data.get_err(event_energies[evt_bin], key, angle=angle)*xs
                
                flux[adj_dep_bin][evt_bin][i_a] += amount/(e_deposited_widths[adj_dep_bin]*event_widths[evt_bin])
                err[adj_dep_bin][evt_bin][i_a] += amount_err/(e_deposited_widths[adj_dep_bin]*event_widths[evt_bin])

                #flux.register(amount, deposited_energy, event_energies[evt_bin], angle)
                #err.register(amount_err, deposited_energy, event_energies[evt_bin], angle)

    
    # build a new bhist in reconstruction space (Still with event energy too)
    # then scan through deposited-true angle space
    # and use the PDFS to smear the true values into reconstructed values, depositing them into the reconstruction bhist  

    return(flux, err)
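For charged-current events the bin lookup moves the entry to wherever the total deposited energy (hadronic plus the scaled lepton contribution) lands. A toy version of that step, using np.searchsorted in place of get_loc (an assumption about its behavior):
import numpy as np

e_deposited_edges = np.logspace(1, 7, 61)   # example deposited-energy bin edges
hadronic_energy = 3.0e3
lepton_energy = 5.0e3
scale = 0.51                                # the tau value used above

deposited_energy = hadronic_energy + scale * lepton_energy
if e_deposited_edges[0] <= deposited_energy <= e_deposited_edges[-1]:
    adj_dep_bin = int(np.searchsorted(e_deposited_edges, deposited_energy,
                                      side="right")) - 1
    print("deposit lands in bin", adj_dep_bin)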
Example #13
def incorporate_recon(event_edges, cascade_edges, nuflux, angle_edges,errors, params, just_flux=True):
    """
    This takes in the results from `generate_singly_diff_fluxes` and incorporates reconstruction uncertainties

    It expects lists or arrays of bin edges for the true and deposited energies, in units of eV,
    along with a list of true cos(zenith) bin edges.
    """
    if not isinstance(params, SterileParams):
        raise TypeError("Expected {} for params, not {}".format(SterileParams, type(params)))

    e_min = min(cascade_edges)
    e_max = max(cascade_edges)

    z_min = min(angle_edges)
    z_max = max(angle_edges)

    # we need to get all the centers for these bins with the given edges.
    # these will be associated with each of the bins in nuflux
    
    cascade_centers = bhist([cascade_edges]).centers
    depo_widths = bhist([cascade_edges]).widths
    true_e_centers = bhist([event_edges]).centers
    true_ang_centers = bhist([angle_edges]).centers
   
    true_e_widths = bhist([event_edges]).widths
    true_ang_widths = 2*pi*np.arccos(bhist([angle_edges]).widths)

    # these are reconstruction objects 
    r_energy = bhist([ np.logspace(np.log10(e_min), np.log10(e_max), int(len(cascade_edges)/2)) ])
    r_angle  = bhist([ np.linspace( z_min, z_max, int(len(angle_edges)/2))])
    
    print("Reconstruction Parameters")
    print("    Energy: {} to {} GeV".format(sci(e_min), sci(e_max)))
    print("    cos(t): {} to {} ".format(z_min, z_max))

    r_energy_centers = r_energy.centers
    r_energy_widths = r_energy.widths
    r_angle_centers = r_angle.centers
    r_angle_widths = 2*pi*np.arccos(r_angle.widths)

    #build the data object
    # this thing take in those edges and centers and correctly builds normalized probabilities for the given bins 
    dataobj = DataReco(r_energy.edges, r_angle.edges, cascade_edges, angle_edges)

    # may god have mercy on our souls 
    recoflux = {}
    total_flux = {}
    flux_error = {}
    for key in nuflux.keys():
        print("Reconstructing {} Flux".format(key))
        # energy x, angle y
        if not just_flux:
            recoflux[key] = np.zeros(shape=(len(r_energy_centers),len(true_e_centers), len(r_angle_centers),len(true_ang_centers)))
        total_flux[key] = np.zeros(shape=(len(r_energy_centers), len(r_angle_centers)))
        flux_error[key] = np.zeros(shape=(len(r_energy_centers), len(r_angle_centers)))
        for i_e_reco in range(len(r_energy_centers)):
            for i_e_depo in range(len(cascade_centers)):
                depo_odds = dataobj.get_energy_reco_odds(i_e_depo, i_e_reco) # unitless
                if depo_odds<0.:
                    raise ValueError("Negative energy reco odds {}".format(depo_odds))
                elif depo_odds==0:
                    continue
                for i_a_true in range(len(true_ang_centers)):
                    for i_a_reco in range(len(r_angle_centers)):
                        ang_odds = dataobj.get_czenith_reco_odds(i_a_true, i_a_reco,i_e_depo) # unitless 
                        if ang_odds<0.:
                            raise ValueError("Negative angular reconstrucion odds: {}".format(ang_odds))
                        elif ang_odds==0:
                            continue
                        for i_e_true in range(len(true_e_centers)):
                            scale = true_ang_widths[i_a_true]*true_e_widths[i_e_true]*depo_widths[i_e_depo]/(r_angle_widths[i_a_reco]*r_energy_widths[i_e_reco])


                            amt = nuflux[key][i_e_depo][i_e_true][i_a_true]*depo_odds*ang_odds*scale
                            amt_err = errors[key][i_e_depo][i_e_true][i_a_true]*depo_odds*ang_odds*scale

                            if amt>=0:
                                if not just_flux:
                                    recoflux[key][i_e_reco][i_e_true][i_a_reco][i_a_true] += amt 
                                total_flux[key][i_e_reco][i_a_reco] += amt
                                flux_error[key][i_e_reco][i_a_reco] += amt_err

    if not just_flux:
        reco_flux_name = gen_filename( config["datapath"], config["all_fluxes"]+".dat",params)
        savefile(reco_flux_name, e_reco=r_energy.edges, e_true=event_edges, a_reco=r_angle.edges,a_true=angle_edges,flux=recoflux)

    flux_file = gen_filename(config["datapath"], config["recon_flux"]+".dat", params)
    savefile(flux_file, e_reco=r_energy.edges, a_reco=r_angle.edges, flux=total_flux)
    err_file = gen_filename(config["datapath"],config["flux_error"]+".dat", params)
    savefile(err_file, e_reco=r_energy.edges, a_reco=r_angle.edges, error=flux_error)
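The nested loops amount to pushing a true-space flux density through per-bin reconstruction odds, with the scale factor converting between true and reconstructed bin volumes. Here is a one-dimensional toy of that bookkeeping, with a random probability table standing in for DataReco; the final assert checks that the integrated flux is preserved by the smearing.
import numpy as np

true_edges = np.logspace(1, 5, 21)
reco_edges = np.logspace(1, 5, 11)
true_w = np.diff(true_edges)
reco_w = np.diff(reco_edges)

true_flux = np.random.rand(len(true_w))               # flux density per true bin
prob = np.random.rand(len(true_w), len(reco_w))
prob /= prob.sum(axis=1, keepdims=True)               # each true bin's odds sum to 1

reco_flux = np.zeros(len(reco_w))
for i_true in range(len(true_w)):
    for i_reco in range(len(reco_w)):
        scale = true_w[i_true] / reco_w[i_reco]       # density conversion, like `scale` above
        reco_flux[i_reco] += true_flux[i_true] * prob[i_true, i_reco] * scale

# the smeared density integrates to the same total flux as the true one
assert np.isclose(np.sum(true_flux * true_w), np.sum(reco_flux * reco_w))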