def __call__(self, E_total, E_depo, P):
        if not isinstance(E_depo, (int, float)):
            raise TypeError("Expected {} for E_depo, not {}".format(
                float, type(E_depo)))

        if E_depo > E_total:
            return 0.

        p0 = (E_total, E_depo)

        total_e_lower, total_e_upper = get_loc(E_total, self._total_energies)
        had_e_lower, had_e_upper = get_loc(E_depo, self._depo_energies)

        p1 = (self._total_energies[total_e_lower],
              self._depo_energies[had_e_lower])
        p2 = (self._total_energies[total_e_upper],
              self._depo_energies[had_e_upper])

        if P == 1:
            p_i = 0
        elif P == -1:
            p_i = 1
        else:
            raise ValueError("P not recognized: {}, use 1 or -1".format(P))

        q11 = self._xs_scale[p_i][total_e_lower][had_e_lower]
        q12 = self._xs_scale[p_i][total_e_lower][had_e_upper]
        q21 = self._xs_scale[p_i][total_e_upper][had_e_lower]
        q22 = self._xs_scale[p_i][total_e_upper][had_e_upper]

        return bilinear_interp(p0, p1, p2, q11, q12, q21, q22)
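
# Every snippet on this page leans on a helper called get_loc, whose implementation
# is not shown. The sketch below is a minimal stand-in inferred from the call sites
# (the real helper may differ): given a value and a sorted sequence of bin edges,
# return the indices of the two edges bracketing the value, and raise ValueError
# when the value falls outside the sequence.
def get_loc(value, edges):
    """Return (lower, upper) indices of the entries in `edges` that bracket `value`."""
    if value < edges[0] or value > edges[-1]:
        raise ValueError("{} outside [{}, {}]".format(value, edges[0], edges[-1]))
    for i in range(len(edges) - 1):
        if edges[i] <= value <= edges[i + 1]:
            return (i, i + 1)
    return (len(edges) - 2, len(edges) - 1)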
def _load_data(glob_angle=None):
    """
    Loads the datafile. Returns tuple

    0 - parent energies
    1 - child energies
    2 - nuflux
    3 - cos(zenith) edges
    """
    print("Loading Data")
    f = open(savefile, 'rb')
    all_data = pickle.load(f)
    f.close()
    angle_edges = all_data["czenith"]
    nuflux = all_data["flux"]

    if glob_angle is not None:
        # figure out which slice to return
        # [:,:,N] would return the N'th angle bin
        if glob_angle < min(angle_edges) or glob_angle > max(angle_edges):
            # the requested angle is outside the binning, so zero out every flux
            for key in nuflux.keys():
                nuflux[key] = nuflux[key][:, :, 0] * 0.
        else:
            lower, upper = get_loc(glob_angle, angle_edges)

            print("Grabbed angle bin {}".format(lower))
            width = abs( np.arccos(angle_edges[upper]) - np.arccos(angle_edges[lower]))

            for key in nuflux.keys():
                nuflux[key] = nuflux[key][:,:,lower]*width
                

    return( all_data["e_true"], all_data["e_depo"], \
                nuflux, angle_edges )
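
# A minimal usage sketch for _load_data, assuming `savefile` points at the pickled
# flux dictionary described in the docstring; the flux key shown is hypothetical.
#
#   e_true, e_depo, nuflux, cz_edges = _load_data(glob_angle=-0.8)
#   print(np.shape(nuflux["E_nu_CC"]))   # 2D once an angle slice is taken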
def _ice_grad(data_dict, grad_no):
    """
    Calculates the expected systematic uncertainty in each bin from the ice gradients published in the MEOWS paper
    """
    if not isinstance(data_dict, dict):
        raise TypeError("Expected {}, got {}".format(dict, type(data_dict)))

    new_uncertainty = np.zeros(shape=np.shape(data_dict["stat_err"]))

    energies = data_dict["e_edges"]
    cos_th = data_dict["a_edges"]

    ice_grads = np.loadtxt(
        os.path.join(os.path.dirname(__file__), "resources", "icegrad.dat"))
    ice_e = np.logspace(log10(500), 4, 13)

    for e_i in range(len(energies) - 1):
        for a_i in range(len(cos_th) - 1):
            if energies[e_i] < ice_e[0]:
                ice_bin = 0
            elif energies[e_i] > ice_e[-1]:
                ice_bin = len(ice_e) - 1
            else:
                ice_bin = get_loc(energies[e_i], ice_e)[0]
            new_uncertainty[e_i][a_i] = ice_grads[ice_bin][grad_no + 1] / 100.0

    p_pert = (1 + new_uncertainty) * data_dict["event_rate"]
    m_pert = (1 - new_uncertainty) * data_dict["event_rate"]

    return _flipper(data_dict["event_rate"], (m_pert, p_pert))
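
# _flipper is not shown in any of these snippets. From the call above it receives
# the nominal event rate plus a (minus, plus) pair of perturbed rates; a plausible
# minimal stand-in (an assumption, not the published implementation) converts that
# pair into signed shifts about the nominal rate:
def _flipper_sketch(nominal, perturbed):
    m_pert, p_pert = perturbed
    return (m_pert - nominal, p_pert - nominal)  # (downward shift, upward shift)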
Example #4
    def get_interp_flux(self):
        """
        This creates the interpolated flux: it reads the currently set angles, finds the four neighboring fluxes, and performs a bilinear interpolation
        """

        # this gets the indices of the two mixing angle values neighboring the intermediate one we have now
        i_x1, i_x2 = get_loc(self.electron_angle, self.theta03s)
        i_y1, i_y2 = get_loc(self.tau_angle, self.theta23s)

        # now let's build the parameter objects using those neighboring points we have
        param_11 = SterileParams(self.theta03s[i_x1], self.thetamu,
                                 self.theta23s[i_y1], self.msq)
        param_12 = SterileParams(self.theta03s[i_x1], self.thetamu,
                                 self.theta23s[i_y2], self.msq)
        param_21 = SterileParams(self.theta03s[i_x2], self.thetamu,
                                 self.theta23s[i_y1], self.msq)
        param_22 = SterileParams(self.theta03s[i_x2], self.thetamu,
                                 self.theta23s[i_y2], self.msq)

        which = (config["recon_flux"] if self.ui.recoBox.isChecked() else
                 config["nu_flux_downsize"]) + ".dat"

        # using those indices, we generate the names of the flux files and load
        flux_11 = self._load_flux_file(
            gen_filename(config["datapath"], which, param_11))
        flux_12 = self._load_flux_file(
            gen_filename(config["datapath"], which, param_12))
        flux_21 = self._load_flux_file(
            gen_filename(config["datapath"], which, param_21))
        flux_22 = self._load_flux_file(
            gen_filename(config["datapath"], which, param_22))

        # these are useful intermediates used for my bilinear interpolation function
        p0 = (self.electron_angle, self.tau_angle)
        p1 = (self.theta03s[i_x1], self.theta23s[i_y1])
        p2 = (self.theta03s[i_x2], self.theta23s[i_y2])

        return bilinear_interp(p0, p1, p2, flux_11, flux_12, flux_21, flux_22)
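
# bilinear_interp is the other shared helper on this page. Below is a minimal
# sketch consistent with the call sites (p0 is the query point, p1 and p2 the
# lower-left and upper-right known points, q11..q22 the values at the four
# corners); the real helper may differ:
def bilinear_interp(p0, p1, p2, q11, q12, q21, q22):
    (x, y), (x1, y1), (x2, y2) = p0, p1, p2
    norm = (x2 - x1) * (y2 - y1)
    return (q11 * (x2 - x) * (y2 - y)
            + q12 * (x2 - x) * (y - y1)
            + q21 * (x - x1) * (y2 - y)
            + q22 * (x - x1) * (y - y1)) / norm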
Example #5
def load(debug=False, e_edges = np.logspace(2,9,29)):

    filename = "/home/benito/software/data/cascade/hg_sib/charm_search_supplemental/event_properties.txt"
    data = np.loadtxt(filename, dtype=str)
    """
    Entries
        0 - event no
        1 - topology
        2 - photoelectrons
        3 - energy 
        4 - energy error, atmospheric
        5 - energy error, astro spectrum -2.47
        6 - energy error, astro spectrum -2
        7 - declination 
        8,9,10 - error on declination (same as before)
    """

    n_below = 0
    n_above = 0
    n_tracks = 0
    n_cascades = 0

    # NOTE: this overrides the `e_edges` keyword argument in the signature above
    e_edges = np.logspace(2, 7, 29)
    #e_edges = np.logspace(2,8,21)

    occupation = np.zeros(shape=(len(e_edges)-1, 2))
    # one bin in cth 
    for event in data:
        if event[1].lower()=="track":
            n_tracks +=1
            continue
        else:
            n_cascades += 1
        # we have the direction the particles came from stored,
        # not the direction they are going!
        cth = cos((pi / 2) + float(event[7]) * pi / 180)
        energy = float(event[3]) * 1e3  # TeV -> GeV
        if cth<0.2:
            n_below+=1
            i_bin = get_loc(energy, e_edges)[0]
            
            occupation[i_bin][0]+=1
        else:
            n_above+=1
#            occupation[i_bin][1]+=1
    print("Found {} tracks, {} cascades; {} below".format(n_tracks, n_cascades, n_below))
    if debug:
        return occupation
    else:
        return np.array(occupation)
def add_contour(filename, ls, cmap='cool', chop=False):
    with open(filename, 'rb') as f:
        data = pickle.load(f)
    this_chi = np.array(data["chi2s"])
    evs = [0.5, 1.0, 3.0, 10.0]

    count = 0
    for ev in evs:

        which_sliver = get_loc(ev, msqs)

        temp = np.zeros(shape=(2, len(theta24s), len(theta34s)))
        for t24 in range(len(theta24s)):
            for t34 in range(len(theta34s)):
                temp[0][t24][t34] = this_chi[t24][t34][which_sliver[0]]
                temp[1][t24][t34] = this_chi[t24][t34][which_sliver[1]]
        final_chi = get_closest(ev,
                                [msqs[which_sliver[0]], msqs[which_sliver[1]]],
                                temp)

        if chop:
            # need to chop off the right side :frown:
            for i_x in range(len(scale_x)):
                for i_y in range(len(scale_y[i_x])):
                    if final_chi[i_x][i_y] < 0 or final_chi[i_x][i_y] > 1000:
                        final_chi[i_x][i_y] = None
                    # was scale_x[i_x][i_x]; indexing the same axis twice looks like a typo
                    if scale_x[i_x][i_y] > 6e-2:
                        final_chi[i_x][i_y] = None

        ct = plt.contour(scale_x,
                         scale_y,
                         final_chi.transpose(),
                         levels=chis_l,
                         cmap=cmap,
                         linestyles=ls)
        ct.collections[0].set_color(get_color(count + 1,
                                              len(evs) + 1, "magma"))
        suffix = "Joint" if chop else "Cascades"
        ct.collections[0].set_label("{}, {:.1f}".format(suffix, ev) +
                                    r"eV$^{2}$")
        count += 1
    return ct
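
# Hypothetical usage sketch for add_contour; the file names are placeholders,
# not paths from the original analysis.
#
#   ct = add_contour("joint_likelihood.dat", ls="solid", chop=True)
#   add_contour("cascade_likelihood.dat", ls="dashed")
#   plt.legend()
#   plt.show()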
Example #7
def get_slice(eV: float, chis: np.ndarray):
    """
    Get a slice of the chi-squared arrays at the requested mass squared difference 
    """
    which_sliver = get_loc(eV, msqs)
    print("slivers: {}".format(which_sliver))
    if interpolate:
        chis_final = np.zeros(shape=(2, len(chis), len(chis[0])))
        for t24 in range(len(chis)):
            for t34 in range(len(chis[t24])):
                #print("{} {}".format(t24, t34))
                chis_final[0][t24][t34] = chis[t24][t34][which_sliver[0]]
                chis_final[1][t24][t34] = chis[t24][t34][which_sliver[1]]
        return get_closest(eV, [msqs[which_sliver[0]], msqs[which_sliver[1]]],
                           chis_final)
    else:
        which_sliver = which_sliver[0]
        new_chis = np.zeros(shape=(len(theta24s), len(theta34s)))
        for i24 in range(len(theta24s)):
            for i34 in range(len(theta34s)):
                new_chis[i24][i34] = chis[i24][i34][which_sliver]
        return new_chis
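
# get_closest appears wherever the requested mass splitting falls between two grid
# points. A minimal stand-in consistent with the call sites (linear interpolation
# between the two bracketing slivers; the real helper may be more careful):
def get_closest_sketch(x, xs, ys):
    x1, x2 = xs
    y1, y2 = ys  # array slices at x1 and x2
    if x2 == x1:
        return y1
    frac = (x - x1) / (x2 - x1)
    return y1 + frac * (y2 - y1)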

# --- The lines below are a fragment of a separate plotting snippet; the enclosing
# function (which defines `axes`, `ctf`, `figs`, `dataobj`, `depos`, and `recos`)
# was lost in extraction. ---

axes[0].set_xscale('log')
axes[0].set_yscale('log')
axes[0].set_xlim([10**0, 10**7])
axes[0].set_ylim([10**0, 10**7])
axes[0].set_xlabel("Deposited [GeV]", size=14)
axes[0].set_ylabel("Reconstructed [GeV]", size=14)
divider = make_axes_locatable(axes[0])
cax = divider.append_axes('right', size='5%', pad=0.05)
cbar = plt.colorbar(mappable=ctf, cax=cax)  #, ticks=ticker.LogLocator())
cbar.set_label("Probability Density")

# slices!
slices = 10**np.array([1, 2, 3, 4, 5])
for cut in slices:
    left, right = get_loc(float(cut), depos)
    loc = left if abs(cut - depos[left]) < abs(cut - depos[right]) else right
    width_factor = dataobj.depo_energy_widths[loc]
    odds = [dataobj.get_energy_reco_odds(loc, val) for val in range(len(recos))]
    axes[1].plot(recos, odds, color=(.1, .1, .1))

axes[1].set_xscale('log')
axes[1].set_xlabel("Reconstructed [GeV]", size=14)
axes[1].set_ylabel("Prob. Density", size=14)

figs.savefig("fig_deporeco.png", dpi=400)
plt.show()

choice_e, unused = get_loc(1e4, depos)

print("Error: {}".format(get_ang_error(1e4)))
Example #9
# MINOS exclusion angles (7 and 26 degrees), presumably converted to radians via `deg`
minos_24 = 7.0 / deg
minos_34 = 26. / deg

scaled_minos_24 = np.sin(minos_24)**2
t24s = np.logspace(-3, np.log10(scaled_minos_24), 1000)
scaled_minos_34 = [(np.sin(minos_34)**2) * (np.cos(theta)**2) for theta in t24s]

super_k = np.transpose(np.loadtxt("sk.dat", delimiter=","))
deepcore = np.transpose(np.loadtxt("deepcore.dat", delimiter=","))
antares = np.transpose(np.loadtxt("antares.dat", delimiter=","))

color_count = 0
for ev in evs:
    color_count+=1

    which_sliver = get_loc(ev, msqs)
    print(which_sliver)
    if interp:
        chis_f = np.zeros(shape=(2, len(theta24s), len(theta34s)))
        for t24 in range(len(theta24s)):
            for t34 in range(len(theta34s)):
                chis_f[0][t24][t34] = chi2[t24][t34][which_sliver[0]]
                chis_f[1][t24][t34] = chi2[t24][t34][which_sliver[1]]
        chis = get_closest( ev, [msqs[which_sliver[0]], msqs[which_sliver[1]]], chis_f)


    else:
        sliver = which_sliver[0]
        chis = np.zeros(shape=(len(theta24s), len(theta34s)))
        # the snippet is truncated here in the source; fill the slice the same way
        # the other examples on this page do
        for t24 in range(len(theta24s)):
            for t34 in range(len(theta34s)):
                chis[t24][t34] = chi2[t24][t34][sliver]

def do_for_key(event_edges, e_deposited_edges, key, data, angles):
    """
    This function takes the desired bin edges for the event energies and deposited energies, along with the dictionary key corresponding to a specific combination of flavor, current, and neutrino type.

    It builds up the 2D flux array (singly differential), which it then returns along with a similar array of flux uncertainties.
    """
    evt = bhist([event_edges])
    cas = bhist([e_deposited_edges])

    event_energies = evt.centers
    event_widths = evt.widths
    deposited_energies = cas.centers
    e_deposited_widths = cas.widths

    flav = key.split("_")[0]
    curr = key.split("_")[2]
    neut = key.split("_")[1]

    ang_list = bhist([angles]).centers

    #flux = bhist((e_deposited_edges, event_edges, angles))
    #err = bhist((e_deposited_edges, event_edges, angles))

    flux = np.zeros( shape=(len(e_deposited_edges)-1, len(event_edges)-1, len(angles)-1))
    err = np.zeros( shape=(len(e_deposited_edges)-1, len(event_edges)-1, len(angles)-1))

    for i_a in range(len(ang_list)):
        angle = ang_list[i_a]

        # in this case, knowing the cascade doesn't tell us anything about the event energy. 
        # so we loop over both, get the flux*differential_xs at each bin combination, and multiply by the widths of deposited-energy-bin to get the same units as in the CC case 
        for evt_bin in range(len(event_energies)):
            for dep_bin in range(len(deposited_energies)):
                deposited_energy = deposited_energies[dep_bin] #in hadronic shower
                lepton_energy = event_energies[evt_bin] - deposited_energies[dep_bin]

                if deposited_energy>event_energies[evt_bin]:
                    continue

                if curr=="CC":
                    if flav=="E":
                        scale = 1.0
                    elif flav=="Tau":
                        scale = 0.51 # this was manually calculated as the best value, regardless of the actual tau energy 
                    else:
                        continue

                    # in the charge current, some of the lepton's energy gets deposited too
                    # All of the electrons, and about half the tau's 
                    deposited_energy += scale*lepton_energy

                    try:
                        adj_dep_bin = get_loc( deposited_energy, e_deposited_edges )[0]
                    except ValueError:
                        continue
                else:
                    adj_dep_bin = dep_bin

                # we'll have nowhere to put these, so let's just skip this
                if deposited_energy < min(e_deposited_edges):
                    continue
                if deposited_energy > max(e_deposited_edges):
                    continue
        
                xs = (get_diff_xs(event_energies[evt_bin], get_flavor(key),
                                  get_neut(key), get_curr(key),
                                  lepton_energy, 0.0)
                      * e_deposited_widths[dep_bin] * event_widths[evt_bin])

                amount = data.get_flux(event_energies[evt_bin], key,
                                       angle=angle) * xs * event_widths[evt_bin]
                amount_err = data.get_err(event_energies[evt_bin], key,
                                          angle=angle) * xs

                flux[adj_dep_bin][evt_bin][i_a] += amount / (
                    e_deposited_widths[adj_dep_bin] * event_widths[evt_bin])
                err[adj_dep_bin][evt_bin][i_a] += amount_err / (
                    e_deposited_widths[adj_dep_bin] * event_widths[evt_bin])

                #flux.register(amount, deposited_energy, event_energies[evt_bin], angle)
                #err.register(amount_err, deposited_energy, event_energies[evt_bin], angle)

    
    # build a new bhist in reconstruction space (Still with event energy too)
    # then scan through deposited-true angle space
    # and use the PDFS to smear the true values into reconstructed values, depositing them into the reconstruction bhist  

    return flux, err
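
# Hypothetical driver sketch for do_for_key: loop over flavor_neutrino_current keys
# and accumulate the per-key fluxes. The key names here are illustrative only.
#
#   total_flux = None
#   for key in ["E_nu_CC", "Tau_nu_CC", "Mu_nu_NC"]:
#       flux, err = do_for_key(event_edges, e_deposited_edges, key, data, angles)
#       total_flux = flux if total_flux is None else total_flux + flux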
Example #11
data1 = openfl(
    "/home/benito/software/data/cascade/hg_sib/0.0/joint_likelihood_0.0_0.0_0.0_0.0.dat"
)
data2 = openfl(
    "/home/benito/software/data/cascade/hg_sib/0.0/joint_likelihood_nosys_0.0_0.0_0.0_0.0.dat"
)
theta24s = data1["theta24s"]
theta34s = data1["theta34s"]
print(len(theta24s))
print(len(theta34s))
msqs = data1["msqs"]
chi1 = np.array(data1["chi2s"])
chi2 = np.array(data2["chi2s"])

th24_cut = asin(sqrt(0.03))
theta24s_cut = theta24s[:get_loc(th24_cut, theta24s)[0]]
chis1_cut = chi1[:get_loc(th24_cut, theta24s)[0]]
chis2_cut = chi2[:get_loc(th24_cut, theta24s)[0]]
xs_cut, ys_cut = np.meshgrid(theta24s_cut, theta34s)
scale_x_cut = np.sin(xs_cut)**2
scale_y_cut = (np.cos(xs_cut)**2) * (np.sin(ys_cut)**2)

# relevant 90% chi squared for three DOF
chis_l = [6.251]
labels = ["90%"]
assert (len(chis_l) == len(labels))
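
# Sanity check: the 6.251 threshold matches scipy's chi-squared quantile for a
# 90% confidence level with three degrees of freedom:
#
#   from scipy.stats import chi2 as chi2_dist   # alias avoids the chi2 array above
#   print(chi2_dist.ppf(0.90, df=3))            # ~6.2514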


def set_lbls(ct_plot):
    fmt = {}
    for l, s in zip(ct_plot.levels, labels):
        fmt[l] = s  # map each contour level onto its label text
    # the body is truncated in the source; a standard clabel call would complete it,
    # e.g. plt.clabel(ct_plot, fmt=fmt, inline=True, fontsize=10)


# --- The method below comes from a different snippet (an effective-area lookup
# class); the separating header was lost in extraction. ---
    def __call__(self, energy: float, cth: float, flavor: str):
        if flavor != "e" and flavor != "mu" and flavor != "tau":
            raise ValueError("Unrecognized flavor string: {}".format(flavor))

        i_ang = get_loc(cth, self.a_edges)[0]
        return self.fits[flavor][i_ang](energy) * 1e4  # m^2 to cm^2
def build_mc_flux(*dataobjs, livetime=10):
    """
    Takes some Data objects and uses the MC we have to build up expectation arrays 

    the optional livetime arg scales the expectation to to that number of years of uptime 
    """
    cobalt = os.environ.get("_CONDOR_SCRATCH_DIR")
    filename = "NuFSGenMC_nominal.dat"
    if cobalt is None or cobalt == "" or cobalt == ".":
        file_dir = "/home/benito/Downloads/IC86SterileNeutrinoDataRelease/monte_carlo/" + filename
    else:
        file_dir = os.path.join(cobalt, "data", filename)

    def metaflux(energy, angle, key):
        net_f = 0.0
        for dobj in dataobjs:
            net_f = net_f + dobj.get_flux((1e9) * energy, key, angle=angle)
        return net_f

    filename = "effective_area.per_bin.nu_mu.cc.track.txt"
    area_data = quickload_full(
        os.path.join(
            os.path.join(config["datapath"], "charm_search_supplemental",
                         "effective areas/"), filename))

    e_edges = area_data["e_reco"]
    a_edges = area_data["cth_true"]
    net_flux = np.zeros(shape=(len(e_edges) - 1, len(a_edges) - 1))

    f = open(file_dir, 'rt')
    print("Parsing MC")
    while True:
        line = f.readline()
        if line == "":
            break  #EOF

        parsed = parseline(line)
        if parsed is None:
            continue  # comment line

        if parsed[0] < 0:
            key = "Mu_nuBar_CC"
        else:
            key = "Mu_nu_CC"

        flux_here = metaflux(parsed[3], parsed[4], key)

        if flux_here == 0.0:
            continue

        if parsed[1] < e_edges[0] or parsed[1] > e_edges[-1]:
            continue
        if parsed[2] < a_edges[0] or parsed[2] > a_edges[-1]:
            continue
        i_e = get_loc(parsed[1], e_edges)[0]
        i_a = get_loc(parsed[2], a_edges)[0]

        # let's let this event add to the total in this bin!
        # the 365/343.7 factor presumably scales ~343.7 live days up to a full year
        net_flux[i_e][i_a] = net_flux[i_e][i_a] + livetime * (
            365. / 343.7) * parsed[5] * flux_here

    f.close()
    return {
        "e_edges": e_edges,
        "a_edges": a_edges,
        "event_rate": net_flux,
        "stat_err": np.sqrt(net_flux)
    }
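
# Hypothetical usage sketch for build_mc_flux: combine two Data objects into an
# eight-year expectation. The object names are placeholders.
#
#   expectation = build_mc_flux(atmo_data, astro_data, livetime=8)
#   print(np.sum(expectation["event_rate"]), "expected events")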

# (the opening of this snippet was lost in extraction; the stray lines below were
# the trailing arguments of a plt.clabel call)
# plt.clabel(ct,
#            inline=True,
#            fmt=fmt,
#            fontsize=10,
#            manual=loc)


evs = [1.1, 5.0, 10.0]


def s2(theta):
    si = np.sin(2 * theta)
    return si * si


for ev in evs:
    which_sliver = get_loc(ev, msqs)[0]
    chis = np.zeros(shape=(len(theta24s), len(theta34s)))
    for t24 in range(len(theta24s)):
        for t34 in range(len(theta34s)):
            chis[t24][t34] = chi2[t24][t34][which_sliver]

    if log_mode:
        plt.pcolormesh(s2(theta24s),
                       s2(theta34s),
                       chis.transpose(),
                       vmin=0,
                       vmax=20,
                       cmap="PuBu")
        cbar = plt.colorbar(extend='max')
        ct = plt.contour(s2(theta24s),
                         s2(theta34s),
                         chis.transpose())
        # the call is truncated in the source; any remaining arguments are lost
Example #15
# --- Fragment of a separate snippet; the enclosing function (which defines
# `fudge_file`) was lost in extraction. ---

raw_fudge = np.transpose(np.loadtxt(fudge_file, delimiter=","))
energies = raw_fudge[0]
fudge = raw_fudge[1]

bin_angles = np.linspace(-1, 1, 11)
bin_energies = np.logspace(2, 8, 21)

full_fudge = np.zeros(shape=(20, 10))

for e_i in range(20):
    if bin_energies[e_i] < energies[0]:
        j = 0
    elif bin_energies[e_i] > energies[-1]:
        j = -1
    else:
        j = get_loc(0.5 * (bin_energies[e_i] + bin_energies[e_i + 1]),
                    energies)[0]
    for a_i in range(10):
        full_fudge[e_i][a_i] = fudge[j]

#true_fudge = np.load("../full_fudge.npy")

# fix_norm
options = {
    "is_mc": False,
    "skip_missing": True,
    "use_syst": True,
    "smear": True
}

#llhood = doLLH("expected_flux_smearedwell.dat",central_exp=central, options=options)
llhood = doLLH("best_expected_flux.dat", central_exp=central, options=options)