Example #1
    def get_aeff_flavor(self,flavor,aeff_egy_par,aeff_coszen_par):
        '''
        Creates the 2D aeff array from the parameterized aeff
        vs. energy .dat file, an input to the parametric settings file.
        '''

        aeff_file = aeff_egy_par[flavor]
        aeff_arr = np.loadtxt(open_resource(aeff_file)).T
        # interpolate
        aeff_func = interp1d(aeff_arr[0], aeff_arr[1], kind='linear',
                             bounds_error=False, fill_value=0)

        czcen = get_bin_centers(self.czbins)
        ecen = get_bin_centers(self.ebins)

        # Get 1D array interpolated values at bin centers, assume no cz dep
        aeff1d = aeff_func(ecen)

        # Make this into a 2D array:
        aeff2d = np.reshape(np.repeat(aeff1d, len(czcen)), (len(ecen), len(czcen)))

        # Now add cz-dependence, assuming nu and nu_bar have the same dependence:
        cz_dep = eval(aeff_coszen_par[flavor.strip('_bar')])(czcen)
        # Normalize:
        cz_dep *= len(cz_dep)/np.sum(cz_dep)

        return (aeff2d*cz_dep)
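Example #1 assumes get_bin_centers returns one center per bin. A minimal sketch of such a helper, assuming plain arithmetic midpoints (the actual helper may treat log-spaced energy bins differently, e.g. via geometric means):

import numpy as np

def get_bin_centers(edges):
    """Arithmetic midpoints of consecutive bin edges (assumption)."""
    edges = np.asarray(edges, dtype=float)
    return 0.5 * (edges[:-1] + edges[1:])

# With this, the 2D map built above is simply an outer product:
# aeff2d * cz_dep  is equivalent to  np.outer(aeff1d, cz_dep)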
Example #2
    def __init__(self,ebins,czbins,particle_ID=None,**kwargs):

        logging.info('Initializing PIDServicePar...')

        #Evaluate the functions at the bin centers
        ecen = get_bin_centers(ebins)
        czcen = get_bin_centers(czbins)

        self.pid_maps = {}
        for signature in particle_ID.keys():
            #Generate the functions
            to_trck_func = eval(particle_ID[signature]['trck'])
            to_cscd_func = eval(particle_ID[signature]['cscd'])

            #Make maps from the functions evaluated at the bin centers
            _,to_trck_map = np.meshgrid(czcen, to_trck_func(ecen))
            _,to_cscd_map = np.meshgrid(czcen, to_cscd_func(ecen))

            for label,pidmap in [('Track',to_trck_map),('Cascade',to_cscd_map)]:
                if (pidmap < 0).any():
                    raise ValueError('%s PID probabilities cannot be negative!'
                        ' Investigate parameterization'%label)

            self.pid_maps[signature] = {'trck':to_trck_map,
                                        'cscd':to_cscd_map}
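The second output of np.meshgrid(czcen, vals) repeats the energy-dependent values across all cos(zenith) columns, giving a map of shape (n_e, n_cz). A small self-contained check of that broadcasting trick (toy values, hypothetical PID shape):

import numpy as np

ecen = np.array([6.0, 10.0, 18.0])            # toy energy bin centers
czcen = np.array([-0.9, -0.5, -0.1, 0.3])     # toy cos(zenith) bin centers
vals = 1.0 / (1.0 + np.exp(-(ecen - 10.0)))   # toy energy-dependent PID fraction

_, pid_map = np.meshgrid(czcen, vals)         # shape (len(ecen), len(czcen))
assert pid_map.shape == (3, 4)
assert np.array_equal(pid_map, np.tile(vals[:, None], (1, len(czcen))))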
Example #3
    def __init__(self, ebins, czbins, detector_depth=None, earth_model=None,
                 prop_height=None, oversample_e=None,oversample_cz=None,
                 **kwargs):
        """
        \params:
          * ebins: Energy bin edges
          * czbins: cos(zenith) bin edges
          * earth_model: Earth density model used for matter oscillations.
          * detector_depth: Detector depth in km.
          * prop_height: Height in the atmosphere at which propagation begins, in km.
        """

        logging.info('Instantiating %s'%self.__class__.__name__)
        self.ebins = np.array(ebins)
        self.czbins = np.array(czbins)
        for ax in [self.ebins, self.czbins]:
            if (len(np.shape(ax)) != 1):
                raise IndexError('Axes must be 1d! '+str(np.shape(ax)))

        report_params(get_params(),['km','','','','km'])

        earth_model = find_resource(earth_model)
        self.earth_model = earth_model
        self.FTYPE = np.float64

        self.ebins_fine = oversample_binning(self.ebins, oversample_e)
        self.czbins_fine = oversample_binning(self.czbins, oversample_cz)
        self.ecen_fine = get_bin_centers(self.ebins_fine)
        self.czcen_fine = get_bin_centers(self.czbins_fine)

        self.initialize_kernel()

        return
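oversample_binning produces the fine binning used for the oscillation calculation. A plausible sketch, assuming it simply splits each analysis bin into `factor` equal-width sub-bins (not necessarily the actual implementation):

import numpy as np

def oversample_binning(coarse_edges, factor):
    """Subdivide every bin into `factor` equal sub-bins (assumption)."""
    coarse_edges = np.asarray(coarse_edges, dtype=float)
    fine = [np.linspace(lo, hi, factor + 1)[:-1]
            for lo, hi in zip(coarse_edges[:-1], coarse_edges[1:])]
    return np.append(np.concatenate(fine), coarse_edges[-1])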
Example #4
    def __init__(self, ebins, czbins, detector_depth=None, earth_model=None,
                 prop_height=None, oversample_e=None,oversample_cz=None,gpu_id=None,
                 **kwargs):
        """
        \params:
          * ebins: Energy bin edges
          * czbins: cos(zenith) bin edges
          * earth_model: Earth density model used for matter oscillations.
          * detector_depth: Detector depth in km.
          * prop_height: Height in the atmosphere at which propagation begins, in km.
          * gpu_id: On a system with multiple GPUs, selects the device with
            this id; otherwise, the default context is used.
        """

        self.gpu_id = gpu_id
        try:
            import pycuda.autoinit
            self.context = cuda.Device(self.gpu_id).make_context()
            print "Initializing PyCUDA using gpu id: %d"%self.gpu_id
        except:
            import pycuda.autoinit
            print "Auto initializing PyCUDA..."

        #mfree,mtot = cuda.mem_get_info()
        #print "free memory: %s mb",mfree/1.0e6
        #print "tot memory:  %s mb",mtot/1.0e6
        #raw_input("PAUSED...")

        logging.info('Instantiating %s'%self.__class__.__name__)
        self.ebins = np.array(ebins)
        self.czbins = np.array(czbins)
        self.prop_height = prop_height
        for ax in [self.ebins, self.czbins]:
            if (len(np.shape(ax)) != 1):
                raise IndexError('Axes must be 1d! '+str(np.shape(ax)))

        report_params(get_params(),['km','','','',''])

        earth_model = find_resource(earth_model)
        self.earth_model = earth_model
        self.FTYPE = np.float64

        self.ebins_fine = oversample_binning(self.ebins, oversample_e)
        self.czbins_fine = oversample_binning(self.czbins, oversample_cz)
        self.ecen_fine = get_bin_centers(self.ebins_fine)
        self.czcen_fine = get_bin_centers(self.czbins_fine)

        self.initialize_kernel(detector_depth,**kwargs)

        return
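The try/except above selects a specific GPU and falls back to pycuda.autoinit if that fails. A standalone sketch of the same pattern (hypothetical helper, assuming PyCUDA is available):

import pycuda.driver as cuda

def make_gpu_context(gpu_id=None):
    """Hypothetical helper: context on the requested GPU, else autoinit."""
    if gpu_id is not None:
        cuda.init()                                # explicit driver init
        return cuda.Device(gpu_id).make_context()
    import pycuda.autoinit                         # default device/context
    return pycuda.autoinit.context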
Example #5
def SaveAeff(aeff,aeff_err,egy_bin_edges,flavor,out_dir):

    # Correct for nutau/nutaubar:
    for i in range(len(egy_bin_edges)-1):
        if(aeff_err[i] < 1.0e-12): aeff_err[i] = 1.0e-12

    ecen = get_bin_centers(egy_bin_edges)
    splinefit = splrep(ecen,aeff,w=1./np.array(aeff_err), k=3, s=100)
    fit_aeff = splev(ecen,splinefit)

    outfile = os.path.join(out_dir,"a_eff_"+flavor+".dat")
    print "Saving spline fit to file: "+outfile
    fh = open(outfile,'w')
    for i,energy in enumerate(ecen):
        fh.write(str(energy)+' '+str(fit_aeff[i])+'\n')
    fh.close()

    outfile_data = os.path.join(out_dir,"a_eff_"+flavor+"_data.dat")
    print "Saving data to file: "+outfile_data
    fh = open(outfile_data,'w')
    for i,energy in enumerate(ecen):
        fh.write(str(energy)+' '+str(aeff[i])+' '+str(aeff_err[i])+'\n')
    fh.close()

    return
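SaveAeff smooths the effective area with a weighted cubic smoothing spline. A toy, self-contained illustration of the same scipy calls (fake data, assumed smoothing factor):

import numpy as np
from scipy.interpolate import splrep, splev

ecen = np.linspace(5.0, 80.0, 40)                        # toy energy bin centers [GeV]
aeff = 1e-4 * ecen**1.5 * (1.0 + 0.05 * np.random.randn(ecen.size))
aeff_err = 0.05 * aeff + 1.0e-12                         # avoid zero weights

tck = splrep(ecen, aeff, w=1.0/aeff_err, k=3, s=len(ecen))  # weighted cubic spline
fit_aeff = splev(ecen, tck)                                 # smoothed values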
Example #7
    def read_param_string(self, param_str):
        """
        Parse the dict with the parametrization strings and evaluate for
        the bin energies needed.
        """

        evals = get_bin_centers(self.ebins)
        n_e = len(self.ebins) - 1
        n_cz = len(self.czbins) - 1

        parametrization = {}
        for flavour in param_str:
            parametrization[flavour] = {}
            for int_type in param_str[flavour]:  #['cc', 'nc']
                logging.debug('Parsing function strings for %s %s' %
                              (flavour, int_type))
                parametrization[flavour][int_type] = {}
                for axis in param_str[flavour][
                        int_type]:  #['energy', 'coszen']
                    parameters = {}
                    for par, funcstring in param_str[flavour][int_type][
                            axis].items():
                        # this should contain a lambda function:
                        function = eval(funcstring)
                        logging.trace('  function: %s' % funcstring)
                        # evaluate the function at the given energies
                        vals = function(evals)
                        # repeat for all cos(zen) bins
                        parameters[par] = np.repeat(vals, n_cz).reshape(
                            (n_e, n_cz))
                    parametrization[flavour][int_type][axis] = copy(parameters)
        return parametrization
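read_param_string expects nested dicts of lambda-function strings (flavour -> interaction type -> axis -> parameter). A minimal toy input and evaluation of one entry (hypothetical parameter values):

import numpy as np

param_str = {
    'numu': {
        'cc': {
            'energy': {
                'loc1': "lambda E: 0.0*E",      # toy parametrization strings
                'width1': "lambda E: 0.2*E",
            }
        }
    }
}

func = eval(param_str['numu']['cc']['energy']['width1'])
vals = func(np.array([5.0, 10.0, 20.0]))        # -> array([1., 2., 4.])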
Example #8
def plot_error(llr, nbins, **kwargs):
    """Given llr distribution Series, calculates the error bars and plots
    them """
    hist_vals, xbins = np.histogram(llr, bins=nbins)
    bincen = get_bin_centers(xbins)
    plt.errorbar(bincen, hist_vals, yerr=np.sqrt(hist_vals), **kwargs)
    return hist_vals, bincen
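A short usage sketch for plot_error (toy LLR values; assumes plot_error and get_bin_centers from above are in scope, with extra keyword arguments passed through to plt.errorbar):

import numpy as np
import matplotlib.pyplot as plt

llr = np.random.normal(loc=0.0, scale=1.0, size=5000)   # toy LLR sample
hist_vals, bincen = plot_error(llr, 40, fmt='o', color='k', label='pseudo-trials')
plt.legend()
plt.show()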
Example #9
    def read_param_string(self, param_str):
        """
        Parse the dict with the parametrization strings and evaluate for
        the bin energies needed.
        """

        evals = get_bin_centers(self.ebins)
        n_e = len(self.ebins)-1
        n_cz = len(self.czbins)-1

        parametrization = {}
        for flavour in param_str:
          parametrization[flavour] = {}
          for int_type in param_str[flavour]:    #['cc', 'nc']
            logging.debug('Parsing function strings for %s %s'
                          %(flavour, int_type))
            parametrization[flavour][int_type] = {}
            for axis in param_str[flavour][int_type]:    #['energy', 'coszen']
                parameters = {}
                for par, funcstring in param_str[flavour][int_type][axis].items():
                    # this should contain a lambda function:
                    function = eval(funcstring)
                    logging.trace('  function: %s'%funcstring)
                    # evaluate the function at the given energies
                    vals = function(evals)
                    # repeat for all cos(zen) bins
                    parameters[par] = np.repeat(vals,n_cz).reshape((n_e,n_cz))
                parametrization[flavour][int_type][axis] = copy(parameters)
        return parametrization
Example #10
    def __init__(self, ebins, czbins, detector_depth=None, earth_model=None,
                 prop_height=None, oversample_e=None,oversample_cz=None,gpu_id=None,
                 **kwargs):
        """
        \params:
          * ebins: Energy bin edges
          * czbins: cos(zenith) bin edges
          * earth_model: Earth density model used for matter oscillations.
          * detector_depth: Detector depth in km.
          * prop_height: Height in the atmosphere at which propagation begins, in km.
          * gpu_id: On a system with multiple GPUs, selects the device with
            this id; otherwise, the default context is used.
        """

        self.gpu_id = gpu_id
        try:
            import pycuda.autoinit
            self.context = cuda.Device(self.gpu_id).make_context()
            print "Initializing PyCUDA using gpu id: %d"%self.gpu_id
        except:
            import pycuda.autoinit
            print "Auto initializing PyCUDA..."


        logging.info('Instantiating %s'%self.__class__.__name__)
        self.ebins = np.array(ebins)
        self.czbins = np.array(czbins)
        self.prop_height = prop_height
        for ax in [self.ebins, self.czbins]:
            if (len(np.shape(ax)) != 1):
                raise IndexError('Axes must be 1d! '+str(np.shape(ax)))

        report_params(get_params(),['km','','','','km'])

        earth_model = find_resource(earth_model)
        self.earth_model = earth_model
        self.FTYPE = np.float64

        self.ebins_fine = oversample_binning(self.ebins, oversample_e)
        self.czbins_fine = oversample_binning(self.czbins, oversample_cz)
        self.ecen_fine = get_bin_centers(self.ebins_fine)
        self.czcen_fine = get_bin_centers(self.czbins_fine)

        self.initialize_kernel(detector_depth,**kwargs)

        return
Example #11
    def get_osc_probLT_dict(self,
                            ebins=None,
                            czbins=None,
                            oversample_e=None,
                            oversample_cz=None,
                            **kwargs):
        """
        This will create the oscillation probability map lookup tables
        (LT) corresponding to atmospheric neutrinos oscillation
        through the earth, and will return a dictionary of maps:
        {'nue_maps':[to_nue_map, to_numu_map, to_nutau_map],
         'numu_maps': [...],
         'nue_bar_maps': [...],
         'numu_bar_maps': [...],
         'czbins':czbins,
         'ebins': ebins}
        Will call fill_osc_prob to calculate the individual
        probabilities on the fly.
        By default, the standard binning is oversampled by a factor 10.
        Alternatively, the oversampling factor can be changed or a fine
        binning specified explicitly. In the latter case, the oversampling
        factor is ignored.
        """
        #First initialize the fine binning if not explicitly given
        if not check_fine_binning(ebins, self.ebins):
            ebins = oversample_binning(self.ebins, oversample_e)
        if not check_fine_binning(czbins, self.czbins):
            czbins = oversample_binning(self.czbins, oversample_cz)
        ecen = get_bin_centers(ebins)
        czcen = get_bin_centers(czbins)

        osc_prob_dict = {}
        for nu in ['nue_maps', 'numu_maps', 'nue_bar_maps', 'numu_bar_maps']:
            isbar = '_bar' if 'bar' in nu else ''
            osc_prob_dict[nu] = {
                'nue' + isbar: [],
                'numu' + isbar: [],
                'nutau' + isbar: [],
            }

        evals, czvals = self.fill_osc_prob(osc_prob_dict, ecen, czcen,
                                           **kwargs)
        osc_prob_dict['evals'] = evals
        osc_prob_dict['czvals'] = czvals

        return osc_prob_dict
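check_fine_binning decides whether an explicitly supplied binning can be used instead of oversampling. A plausible sketch of such a check (an assumption, not the actual implementation):

import numpy as np

def check_fine_binning(fine_bins, coarse_bins):
    """Assumed behavior: accept a binning only if given and spanning the analysis range."""
    if fine_bins is None:
        return False
    fine_bins = np.asarray(fine_bins)
    coarse_bins = np.asarray(coarse_bins)
    return fine_bins[0] <= coarse_bins[0] and fine_bins[-1] >= coarse_bins[-1]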
Example #12
    def __init__(self,pid_data,ebins,czbins):

        #Evaluate the functions at the bin centers
        ecen = get_bin_centers(ebins)
        czcen = get_bin_centers(czbins)

        self.pid_maps = {}
        for signature in pid_data.keys():
            #Generate the functions
            to_trck_func = eval(pid_data[signature]['trck'])
            to_cscd_func = eval(pid_data[signature]['cscd'])

            #Make maps from the functions evaluated at the bin centers
            _,to_trck_map = np.meshgrid(czcen, to_trck_func(ecen))
            _,to_cscd_map = np.meshgrid(czcen, to_cscd_func(ecen))

            self.pid_maps[signature] = {'trck':to_trck_map,
                                        'cscd':to_cscd_map}
Example #13
def get_median_energy(flux_map):
    """Returns the median energy of the flux_map-expected to be a dict
    with keys 'map', 'ebins', 'czbins'
    """

    ecen = get_bin_centers(flux_map['ebins'])
    energy = ecen[len(ecen) / 2]

    return energy
Example #14
def get_median_energy(flux_map):
    """Returns the median energy of the flux_map-expected to be a dict
    with keys 'map', 'ebins', 'czbins'
    """

    ecen = get_bin_centers(flux_map['ebins'])
    energy = ecen[len(ecen)/2]

    return energy
Example #15
    def get_osc_probLT_dict(self,theta12,theta13,theta23,deltam21,deltam31,deltacp,
                            eminLT = 1.0, emaxLT =80.0, nebinsLT=500,
                            czminLT=-1.0, czmaxLT= 1.0, nczbinsLT=500):
        '''
        This will create the oscillation probability map lookup tables
        (LT) corresponding to atmospheric neutrinos oscillation
        through the earth, and will return a dictionary of maps:
        {'nue_maps':[to_nue_map, to_numu_map, to_nutau_map],
         'numu_maps': [...],
         'nue_bar_maps': [...], 
         'numu_bar_maps': [...], 
         'czbins':czbins, 
         'ebins': ebins} 
        Uses the BargerPropagator code to calculate the individual
        probabilities on the fly.

        NOTE: Expects all angles to be in [rad], and all deltam to be in [eV^2]
        '''

        # First initialize all empty maps to use in osc_prob_dict
        ebins = np.logspace(np.log10(eminLT),np.log10(emaxLT),nebinsLT+1)
        czbins = np.linspace(czminLT,czmaxLT,nczbinsLT+1)
        ecen = get_bin_centers(ebins)
        czcen = get_bin_centers(czbins)
        
        osc_prob_dict = {'ebins':ebins, 'czbins':czbins}
        shape = (len(ecen),len(czcen))
        for nu in ['nue_maps','numu_maps','nue_bar_maps','numu_bar_maps']:
            if 'bar' in nu:
                osc_prob_dict[nu] = {'nue_bar': np.zeros(shape,dtype=np.float32),
                                     'numu_bar': np.zeros(shape,dtype=np.float32),
                                     'nutau_bar': np.zeros(shape,dtype=np.float32)}
            else:
                osc_prob_dict[nu] = {'nue': np.zeros(shape,dtype=np.float32),
                                     'numu': np.zeros(shape,dtype=np.float32),
                                     'nutau': np.zeros(shape,dtype=np.float32)}
        
        self.fill_osc_prob(osc_prob_dict, ecen, czcen,
                           theta12=theta12, theta13=theta13, theta23=theta23,
                           deltam21=deltam21, deltam31=deltam31, deltacp=deltacp)
        
        return osc_prob_dict
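The lookup-table energy binning above is log-spaced, for which the arithmetic midpoint and the geometric mean of the bin edges differ; which one get_bin_centers returns matters for wide bins. A quick comparison:

import numpy as np

ebins = np.logspace(np.log10(1.0), np.log10(80.0), 6)
arith = 0.5 * (ebins[:-1] + ebins[1:])        # arithmetic midpoints
geom = np.sqrt(ebins[:-1] * ebins[1:])        # centered in log(E)
print(np.max(np.abs(arith - geom) / geom))    # non-negligible for wide log bins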
Example #16
    def get_osc_probLT_dict(self, ebins=None, czbins=None,
                            oversample_e=None,oversample_cz=None, **kwargs):
        """
        This will create the oscillation probability map lookup tables
        (LT) corresponding to atmospheric neutrinos oscillation
        through the earth, and will return a dictionary of maps:
        {'nue_maps':[to_nue_map, to_numu_map, to_nutau_map],
         'numu_maps': [...],
         'nue_bar_maps': [...],
         'numu_bar_maps': [...],
         'czbins':czbins,
         'ebins': ebins}
        Will call fill_osc_prob to calculate the individual
        probabilities on the fly.
        By default, the standard binning is oversampled by a factor 10.
        Alternatively, the oversampling factor can be changed or a fine
        binning specified explicitly. In the latter case, the oversampling
        factor is ignored.
        """
        #First initialize the fine binning if not explicitly given
        if not check_fine_binning(ebins, self.ebins):
            ebins = oversample_binning(self.ebins, oversample_e)
        if not check_fine_binning(czbins, self.czbins):
            czbins = oversample_binning(self.czbins, oversample_cz)
        ecen = get_bin_centers(ebins)
        czcen = get_bin_centers(czbins)

        osc_prob_dict = {}
        for nu in ['nue_maps','numu_maps','nue_bar_maps','numu_bar_maps']:
            isbar = '_bar' if 'bar' in nu else ''
            osc_prob_dict[nu] = {'nue'+isbar: [],
                                 'numu'+isbar: [],
                                 'nutau'+isbar: [],}

        evals,czvals = self.fill_osc_prob(osc_prob_dict, ecen, czcen, **kwargs)
        osc_prob_dict['evals'] = evals
        osc_prob_dict['czvals'] = czvals

        return osc_prob_dict
Example #17
    def get_flux(self, ebins, czbins, prim):
        """Get the flux in units [m^-2 s^-1] for the given
           bin edges in energy and cos(zenith) and the primary."""

        #Evaluate the flux at the bin centers
        evals = get_bin_centers(ebins)
        czvals = get_bin_centers(czbins)

        # Get the spline interpolation, which is in
        # log(flux) as function of log(E), cos(zenith)
        return_table = bisplev(np.log10(evals), czvals, self.spline_dict[prim])
        return_table = np.power(10., return_table).T

        #Flux is given per sr and GeV, so we need to multiply
        #by bin width in both dimensions
        #Get the bin size in both dimensions
        ebin_sizes = get_bin_sizes(ebins)
        czbin_sizes = 2. * np.pi * get_bin_sizes(czbins)
        bin_sizes = np.meshgrid(ebin_sizes, czbin_sizes)

        return_table *= np.abs(bin_sizes[0] * bin_sizes[1])

        return return_table.T
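The last multiplication converts the differential flux (per GeV per sr) into a per-bin flux by applying the bin widths dE and dOmega = 2*pi*dcos(zenith). A toy sketch of that bin-size factor:

import numpy as np

ebins = np.array([1.0, 2.0, 4.0, 8.0])          # toy energy edges [GeV]
czbins = np.array([-1.0, -0.5, 0.0])            # toy cos(zenith) edges
ebin_sizes = np.diff(ebins)                     # dE
czbin_sizes = 2.0 * np.pi * np.diff(czbins)     # dOmega = 2*pi*dcos(zenith)
bin_factor = np.outer(ebin_sizes, czbin_sizes)  # (n_e, n_cz) per-bin factor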
Example #18
    def get_flux(self, ebins, czbins, prim):
        '''Get the flux in units [m^-2 s^-1] for the given
           bin edges in energy and cos(zenith) and the primary.'''

        #Evaluate the flux at the bin centers
        evals = get_bin_centers(ebins)
        czvals = get_bin_centers(czbins)

        # Get the spline interpolation, which is in
        # log(flux) as function of log(E), cos(zenith)
        return_table = bisplev(np.log10(evals), czvals, self.spline_dict[prim])
        return_table = np.power(10., return_table).T

        #Flux is given per sr and GeV, so we need to multiply
        #by bin width in both dimensions
        #Get the bin size in both dimensions
        ebin_sizes = get_bin_sizes(ebins)
        czbin_sizes = 2.*np.pi*get_bin_sizes(czbins)
        bin_sizes = np.meshgrid(ebin_sizes, czbin_sizes)

        return_table *= np.abs(bin_sizes[0]*bin_sizes[1])

        return return_table.T
Example #19
def apply_delta_index(flux_maps, delta_index, egy_med):
    """
    Applies the spectral index systematic to the flux maps by scaling
    each bin with (egy_cen/egy_med)^(-delta_index), preserving the total
    integral flux. Note that only the numu/numu_bar maps are scaled, because
    the nue_numu_ratio will handle the systematic on the nue flux.
    """

    for flav in ['numu','numu_bar']:
        ecen = get_bin_centers(flux_maps[flav]['ebins'])
        scale = np.power((ecen/egy_med),delta_index)
        flux_map = flux_maps[flav]['map']
        total_flux = flux_map.sum()
        logging.trace("flav: %s, total counts before scale: %f"%(flav,total_flux))
        scaled_flux = (flux_map.T*scale).T
        scaled_flux *= (total_flux/scaled_flux.sum())
        flux_maps[flav]['map'] = scaled_flux
        logging.trace("flav: %s, total counts after scale: %f"%
                      (flav,flux_maps[flav]['map'].sum()))

    return flux_maps
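A quick numerical check of the tilt above: scaling by (E/E_med)**delta_index and rescaling to the original sum preserves the total while tilting the spectrum (toy map and values):

import numpy as np

ecen = np.array([5.0, 10.0, 20.0, 40.0])        # toy energy bin centers
flux_map = np.ones((4, 3))                      # toy (energy, coszen) map
scale = (ecen / 10.0) ** 0.05                   # toy egy_med=10, delta_index=0.05
scaled = (flux_map.T * scale).T
scaled *= flux_map.sum() / scaled.sum()
assert np.isclose(scaled.sum(), flux_map.sum())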
Example #20
def apply_delta_index(flux_maps, delta_index, egy_med):
    """
    Applies the spectral index systematic to the flux maps by scaling
    each bin with (egy_cen/egy_med)^(-delta_index), preserving the total
    integral flux. Note that only the numu/numu_bar maps are scaled, because
    the nue_numu_ratio will handle the systematic on the nue flux.
    """

    for flav in ['numu', 'numu_bar']:
        ecen = get_bin_centers(flux_maps[flav]['ebins'])
        scale = np.power((ecen / egy_med), delta_index)
        flux_map = flux_maps[flav]['map']
        total_flux = flux_map.sum()
        logging.trace("flav: %s, total counts before scale: %f" %
                      (flav, total_flux))
        scaled_flux = (flux_map.T * scale).T
        scaled_flux *= (total_flux / scaled_flux.sum())
        flux_maps[flav]['map'] = scaled_flux
        logging.trace("flav: %s, total counts after scale: %f" %
                      (flav, flux_maps[flav]['map'].sum()))

    return flux_maps
Example #21
    def _get_reco_kernels(self, flipback=True,
                          e_reco_scale=None, cz_reco_scale=None,
                          **kwargs):
        """
        Use the parametrization functions to calculate the actual reco
        kernels (i.e. 4D histograms). If flipback==True, the part of the
        zenith-angle distribution that falls below the lower histogram edge
        will be mirrored back in.
        """
        if all([hasattr(self, 'kernels'), e_reco_scale==1., cz_reco_scale==1.]):
            logging.info('Using existing kernels for reconstruction')
            return self.kernels

        logging.info('Creating parametrized reconstruction kernels')

        # get binning information
        evals, esizes = get_bin_centers(self.ebins), get_bin_sizes(self.ebins)
        czvals, czsizes = get_bin_centers(self.czbins), get_bin_sizes(self.czbins)
        czbins = self.czbins 
        n_e, n_cz = len(evals), len(czvals)

        # prepare for folding back at lower edge
        if not is_linear(self.czbins):
            logging.warn("cos(zenith) bins have different "
                         "sizes! Unable to fold around edge "
                         "of histogram, will not do that.")
            flipback = False

        if flipback:
            czvals = np.append(czvals-(self.czbins[-1]-self.czbins[0]),
                               czvals)
            czbins = np.append(czbins[:-1]-(self.czbins[-1]-self.czbins[0]),
                               czbins)
            czsizes = np.append(czsizes, czsizes)

        # get properly scaled parametrization, initialize kernels
        parametrization = self.apply_reco_scales(e_reco_scale, cz_reco_scale)
        kernel_dict = {}

        for flavour in parametrization:
          kernel_dict[flavour] = {}
          for int_type in ['cc', 'nc']:
            logging.debug('Calculating parametrized reconstruction kernel for %s %s'
                          %(flavour, int_type))

            # create empty kernel
            kernel = np.zeros((n_e, n_cz, n_e, n_cz))

            # quick handle to parametrization
            e_pars = parametrization[flavour][int_type]['energy']
            cz_pars = parametrization[flavour][int_type]['coszen']

            # loop over every bin in true (energy, coszen)
            for (i, j) in itertools.product(range(n_e), range(n_cz)):

                e_kern_cdf = double_gauss(self.ebins,
                                          loc1=e_pars['loc1'][i,j]+evals[i],
                                          width1=e_pars['width1'][i,j],
                                          loc2=e_pars['loc2'][i,j]+evals[i],
                                          width2=e_pars['width2'][i,j],
                                          fraction=e_pars['fraction'][i,j])
                e_kern_int = np.sum(e_kern_cdf)

                offset = n_cz if flipback else 0

                cz_kern_cdf = double_gauss(czbins,
                                           loc1=cz_pars['loc1'][i,j]+czvals[j+offset],
                                           width1=cz_pars['width1'][i,j],
                                           loc2=cz_pars['loc2'][i,j]+czvals[j+offset],
                                           width2=cz_pars['width2'][i,j],
                                           fraction=cz_pars['fraction'][i,j])
                cz_kern_int = np.sum(cz_kern_cdf)

                if flipback:
                    # fold back
                    cz_kern_cdf = cz_kern_cdf[:len(czbins)/2][::-1] + cz_kern_cdf[len(czbins)/2:]

                kernel[i,j] = np.outer(e_kern_cdf, cz_kern_cdf)

            kernel_dict[flavour][int_type] = copy(kernel)

        kernel_dict['ebins'] = self.ebins
        kernel_dict['czbins'] = self.czbins

        return kernel_dict
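double_gauss is assumed here to return one probability mass per bin when called with the bin edges. A hedged sketch of such a function (not the actual implementation), built from CDF differences of a two-Gaussian mixture:

import numpy as np
from scipy.stats import norm

def double_gauss(bin_edges, loc1, width1, loc2, width2, fraction):
    """Assumed behavior: per-bin mass of a two-component Gaussian mixture."""
    cdf = (fraction * norm.cdf(bin_edges, loc=loc1, scale=width1) +
           (1.0 - fraction) * norm.cdf(bin_edges, loc=loc2, scale=width2))
    return np.diff(cdf)     # one value per bin, len(bin_edges) - 1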
Example #22
class PIDServiceParam(PIDServiceBase):
    """
    Creates PID kernels from parametrization functions that are stored
    in a JSON dict. Within those strings, numpy is accessible as np, as is scipy.stats.
    Systematic parameters 'PID_offset' and 'PID_scale' are supported.
    """
    def __init__(self, ebins, czbins, **kwargs):
        """
        Parameters needed to initialize a PID service with parametrizations:
        * ebins: Energy bin edges
        * czbins: cos(zenith) bin edges
        * pid_paramfile: JSON containing the parametrizations
        """
        PIDServiceBase.__init__(self, ebins, czbins, **kwargs)

    def get_pid_kernels(self,
                        pid_paramfile=None,
                        PID_offset=0.,
                        PID_scale=1.,
                        **kwargs):

        # load parametrization file
        logging.info('Opening PID parametrization file %s' % pid_paramfile)
        try:
            param_str = from_json(find_resource(pid_paramfile))
        except IOError, e:
            logging.error("Unable to open PID parametrization file %s" %
                          pid_paramfile)
            logging.error(e)
            sys.exit(1)

        ecen = get_bin_centers(self.ebins)
        czcen = get_bin_centers(self.czbins)

        self.pid_kernels = {
            'binning': {
                'ebins': self.ebins,
                'czbins': self.czbins
            }
        }
        for signature in param_str.keys():
            #Generate the functions
            to_trck_func = eval(param_str[signature]['trck'])
            to_cscd_func = eval(param_str[signature]['cscd'])

            # Make maps from the functions evaluated at the bin centers
            #
            # NOTE: np.where() is to catch the low energy nutau events
            # that are undefined. Often what happens is that the nutau
            # parameterization for trck events will drop below 0.0 at
            # low energies, but there are no nutau events at these
            # energies anyway, so we just set them to zero (and cscd =
            # 1.0) if the condition arises.
            to_trck = to_trck_func(ecen - PID_offset)
            to_cscd = to_cscd_func(ecen - PID_offset)
            to_trck = np.where(to_trck < 0.0, 0.0,
                               np.where(to_trck > 1.0, 1.0, to_trck))
            to_cscd = np.where(to_cscd < 0.0, 0.0,
                               np.where(to_cscd > 1.0, 1.0, to_cscd))
            _, to_trck_map = np.meshgrid(czcen, PID_scale * to_trck)
            _, to_cscd_map = np.meshgrid(czcen, PID_scale * to_cscd)

            self.pid_kernels[signature] = {
                'trck': to_trck_map,
                'cscd': to_cscd_map
            }

        return self.pid_kernels
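The nested np.where calls above clamp the parametrized PID fractions to [0, 1]; np.clip does the same in one call:

import numpy as np

x = np.array([-0.2, 0.3, 1.4])
assert np.array_equal(np.clip(x, 0.0, 1.0),
                      np.where(x < 0.0, 0.0, np.where(x > 1.0, 1.0, x)))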
Example #23
# Set to false, since we are using sin^2(2 theta) variables                   
kSquared = False
sin2th12Sq = np.sin(2.0*args.theta12)**2
sin2th13Sq = np.sin(2.0*args.theta13)**2
sin2th23Sq = np.sin(2.0*args.theta23)**2

neutrinos = ['nue','numu','nutau']
anti_neutrinos = ['nue_bar','numu_bar','nutau_bar']


nu_barger = {'nue':1,'numu':2,'nutau':3,
             'nue_bar':1,'numu_bar':2,'nutau_bar':3}

# Initialize dictionary to hold the osc prob maps
osc_prob_dict = {'ebins':ebins, 'czbins':czbins}
ecen = get_bin_centers(ebins)
czcen = get_bin_centers(czbins)
shape = (len(ebins),len(czbins))
for nu in ['nue_maps','numu_maps','nue_bar_maps','numu_bar_maps']:
    isbar = '_bar' if 'bar' in nu else ''
    osc_prob_dict[nu] = {'nue'+isbar: np.zeros(shape,dtype=np.float32),
                         'numu'+isbar: np.zeros(shape,dtype=np.float32),
                         'nutau'+isbar: np.zeros(shape,dtype=np.float32)}
    
    
logging.info("Getting oscillation probability maps...")
total_bins = int(len(ebins)*len(czbins))
mod = total_bins/50
ibin = 0
for icz, coszen in enumerate(czcen):
    for ie,energy in enumerate(ecen):
Example #24
    def _get_reco_kernels(self,
                          flipback=True,
                          e_reco_scale=None,
                          cz_reco_scale=None,
                          **kwargs):
        """
        Use the parametrization functions to calculate the actual reco
        kernels (i.e. 4D histograms). If flipback==True, the part of the
        zenith-angle distribution that falls below the lower histogram edge
        will be mirrored back in.
        """
        if all([
                hasattr(self, 'kernels'), e_reco_scale == 1.,
                cz_reco_scale == 1.
        ]):
            logging.info('Using existing kernels for reconstruction')
            return self.kernels

        logging.info('Creating parametrized reconstruction kernels')

        # get binning information
        evals, esizes = get_bin_centers(self.ebins), get_bin_sizes(self.ebins)
        czvals, czsizes = get_bin_centers(self.czbins), get_bin_sizes(
            self.czbins)
        czbins = self.czbins
        n_e, n_cz = len(evals), len(czvals)

        # prepare for folding back at lower edge
        if not is_linear(self.czbins):
            logging.warn("cos(zenith) bins have different "
                         "sizes! Unable to fold around edge "
                         "of histogram, will not do that.")
            flipback = False

        if flipback:
            czvals = np.append(czvals - (self.czbins[-1] - self.czbins[0]),
                               czvals)
            czbins = np.append(
                czbins[:-1] - (self.czbins[-1] - self.czbins[0]), czbins)
            czsizes = np.append(czsizes, czsizes)

        # get properly scaled parametrization, initialize kernels
        parametrization = self.apply_reco_scales(e_reco_scale, cz_reco_scale)
        kernel_dict = {}

        for flavour in parametrization:
            kernel_dict[flavour] = {}
            for int_type in ['cc', 'nc']:
                logging.debug(
                    'Calculating parametrized reconstruction kernel for %s %s'
                    % (flavour, int_type))

                # create empty kernel
                kernel = np.zeros((n_e, n_cz, n_e, n_cz))

                # quick handle to parametrization
                e_pars = parametrization[flavour][int_type]['energy']
                cz_pars = parametrization[flavour][int_type]['coszen']

                # loop over every bin in true (energy, coszen)
                for (i, j) in itertools.product(range(n_e), range(n_cz)):

                    e_kern_cdf = double_gauss(
                        self.ebins,
                        loc1=e_pars['loc1'][i, j] + evals[i],
                        width1=e_pars['width1'][i, j],
                        loc2=e_pars['loc2'][i, j] + evals[i],
                        width2=e_pars['width2'][i, j],
                        fraction=e_pars['fraction'][i, j])
                    e_kern_int = np.sum(e_kern_cdf)

                    offset = n_cz if flipback else 0

                    cz_kern_cdf = double_gauss(
                        czbins,
                        loc1=cz_pars['loc1'][i, j] + czvals[j + offset],
                        width1=cz_pars['width1'][i, j],
                        loc2=cz_pars['loc2'][i, j] + czvals[j + offset],
                        width2=cz_pars['width2'][i, j],
                        fraction=cz_pars['fraction'][i, j])
                    cz_kern_int = np.sum(cz_kern_cdf)

                    if flipback:
                        # fold back
                        cz_kern_cdf = cz_kern_cdf[:len(czbins) /
                                                  2][::-1] + cz_kern_cdf[
                                                      len(czbins) / 2:]

                    kernel[i, j] = np.outer(e_kern_cdf, cz_kern_cdf)

                kernel_dict[flavour][int_type] = copy(kernel)

        kernel_dict['ebins'] = self.ebins
        kernel_dict['czbins'] = self.czbins

        return kernel_dict
Example #25
neutrinos = ['nue', 'numu', 'nutau']
anti_neutrinos = ['nue_bar', 'numu_bar', 'nutau_bar']

nu_barger = {
    'nue': 1,
    'numu': 2,
    'nutau': 3,
    'nue_bar': 1,
    'numu_bar': 2,
    'nutau_bar': 3
}

# Initialize dictionary to hold the osc prob maps
osc_prob_dict = {'ebins': ebins, 'czbins': czbins}
ecen = get_bin_centers(ebins)
czcen = get_bin_centers(czbins)
shape = (len(ebins), len(czbins))
for nu in ['nue_maps', 'numu_maps', 'nue_bar_maps', 'numu_bar_maps']:
    isbar = '_bar' if 'bar' in nu else ''
    osc_prob_dict[nu] = {
        'nue' + isbar: np.zeros(shape, dtype=np.float32),
        'numu' + isbar: np.zeros(shape, dtype=np.float32),
        'nutau' + isbar: np.zeros(shape, dtype=np.float32)
    }

logging.info("Getting oscillation probability maps...")
total_bins = int(len(ebins) * len(czbins))
mod = total_bins / 50
ibin = 0
for icz, coszen in enumerate(czcen):