def get_osc_flux(flux_maps, osc_service=None, deltam21=None, deltam31=None,
                 energy_scale=None, theta12=None, theta13=None, theta23=None,
                 deltacp=None, **kwargs):
    '''
    Obtain a map in energy and cos(zenith) of the oscillation probabilities
    from the OscillationService and compute the oscillated flux.

    Inputs:
      flux_maps - dictionary of atmospheric flux
                  ['nue', 'numu', 'nue_bar', 'numu_bar']
      osc_service - a handle to an OscillationService
      others - oscillation parameters to compute oscillation probability
               maps from.
    '''
    # Be verbose on input
    params = get_params()
    report_params(params, units=['rad', 'eV^2', 'eV^2', '', 'rad', 'rad', 'rad'])

    # Initialize return dict
    osc_flux_maps = {'params': add_params(params, flux_maps['params'])}

    # Get oscillation probability map from service
    osc_prob_maps = osc_service.get_osc_prob_maps(deltam21=deltam21,
                                                  deltam31=deltam31,
                                                  theta12=theta12,
                                                  theta13=theta13,
                                                  theta23=theta23,
                                                  deltacp=deltacp,
                                                  energy_scale=energy_scale,
                                                  **kwargs)

    ebins, czbins = get_binning(flux_maps)

    for to_flav in ['nue', 'numu', 'nutau']:
        for mID in ['', '_bar']:  # 'matter' ID
            nue_flux = flux_maps['nue' + mID]['map']
            numu_flux = flux_maps['numu' + mID]['map']
            oscflux = {
                'ebins': ebins,
                'czbins': czbins,
                'map': (nue_flux * osc_prob_maps['nue' + mID + '_maps'][to_flav + mID] +
                        numu_flux * osc_prob_maps['numu' + mID + '_maps'][to_flav + mID])
            }
            osc_flux_maps[to_flav + mID] = oscflux

    return osc_flux_maps
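# --- Illustrative sketch (not part of the original module) ---
# The combination performed in get_osc_flux above weights each source flux by
# its oscillation probability into the target flavour, bin by bin. The arrays
# below are toy 2x2 maps standing in for the flux-service and
# oscillation-service outputs, which are assumed rather than reproduced here.
import numpy as np

nue_flux = np.array([[1.0, 2.0], [3.0, 4.0]])    # hypothetical nue flux map
numu_flux = np.array([[2.0, 1.0], [0.5, 0.5]])   # hypothetical numu flux map
p_nue_to_nutau = np.full((2, 2), 0.1)            # hypothetical P(nue -> nutau) per bin
p_numu_to_nutau = np.full((2, 2), 0.4)           # hypothetical P(numu -> nutau) per bin

# Oscillated nutau flux: sum of the two source fluxes, each weighted by its
# per-bin oscillation probability into nutau
nutau_flux = nue_flux * p_nue_to_nutau + numu_flux * p_numu_to_nutau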
def get_event_rates(osc_flux_maps, aeff_service, livetime=None,
                    nu_nubar_ratio=None, aeff_scale=None, **kwargs):
    '''
    Main function for this module, which returns the event rate maps
    for each flavor and interaction type, using true energy and zenith
    information. The content of each bin will be the weighted aeff
    multiplied by the oscillated flux, so that the returned dictionary
    will be of the form:
    {'nue': {'cc':map, 'nc':map},
     'nue_bar': {'cc':map, 'nc':map}, ...
     'nutau_bar': {'cc':map, 'nc':map}}

    \params:
      * osc_flux_maps - maps containing oscillated fluxes
      * aeff_service - the effective area service to use
      * livetime - detector livetime for which to calculate event counts
      * nu_nubar_ratio - systematic to be a proxy for the realistic
        counts_nue(cc/nc) / counts_nuebar(cc/nc), ... ratios, keeping the
        total flavour counts constant. The adjusted ratios are given by
        "nu_nubar_ratio * original ratio".
      * aeff_scale - systematic to be a proxy for the realistic effective area
    '''
    # Get parameters used here
    params = get_params()
    report_params(params, units=['', 'yrs', ''])

    # Initialize return dict
    event_rate_maps = {'params': add_params(params, osc_flux_maps['params'])}

    # Get effective area
    aeff_dict = aeff_service.get_aeff()

    ebins, czbins = get_binning(osc_flux_maps)

    # Apply the effective-area scaling and convert flux to expected counts
    flavours = ['nue', 'numu', 'nutau', 'nue_bar', 'numu_bar', 'nutau_bar']
    for flavour in flavours:
        osc_flux_map = osc_flux_maps[flavour]['map']
        int_type_dict = {}
        for int_type in ['cc', 'nc']:
            event_rate = osc_flux_map * aeff_dict[flavour][int_type] * aeff_scale
            event_rate *= (livetime * Julian_year)
            int_type_dict[int_type] = {'map': event_rate,
                                       'ebins': ebins,
                                       'czbins': czbins}
            logging.debug(" Event Rate before reco for %s/%s: %.2f"
                          % (flavour, int_type, np.sum(event_rate)))
        event_rate_maps[flavour] = int_type_dict

    # Now scale the nu(e/mu/tau) / nu(e/mu/tau)bar event count ratios,
    # keeping the total (nue + nuebar etc.) constant
    if nu_nubar_ratio != 1.:
        return apply_nu_nubar_ratio(event_rate_maps, nu_nubar_ratio)

    # else: no scaling to be applied
    return event_rate_maps
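# --- Illustrative sketch (not part of the original module) ---
# apply_nu_nubar_ratio is defined elsewhere; the toy function below only
# illustrates the behaviour described in the docstring above: the nu/nubar
# ratio is multiplied by nu_nubar_ratio bin by bin while the per-bin total
# (nu + nubar) is preserved. It is a hedged sketch, not the actual
# implementation.
import numpy as np

def _rescale_nu_nubar_pair(nu_map, nubar_map, nu_nubar_ratio):
    """Scale the nu/nubar ratio by `nu_nubar_ratio`, preserving nu + nubar."""
    total = nu_map + nubar_map
    scaled_nu = nu_map * nu_nubar_ratio
    # Renormalise so each bin keeps its original total count
    new_nu = total * scaled_nu / (scaled_nu + nubar_map)
    new_nubar = total - new_nu
    return new_nu, new_nubar

# Example: a 10% shift towards neutrinos leaves the summed map unchanged
nu, nubar = np.array([[4.0, 6.0]]), np.array([[2.0, 3.0]])
new_nu, new_nubar = _rescale_nu_nubar_pair(nu, nubar, 1.1)
assert np.allclose(new_nu + new_nubar, nu + nubar)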
def get_event_rates(osc_flux_maps, aeff_service, livetime=None,
                    aeff_scale=None, **kwargs):
    '''
    Main function for this module, which returns the event rate maps
    for each flavor and interaction type, using true energy and zenith
    information. The content of each bin will be the weighted aeff
    multiplied by the oscillated flux, so that the returned dictionary
    will be of the form:
    {'nue': {'cc':map, 'nc':map},
     'nue_bar': {'cc':map, 'nc':map}, ...
     'nutau_bar': {'cc':map, 'nc':map}}

    \params:
      * osc_flux_maps - maps containing oscillated fluxes
      * aeff_service - the effective area service to use
      * livetime - detector livetime for which to calculate event counts
      * aeff_scale - systematic to be a proxy for the realistic effective area
    '''
    # Get parameters used here
    params = get_params()
    report_params(params, units=['', 'yrs', ''])

    # Initialize return dict
    event_rate_maps = {'params': add_params(params, osc_flux_maps['params'])}

    # Get effective area
    aeff_dict = aeff_service.get_aeff()

    ebins, czbins = get_binning(osc_flux_maps)

    # Apply the effective-area scaling and convert flux to expected counts
    flavours = ['nue', 'numu', 'nutau', 'nue_bar', 'numu_bar', 'nutau_bar']
    for flavour in flavours:
        osc_flux_map = osc_flux_maps[flavour]['map']
        int_type_dict = {}
        for int_type in ['cc', 'nc']:
            event_rate = osc_flux_map * aeff_dict[flavour][int_type] * aeff_scale
            event_rate *= (livetime * Julian_year)
            int_type_dict[int_type] = {'map': event_rate,
                                       'ebins': ebins,
                                       'czbins': czbins}
            logging.debug(" Event Rate before reco for %s/%s: %.2f"
                          % (flavour, int_type, np.sum(event_rate)))
        event_rate_maps[flavour] = int_type_dict

    return event_rate_maps
def get_osc_flux(flux_maps, osc_service=None, deltam21=None, deltam31=None,
                 energy_scale=None, theta12=None, theta13=None, theta23=None,
                 deltacp=None, YeI=None, YeO=None, YeM=None, **kwargs):
    '''
    Obtain a map in energy and cos(zenith) of the oscillation probabilities
    from the OscillationService and compute the oscillated flux.

    Inputs:
      flux_maps - dictionary of atmospheric flux
                  ['nue', 'numu', 'nue_bar', 'numu_bar']
      osc_service - a handle to an OscillationService
      others - oscillation parameters to compute oscillation probability
               maps from.
    '''
    # Be verbose on input
    params = get_params()
    report_params(params,
                  units=['', '', '', 'rad', 'eV^2', 'eV^2', '', 'rad', 'rad', 'rad'])

    # Initialize return dict
    osc_flux_maps = {'params': add_params(params, flux_maps['params'])}

    # Get oscillation probability map from service
    osc_prob_maps = osc_service.get_osc_prob_maps(deltam21=deltam21,
                                                  deltam31=deltam31,
                                                  theta12=theta12,
                                                  theta13=theta13,
                                                  theta23=theta23,
                                                  deltacp=deltacp,
                                                  energy_scale=energy_scale,
                                                  YeI=YeI, YeO=YeO, YeM=YeM,
                                                  **kwargs)

    ebins, czbins = get_binning(flux_maps)

    for to_flav in ['nue', 'numu', 'nutau']:
        for mID in ['', '_bar']:  # 'matter' ID
            nue_flux = flux_maps['nue' + mID]['map']
            numu_flux = flux_maps['numu' + mID]['map']
            oscflux = {'ebins': ebins,
                       'czbins': czbins,
                       'map': (nue_flux * osc_prob_maps['nue' + mID + '_maps'][to_flav + mID] +
                               numu_flux * osc_prob_maps['numu' + mID + '_maps'][to_flav + mID])
                       }
            osc_flux_maps[to_flav + mID] = oscflux

    return osc_flux_maps
def get_event_rates(osc_flux_maps, aeff_service, livetime=None,
                    aeff_scale=None, **kwargs):
    """
    Main function for this module, which returns the event rate maps
    for each flavor and interaction type, using true energy and zenith
    information. The content of each bin will be the weighted aeff
    multiplied by the oscillated flux, so that the returned dictionary
    will be of the form:
    {'nue': {'cc':map, 'nc':map},
     'nue_bar': {'cc':map, 'nc':map}, ...
     'nutau_bar': {'cc':map, 'nc':map}}

    \params:
      * osc_flux_maps - maps containing oscillated fluxes
      * aeff_service - the effective area service to use
      * livetime - detector livetime for which to calculate event counts
      * aeff_scale - systematic to be a proxy for the realistic effective area
    """
    # Get parameters used here
    params = get_params()
    report_params(params, units=["", "yrs", ""])

    # Initialize return dict
    event_rate_maps = {"params": add_params(params, osc_flux_maps["params"])}

    # Get effective area
    aeff_dict = aeff_service.get_aeff()

    ebins, czbins = get_binning(osc_flux_maps)

    # Apply the effective-area scaling and convert flux to expected counts
    flavours = ["nue", "numu", "nutau", "nue_bar", "numu_bar", "nutau_bar"]
    for flavour in flavours:
        osc_flux_map = osc_flux_maps[flavour]["map"]
        int_type_dict = {}
        for int_type in ["cc", "nc"]:
            event_rate = osc_flux_map * aeff_dict[flavour][int_type] * aeff_scale
            event_rate *= livetime * Julian_year
            int_type_dict[int_type] = {"map": event_rate,
                                       "ebins": ebins,
                                       "czbins": czbins}
            logging.debug(" Event Rate before reco for %s/%s: %.2f"
                          % (flavour, int_type, np.sum(event_rate)))
        event_rate_maps[flavour] = int_type_dict

    return event_rate_maps
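# --- Illustrative sketch (not part of the original module) ---
# Minimal, self-contained version of the per-bin arithmetic in get_event_rates
# above: expected counts = oscillated flux x effective area x livetime. Toy
# arrays replace the service outputs; Julian_year is taken from scipy.constants
# here, on the assumption that the constant used above is the usual number of
# seconds per Julian year.
import numpy as np
from scipy.constants import Julian_year  # ~3.156e7 seconds

osc_flux = np.array([[10.0, 20.0], [30.0, 40.0]])  # hypothetical flux map [1/(m^2 s)]
aeff = np.full((2, 2), 1e-4)                       # hypothetical effective area [m^2]
livetime = 3.0                                     # detector livetime [yr]
aeff_scale = 1.0                                   # effective-area systematic

event_rate = osc_flux * aeff * aeff_scale * livetime * Julian_year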
def get_pid_maps(reco_events, pid_service, **kwargs):
    '''
    Takes the templates of reco_events in form of:
      'nue_cc': map
      'numu_cc': map
      'nutau_cc': map
      'nuall_nc': map
    And applies PID returning a dictionary of events in form of:
      {'trck': {'ebins':ebins, 'czbins':czbins, 'map':map},
       'cscd': {'ebins':ebins, 'czbins':czbins, 'map':map}}
    '''
    # Be verbose on input
    params = get_params()
    report_params(params, units=[])

    # Initialize return dict
    ebins, czbins = get_binning(reco_events)
    reco_events_pid = {
        'trck': {'map': np.zeros_like(reco_events['nue_cc']['map']),
                 'czbins': czbins,
                 'ebins': ebins},
        'cscd': {'map': np.zeros_like(reco_events['nue_cc']['map']),
                 'czbins': czbins,
                 'ebins': ebins},
        'params': add_params(params, reco_events['params']),
    }

    pid_dict = pid_service.get_maps()

    flavours = ['nue_cc', 'numu_cc', 'nutau_cc', 'nuall_nc']
    for flav in flavours:
        event_map = reco_events[flav]['map']

        # Split each reconstructed map according to the per-bin PID fractions
        to_trck_map = event_map * pid_dict[flav]['trck']
        to_cscd_map = event_map * pid_dict[flav]['cscd']

        reco_events_pid['trck']['map'] += to_trck_map
        reco_events_pid['cscd']['map'] += to_cscd_map

    return reco_events_pid
def get_event_rates(osc_flux_maps, aeff_service, livetime=None, nu_xsec_scale=None,
                    nubar_xsec_scale=None, aeff_scale=None, **kwargs):
    '''
    Main function for this module, which returns the event rate maps
    for each flavor and interaction type, using true energy and zenith
    information. The content of each bin will be the weighted aeff
    multiplied by the oscillated flux, so that the returned dictionary
    will be of the form:
    {'nue': {'cc':map, 'nc':map},
     'nue_bar': {'cc':map, 'nc':map}, ...
     'nutau_bar': {'cc':map, 'nc':map}}
    '''
    # Get parameters used here
    params = get_params()
    report_params(params, units=['', 'yrs', '', ''])

    # Initialize return dict
    event_rate_maps = {'params': add_params(params, osc_flux_maps['params'])}

    # Get effective area
    aeff_dict = aeff_service.get_aeff()

    ebins, czbins = get_binning(osc_flux_maps)

    # Apply the scaling for nu_xsec_scale and nubar_xsec_scale...
    flavours = ['nue', 'numu', 'nutau', 'nue_bar', 'numu_bar', 'nutau_bar']
    for flavour in flavours:
        osc_flux_map = osc_flux_maps[flavour]['map']
        int_type_dict = {}
        for int_type in ['cc', 'nc']:
            event_rate = osc_flux_map * aeff_dict[flavour][int_type] * aeff_scale
            scale = nubar_xsec_scale if 'bar' in flavour else nu_xsec_scale
            event_rate *= (scale * livetime * Julian_year)
            int_type_dict[int_type] = {'map': event_rate,
                                       'ebins': ebins,
                                       'czbins': czbins}
        event_rate_maps[flavour] = int_type_dict

    return event_rate_maps
def get_pid_maps(reco_events, pid_service=None, recalculate=False,
                 return_unknown=False, **kwargs):
    """
    Primary function for this service, which returns the classified
    event rate maps (sorted after tracks and cascades) from the
    reconstructed ones (sorted after nu[e,mu,tau]_cc and nuall_nc).
    """
    if recalculate:
        pid_service.recalculate_kernels(**kwargs)

    # Be verbose on input
    params = get_params()
    report_params(params, units=[])

    # Initialize return dict
    empty_map = {'map': np.zeros_like(reco_events['nue_cc']['map']),
                 'czbins': pid_service.czbins,
                 'ebins': pid_service.ebins}
    # Deep copies so the output channels do not share the same zero array
    reco_events_pid = {'trck': deepcopy(empty_map),
                       'cscd': deepcopy(empty_map),
                       'params': add_params(params, reco_events['params']),
                       }
    if return_unknown:
        reco_events_pid['unkn'] = deepcopy(empty_map)

    # Classify events
    for flav in reco_events:
        if flav == 'params':
            continue
        event_map = reco_events[flav]['map']

        to_trck_map = event_map * pid_service.pid_kernels[flav]['trck']
        to_cscd_map = event_map * pid_service.pid_kernels[flav]['cscd']

        reco_events_pid['trck']['map'] += to_trck_map
        reco_events_pid['cscd']['map'] += to_cscd_map
        if return_unknown:
            reco_events_pid['unkn']['map'] += (event_map
                                               - to_trck_map
                                               - to_cscd_map)

    return reco_events_pid
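# --- Illustrative sketch (not part of the original module) ---
# The PID step above multiplies each reconstructed map by per-bin
# classification fractions and accumulates the results into the 'trck' and
# 'cscd' channels (plus 'unkn' for the remainder when return_unknown is set).
# The fractions below are toy numbers, not real pid_service kernels.
import numpy as np

event_map = np.array([[100.0, 50.0], [25.0, 10.0]])  # hypothetical nue_cc reco map
trck_fraction = np.full((2, 2), 0.3)                 # hypothetical P(classified as track)
cscd_fraction = np.full((2, 2), 0.6)                 # hypothetical P(classified as cascade)

to_trck = event_map * trck_fraction
to_cscd = event_map * cscd_fraction
unknown = event_map - to_trck - to_cscd  # remainder, as tracked by 'unkn' above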
def get_reco_maps(true_event_maps, reco_service=None, e_reco_scale=None,
                  cz_reco_scale=None, **kwargs):
    """
    Primary function for this stage, which returns the reconstructed
    event rate maps from the true event rate maps. The returned maps
    will be in the form of a dictionary with parameters:
    {'nue_cc':{'ebins':ebins, 'czbins':czbins, 'map':map},
     'numu_cc':{...},
     'nutau_cc':{...},
     'nuall_nc':{...}}
    Note that in this function, the nu<x> is now combined with nu_bar<x>.
    """
    # Be verbose on input
    params = get_params()
    report_params(params, units=['', ''])

    # Initialize return dict
    reco_maps = {'params': add_params(params, true_event_maps['params'])}

    # Check binning
    ebins, czbins = get_binning(true_event_maps)

    # Retrieve all reconstruction kernels
    reco_kernel_dict = reco_service.get_reco_kernels(
        e_reco_scale=e_reco_scale, cz_reco_scale=cz_reco_scale, **kwargs
    )

    # Do smearing
    flavors = ['nue', 'numu', 'nutau']
    all_int_types = ['cc', 'nc']
    n_ebins = len(ebins) - 1
    n_czbins = len(czbins) - 1
    for baseflavor, int_type in itertools.product(flavors, all_int_types):
        logging.info("Getting reco event rates for %s %s"
                     % (baseflavor, int_type))
        reco_event_rate = np.zeros((n_ebins, n_czbins), dtype=np.float64)
        for mID in ['', '_bar']:
            flavor = baseflavor + mID
            true_event_rate = true_event_maps[flavor][int_type]['map']
            kernels = reco_kernel_dict[flavor][int_type]
            # Contract the true (E, cz) axes against the first two axes of
            # the 4-D kernel to get the map in reconstructed (E, cz)
            r0 = np.tensordot(true_event_rate, kernels, axes=([0, 1], [0, 1]))
            reco_event_rate += r0
        reco_maps[baseflavor + '_' + int_type] = {'map': reco_event_rate,
                                                  'ebins': ebins,
                                                  'czbins': czbins}
        msg = "after RECO: counts for (%s + %s) %s: %.2f" \
              % (baseflavor, baseflavor + '_bar', int_type,
                 np.sum(reco_event_rate))
        logging.debug(msg)

    # Finally sum up all the NC contributions
    logging.info("Summing up rates for all nc events")
    reco_event_rate = np.sum(
        [reco_maps.pop(key)['map'] for key in list(reco_maps.keys())
         if key.endswith('_nc')],
        axis=0
    )
    reco_maps['nuall_nc'] = {'map': reco_event_rate,
                             'ebins': ebins,
                             'czbins': czbins}
    logging.debug("Total counts for nuall nc: %.2f" % np.sum(reco_event_rate))

    return reco_maps
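# --- Illustrative sketch (not part of the original module) ---
# The tensordot call in get_reco_maps above contracts the true-space
# (E, cos(zenith)) axes of the event map against the first two axes of a 4-D
# smearing kernel (E_true, cz_true, E_reco, cz_reco), giving a map in
# reconstructed space. Toy shapes and a uniform kernel are used here; when
# each per-true-bin kernel sums to one, total counts are conserved.
import numpy as np

n_e, n_cz = 3, 4
true_map = np.arange(n_e * n_cz, dtype=float).reshape(n_e, n_cz)
# Toy kernel: every true bin smeared uniformly over all reco bins
kernels = np.full((n_e, n_cz, n_e, n_cz), 1.0 / (n_e * n_cz))

reco_map = np.tensordot(true_map, kernels, axes=([0, 1], [0, 1]))
assert reco_map.shape == (n_e, n_cz)
assert np.isclose(reco_map.sum(), true_map.sum())  # counts conserved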
def get_reco_maps(true_event_maps, reco_service=None, e_reco_scale=None,
                  cz_reco_scale=None, **kwargs):
    """
    Primary function for this stage, which returns the reconstructed
    event rate maps from the true event rate maps. The returned maps
    will be in the form of a dictionary with parameters:
    {'nue_cc':{'ebins':ebins, 'czbins':czbins, 'map':map},
     'numu_cc':{...},
     'nutau_cc':{...},
     'nuall_nc':{...}}
    Note that in this function, the nu<x> is now combined with nu_bar<x>.
    """
    # Be verbose on input
    params = get_params()
    report_params(params, units=['', ''])

    # Initialize return dict
    reco_maps = {'params': add_params(params, true_event_maps['params'])}

    # Check binning
    ebins, czbins = get_binning(true_event_maps)

    # Retrieve all reconstruction kernels
    reco_kernel_dict = reco_service.get_reco_kernels(
        e_reco_scale=e_reco_scale, cz_reco_scale=cz_reco_scale, **kwargs
    )

    # DEBUG / HACK to store the computed kernels to a file
    #reco_service.store_kernels('reco_kernels.hdf5', fmt='hdf5')

    # Do smearing
    flavors = ['nue', 'numu', 'nutau']
    all_int_types = ['cc', 'nc']
    n_ebins = len(ebins) - 1
    n_czbins = len(czbins) - 1
    for baseflavor, int_type in itertools.product(flavors, all_int_types):
        logging.info("Getting reco event rates for %s %s"
                     % (baseflavor, int_type))
        reco_event_rate = np.zeros((n_ebins, n_czbins), dtype=np.float64)
        for mID in ['', '_bar']:
            flavor = baseflavor + mID
            true_event_rate = true_event_maps[flavor][int_type]['map']
            kernels = reco_kernel_dict[flavor][int_type]
            r0 = np.tensordot(true_event_rate, kernels, axes=([0, 1], [0, 1]))
            reco_event_rate += r0
        reco_maps[baseflavor + '_' + int_type] = {'map': reco_event_rate,
                                                  'ebins': ebins,
                                                  'czbins': czbins}
        msg = "after RECO: counts for (%s + %s) %s: %.2f" \
              % (baseflavor, baseflavor + '_bar', int_type,
                 np.sum(reco_event_rate))
        logging.debug(msg)

    # Finally sum up all the NC contributions
    logging.info("Summing up rates for all nc events")
    reco_event_rate = np.sum(
        [reco_maps.pop(key)['map'] for key in list(reco_maps.keys())
         if key.endswith('_nc')],
        axis=0
    )
    reco_maps['nuall_nc'] = {'map': reco_event_rate,
                             'ebins': ebins,
                             'czbins': czbins}
    logging.debug("Total counts for nuall nc: %.2f" % np.sum(reco_event_rate))

    return reco_maps
def get_reco_maps(true_event_maps, reco_service=None, e_reco_scale=None,
                  cz_reco_scale=None, **kwargs):
    '''
    Primary function for this module, which returns the reconstructed
    event rate maps from the true event rate maps, and from the
    smearing kernel obtained from simulations. The returned maps will
    be in the form of a dictionary with parameters:
    {'nue_cc':{'ebins':ebins, 'czbins':czbins, 'map':map},
     'numu_cc':{...},
     'nutau_cc':{...},
     'nuall_nc':{...}}
    Note that in this function, the nu<x> is now combined with nu_bar<x>.
    '''
    # Be verbose on input
    params = get_params()
    report_params(params, units=['', ''])

    # Initialize return dict
    reco_maps = {'params': add_params(params, true_event_maps['params'])}

    # Get kernels from reco service
    kernel_dict = reco_service.get_kernels()

    ebins, czbins = get_binning(true_event_maps)

    flavours = ['nue', 'numu', 'nutau']
    int_types = ['cc', 'nc']

    for int_type in int_types:
        for flavor in flavours:
            logging.info("Getting reco event rates for %s %s"
                         % (flavor, int_type))
            reco_evt_rate = np.zeros((len(ebins) - 1, len(czbins) - 1),
                                     dtype=np.float32)
            for mID in ['', '_bar']:
                flav = flavor + mID
                true_evt_rate = true_event_maps[flav][int_type]['map']
                kernels = kernel_dict[flav][int_type]

                for ie, egy in enumerate(ebins[:-1]):
                    for icz, cz in enumerate(czbins[:-1]):
                        # Get kernel at these true parameters from 4D hist
                        kernel = kernels[ie, icz]
                        # normalize
                        if np.sum(kernel) > 0.0:
                            kernel /= np.sum(kernel)
                        reco_evt_rate += true_evt_rate[ie, icz] * kernel

            reco_maps[flavor + '_' + int_type] = {'map': reco_evt_rate,
                                                  'ebins': ebins,
                                                  'czbins': czbins}
            physics.trace("Total counts for %s %s: %.2f"
                          % (flavor, int_type, np.sum(reco_evt_rate)))

    # Finally sum up all the NC contributions
    logging.info("Summing up rates for %s %s" % ('all', int_type))
    reco_evt_rate = np.sum([reco_maps.pop(key)['map']
                            for key in list(reco_maps.keys())
                            if key.endswith('_nc')], axis=0)

    reco_maps['nuall_nc'] = {'map': reco_evt_rate,
                             'ebins': ebins,
                             'czbins': czbins}
    physics.trace("Total event counts: %.2f" % np.sum(reco_evt_rate))

    # Apply e_reco_scaling...
    # Apply cz_reco_scaling...

    return reco_maps
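# --- Illustrative sketch (not part of the original module) ---
# The explicit per-bin loop in the function above (normalise each kernel, then
# accumulate true_rate * kernel) gives the same result as the single tensordot
# used in the vectorised variant earlier, once the kernels are normalised.
# A toy check with random arrays:
import numpy as np

n_e, n_cz = 3, 4
rng = np.random.default_rng(0)
true_map = rng.random((n_e, n_cz))
kernels = rng.random((n_e, n_cz, n_e, n_cz))
# Normalise each true-bin kernel to unit sum, as the loop above does
kernels /= kernels.sum(axis=(2, 3), keepdims=True)

looped = np.zeros((n_e, n_cz))
for ie in range(n_e):
    for icz in range(n_cz):
        looped += true_map[ie, icz] * kernels[ie, icz]

vectorised = np.tensordot(true_map, kernels, axes=([0, 1], [0, 1]))
assert np.allclose(looped, vectorised)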