Example #1
def getWeights(s):

    # Weighting modules from offline software
    #from icecube.weighting.fluxes import Hoerandel, Hoerandel_IT
    import icecube.weighting.fluxes as fluxes
    from icecube.weighting.weighting import from_simprod

    simList = sorted(list(set(s['sim'])))
    simList = [int(sim) for sim in simList]
    if 9166 in simList:
        simList[simList.index(9166)] = 9122

    print('Making generator...')
    generator = from_simprod(simList[0])[1] * 0
    for sim in simList:
        nfiles, gen = from_simprod(sim)
        generator += nfiles * gen

    print('Calculating weights...')
    flux = fluxes.Hoerandel_IT()
    #flux = fluxes.Hoerandel()
    weights = flux(s['MC_energy'], s['MC_type']) / \
              generator(s['MC_energy'], s['MC_type'])

    return weights
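
A minimal usage sketch for the function above, assuming `s` is a pandas DataFrame with the 'sim', 'MC_energy', and 'MC_type' columns referenced in the code; the input file name is hypothetical:

import pandas as pd

# Hypothetical level-3 simulation table with 'sim', 'MC_energy', 'MC_type'.
s = pd.read_hdf('sim_level3.hdf5')
weights = getWeights(s)
# The weights convert raw simulated counts into an expected event rate
# under the Hoerandel_IT flux model.
print('Total weighted rate: {:.3e} Hz'.format(weights.sum()))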
Example #2
File: fluxes.py Project: achim1/pyevsel
def generated_corsika_flux(ebinc,datasets):
    """
    Calculate the generated flux for a number of given CORSIKA datasets using the weighting module.
    The calculation compares the number of produced events per energy bin
    with the expected event yield from fluxes in nature. If necessary, this calls home to the simprod db.
    Works for 5-component (5C) datasets.

    Args:
        ebinc (np.array): Energy bins (centers)
        datasets (list): A list of dictionaries with properties of the datasets or dataset numbers. If only numbers are given, the simprod db will be queried.
            format of dataset dict:
            example_datasets ={42: {"nevents": 1,\
                   "nfiles": 1,\
                   "emin": 1,\
                   "emax": 1,\
                   "normalization": [10., 5., 3., 2., 1.],\
                   "gamma": [-2.]*5,\
                   "LowerCutoffType": 'EnergyPerNucleon',\
                   "UpperCutoffType": 'EnergyPerParticle',\
                   "height": 1600,\
                   "radius": 800}}

    Returns:
        tuple (generated protons, generated irons)
    """
    
    if isinstance(datasets,dict):
        pass

    elif not isinstance(datasets,list):
        datasets = list(datasets)

    generators = []
    for ds in datasets:
       
        if not isinstance(datasets,dict):
            assert len(ds.values()) == 1, "Too many arguments per dataset"

        if isinstance(ds,int):
            db_result = from_simprod(ds)
            if isinstance(db_result, tuple):
                db_result = db_result[1]
            generators.append(db_result*datasets[ds])

        elif isinstance(ds.values()[0],int):
            db_result = from_simprod(int(ds.keys()[0]))
            if isinstance(db_result,tuple):
                db_result = db_result[1]
            generators.append(db_result*ds.values()[0])
        elif isinstance(ds.values()[0],dict):
            nfiles = ds.pop("nfiles")
            generators.append(nfiles*FiveComponent(**ds))
        else:
            raise ValueError("Problems understanding dataset properties {}".format(ds.__repr__()))

    gensum = reduce(lambda x, y: x + y, generators)
    p_gen  = AREA_SUM*gensum(ebinc,2212)
    fe_gen = AREA_SUM*gensum(ebinc,1000260560)

    return p_gen, fe_gen
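
A short usage sketch for generated_corsika_flux; the dataset number (11058) and the binning are illustrative only, and passing {dataset_id: nfiles} triggers a simprod database query as described in the docstring:

import numpy as np

ebins = np.logspace(4, 7, 31)            # illustrative energy bin edges
ebinc = np.sqrt(ebins[1:] * ebins[:-1])  # geometric bin centers
p_gen, fe_gen = generated_corsika_flux(ebinc, {11058: 32000})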
Example #3
def get_effective_area(df_sim, energy_bins, verbose=True):

    if verbose: print('Calculating effective area...')
    simlist = np.unique(df_sim['sim'])
    for i, sim in enumerate(simlist):
        gcd_file, sim_files = comp.simfunctions.get_level3_sim_files(sim)
        num_files = len(sim_files)
        if verbose: print('Simulation set {}: {} files'.format(sim, num_files))
        if i == 0:
            generator = num_files * from_simprod(int(sim))
        else:
            generator += num_files * from_simprod(int(sim))
    energy = df_sim['MC_energy'].values
    ptype = df_sim['MC_type'].values
    num_ptypes = np.unique(ptype).size
    areas = 1.0 / generator(energy, ptype)
    binwidth = 2 * np.pi * (1 - np.cos(40 *
                                       (np.pi / 180))) * np.diff(energy_bins)
    eff_area = np.histogram(energy, weights=areas,
                            bins=energy_bins)[0] / binwidth
    eff_area_error = np.sqrt(
        np.histogram(energy, bins=energy_bins, weights=areas**2)[0]) / binwidth

    energy_midpoints = (energy_bins[1:] + energy_bins[:-1]) / 2

    return eff_area / num_ptypes, eff_area_error / num_ptypes, energy_midpoints
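
A plotting sketch for the values returned above, assuming `df_sim` comes from the same simulation loader used in the function; the bin edges are illustrative:

import numpy as np
import matplotlib.pyplot as plt

energy_bins = np.logspace(5, 8, 31)  # illustrative bin edges in GeV
eff_area, eff_area_err, e_mid = get_effective_area(df_sim, energy_bins)

plt.errorbar(e_mid, eff_area, yerr=eff_area_err, fmt='.')
plt.xscale('log')
plt.xlabel('MC energy [GeV]')
plt.ylabel('Effective area')
plt.show()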
Example #4
def get_effective_area(df, cut_mask=None):

    if cut_mask is not None:
        df_cut = df[cut_mask]
    else:
        df_cut = df

    print('Calculating effective area...')
    simlist = np.unique(df_cut['sim'])
    num_files_dict = {
        '7006': 30000,
        '7007': 30000,
        '7579': 12000,
        '7784': 12000
    }
    for i, sim in enumerate(simlist):
        if i == 0:
            generator = num_files_dict[sim] * from_simprod(int(sim))
        else:
            generator += num_files_dict[sim] * from_simprod(int(sim))
    energy = df_cut['MC_energy'].values
    ptype = df_cut['MC_type'].values
    areas = 1.0 / generator(energy, ptype)
    energy_bins = np.logspace(5.0, 9.51, 75)
    energy_midpoints = (energy_bins[1:] + energy_bins[:-1]) / 2
    # binwidth = 2*np.pi*(1-np.cos(np.arccos(0.8)))*np.diff(energy_bins)
    binwidth = 2 * np.pi * (1 - np.cos(40 *
                                       (np.pi / 180))) * np.diff(energy_bins)
    # binwidth = 4*np.pi*np.diff(energy_bins)
    eff_area = np.histogram(energy, weights=areas,
                            bins=energy_bins)[0] / binwidth
    eff_area_error = np.sqrt(
        np.histogram(energy, bins=energy_bins, weights=areas**2)[0]) / binwidth

    return eff_area, eff_area_error, energy_midpoints
Example #5
def generate_generator(dataset_number, n_files, outpath=None):
    if isinstance(dataset_number, Iterable) and isinstance(n_files, Iterable):
        if len(dataset_number) != len(
                np.flatnonzero(np.asarray(dataset_number))):
            print("At least one of the present datasets of this type doesn't "
                  'have a generator. The weighting is done with OneWeight and '
                  'only the current dataset is taken into account for '
                  'the weighting!')
            return None
        if len(dataset_number) != len(n_files):
            raise ValueError('Dataset_number and n_files have to be the same '
                             'length if both are supposed to be Iterables.')
        else:
            for i in range(len(dataset_number)):
                if i == 0:
                    generator = from_simprod(dataset_number[i]) * n_files[i]
                else:
                    generator += from_simprod(dataset_number[i]) * n_files[i]
    elif (isinstance(dataset_number, int) or
          isinstance(dataset_number, float)) and \
         (isinstance(n_files, int) or
          isinstance(n_files, float)):
        generator = from_simprod(dataset_number) * n_files
    else:
        raise ValueError('Dataset_number and n_files either have to be both '
                         'numbers (int or float) or be both Iterables of the '
                         'same length.')
    if outpath is not None:
        with open(outpath, 'wb') as open_file:  # binary mode required for pickle
            pickle.dump(generator, open_file, protocol=2)
    return outpath
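
A usage sketch; note the function pickles the combined generator and returns the output path rather than the generator itself. The dataset numbers and file counts are the 2012 benchmark sets that appear elsewhere on this page:

import pickle

path = generate_generator(dataset_number=[11029, 11069, 11070],
                          n_files=[3190, 3920, 997],
                          outpath='generator.pkl')

# Reload later without querying the simprod database again.
with open(path, 'rb') as f:
    generator = pickle.load(f)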
Example #6
def calc_gen_ow(frame, gcdfile):
    soft = from_simprod(11029)
    hard_lowE = from_simprod(11069)
    hard_highE = from_simprod(11070)
    generator = 3190 * soft + 3920 * hard_highE + 997 * hard_lowE
    dataset = str(frame['I3EventHeader'].run_id)[0:5]
    ow = generator(frame['MCPrimary1'].energy, frame['I3MCWeightDict']['PrimaryNeutrinoType'],
                   np.cos(frame['MCPrimary1'].dir.zenith))/weight_info[dataset]['nevents']
    return ow
Example #7
def calculate_effective_area_vs_energy(df_sim, energy_bins, verbose=True):
    '''Calculate effective area vs. energy from simulation

    Parameters
    ----------
    df_sim : pandas.DataFrame
        Simulation DataFrame returned from comptools.load_sim.
    energy_bins : array-like
        Energy bins (in GeV) that will be used for calculation.
    verbose : bool, optional
        Option for verbose output (default is True).

    Returns
    -------
    eff_area : numpy.ndarray
        Effective area for each bin in energy_bins
    eff_area_error : numpy.ndarray
        Statistical uncertainty on the effective area for each bin in
        energy_bins.
    energy_midpoints : numpy.ndarray
        Midpoints of energy_bins. Useful for plotting effective area versus
        energy.

    '''

    if verbose:
        print('Calculating effective area...')

    simlist = np.unique(df_sim['sim'])
    # # Get the number of times each composition is present
    # comp_counter = collections.Counter([sim_to_comp(sim) for sim in simlist])
    # print('comp_counter = {}'.format(comp_counter))
    for i, sim in enumerate(simlist):
        num_files = len(level3_sim_files(sim))
        if verbose:
            print('Simulation set {}: {} files'.format(sim, num_files))
        if i == 0:
            generator = num_files*from_simprod(int(sim))
        else:
            generator += num_files*from_simprod(int(sim))

    energy = df_sim['MC_energy'].values
    ptype = df_sim['MC_type'].values
    # num_ptypes = 2
    num_ptypes = np.unique(ptype).size
    cos_theta = np.cos(df_sim['MC_zenith']).values
    areas = 1.0/generator(energy, ptype, cos_theta)
    # binwidth = 2*np.pi*(1-np.cos(40*(np.pi/180)))*np.diff(energy_bins)
    binwidth = 2*np.pi*(1-np.cos(40*(np.pi/180)))*np.diff(energy_bins)*num_ptypes
    eff_area = np.histogram(energy, weights=areas, bins=energy_bins)[0]/binwidth
    eff_area_error = np.sqrt(np.histogram(energy, bins=energy_bins, weights=areas**2)[0])/binwidth

    energy_midpoints = (energy_bins[1:] + energy_bins[:-1]) / 2

    return eff_area, eff_area_error, energy_midpoints
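
Usage mirrors Example #3; a brief sketch, assuming `df_sim` comes from comptools.load_sim as in the docstring (note this variant also feeds cos(zenith) into the generator internally):

import numpy as np

energy_bins = np.logspace(5, 8, 31)  # illustrative bin edges in GeV
eff_area, eff_area_err, e_mid = calculate_effective_area_vs_energy(
    df_sim, energy_bins)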
Example #8
File: weighting.py Project: achim1/pyevsel
def GetGenerator(datasets):
    """
    datasets must be a dict of dataset_id : number_of_files

    Args:
        datasets (dict): Query the database for these datasets.
                         dict dataset_id -> number of files

    Returns (icecube.weighting...): Generation probability object
    """

    generators = []
    for k in datasets.keys():
        nfiles = datasets[k]
        generator = from_simprod(k)
        # depending on the version of the
        # weighting module, either nfiles,generator
        # or just generator is returned
        if isinstance(generator,tuple):
            generator = generator[1]

        generators.append(nfiles*generator)

    generator = reduce(lambda x,y : x+y, generators)
    return generator
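
A usage sketch for GetGenerator; the dataset-to-file-count mapping is illustrative:

# Combine two CORSIKA sets into one generation-probability object.
generator = GetGenerator({11058: 32000, 11057: 635})
# Evaluate like the other examples on this page: generator(energy, ptype)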
Example #9
def getWeights(s, config, badRunFile):

    # IC86 simulation files are already weighted
    if config in ['IC86','IC86-II','IC86-III']:
        return None

    sim = int(simFunctions.cfg2sim(config))
    from icecube.weighting.fluxes import Hoerandel5
    from icecube.weighting.weighting import from_simprod

    # Load bad simulation files
    with open(badRunFile, 'r') as f:
        badFiles = f.readlines()
        badFiles = [l.strip() for l in badFiles if str(sim) in l.split('/')]
        nbad = len(badFiles)

    # Make generator
    nfiles, generator = from_simprod(sim)
    nfiles -= nbad
    generator *= nfiles
    flux = Hoerandel5()

    print('Calculating weights...')
    weights = flux(s['energy'], s['type']) / \
            generator(s['energy'], s['type'])

    # Eliminate CNO and MgSiAl components that weighting can't deal with
    weights[weights!=weights] = 0
    weights[weights==np.inf]  = 0

    return weights
Example #10
def check_oneweight(dataset):
    generator = weighting.from_simprod(dataset)

    url = get_random_filename(dataset)
    try:
        if 'pnfs' in url:
            raise RuntimeError("Can't get %s from convey" % url)
        frame = dataio.I3File(url).pop_daq()
    except RuntimeError as e:
        icetray.logging.log_error(str(e))
        return
    if frame is None:
        icetray.logging.log_error('Could not read ' + url)
        return
    else:
        icetray.logging.log_info("Got " + url)

    nu = [p for p in frame['I3MCTree'].primaries if p.is_neutrino][0]
    icetray.logging.log_info(str(nu))
    wdict = frame['I3MCWeightDict']
    mine = wdict['TotalInteractionProbabilityWeight'] / generator(
        nu.energy, nu.type, math.cos(nu.dir.zenith))
    # OneWeight is in units of cm^2, and must be normalized to the number of
    # neutrinos or antineutrinos ()
    theirs = wdict['OneWeight'] / (1e4 * wdict['NEvents'] / 2.)

    assert_array_almost_equal_nulp(mine, theirs, 4)
Example #11
def get_sim_weights(df_sim):

    simlist = np.unique(df_sim['sim'])
    print('simlist = {}'.format(simlist))
    for i, sim in enumerate(simlist):
        gcd_file, sim_files = comp.simfunctions.get_level3_sim_files(sim)
        num_files = len(sim_files)
        print('Simulation set {}: {} files'.format(sim, num_files))
        if i == 0:
            generator = num_files * from_simprod(int(sim))
        else:
            generator += num_files * from_simprod(int(sim))
    energy = df_sim['MC_energy'].values
    ptype = df_sim['MC_type'].values
    num_ptypes = np.unique(ptype).size
    cos_theta = np.cos(df_sim['MC_zenith']).values
    weights = 1.0 / generator(energy, ptype, cos_theta)

    return weights
Example #12
 def __getitem__(self, dataset):
     dataset = int(dataset)
     if not dataset in self:
         if self.readonly:
             raise KeyError(
                 'Dataset %s is not in the cache. Pre-populate the cache (%s) before using it non-interactively.'
                 % (dataset, self.filename))
         from icecube.weighting.weighting import from_simprod
         self[dataset] = from_simprod(int(dataset))
     return dict.__getitem__(self, dataset)
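
For context, a minimal sketch of the kind of dict-subclass cache the method above could belong to; the class name, constructor signature, and pickle persistence are assumptions, not the original implementation:

import pickle

class GeneratorCache(dict):
    """Hypothetical dict-backed cache: dataset id -> generator."""

    def __init__(self, filename, readonly=False):
        dict.__init__(self)
        self.filename = filename
        self.readonly = readonly
        try:
            # Warm the cache from disk if a pickle already exists.
            with open(filename, 'rb') as f:
                self.update(pickle.load(f))
        except IOError:
            pass  # start empty

    # __getitem__ from the example above would be added here.

    def save(self):
        with open(self.filename, 'wb') as f:
            pickle.dump(dict(self), f, protocol=2)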
Example #13
def sim_to_comp(sim):
    # Will utilize the weighting project found here
    # http://software.icecube.wisc.edu/documentation/projects/weighting

    # Query database to extract composition from simulation set
    generator = from_simprod(int(sim))
    # Ensure that there is only one composition
    assert len(generator.spectra) == 1
    composition = generator.spectra.keys()[0].name

    return composition
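
A usage sketch; the simulation set numbers are taken from other examples on this page, and the composition names depend on the simprod database:

# Map each simulation set to its single primary composition.
for sim in [7006, 7579, 7784]:
    print('{}: {}'.format(sim, sim_to_comp(sim)))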
Example #14
def one_weight_builder_2012(prime_E,
                            prime_Type,
                            prime_coszen,
                            total_weight,
                            ds_nums=[11029, 11069, 11070],
                            ds_nfiles=[3190, 3920, 997]):
    """builds OneWeights for training on the combined sim sets 11029, 11069 and 11070.
    the generator is basically the #events per energy range
    prime_E: ["MCPrimary1"].energy
    prime_Type: ["MCPrimary1"].type
    prime_coszen: cos(["MCPrimary1"].dir.zenith)
    total_weight: ["I3MCWeightDict"]["TotalInteractionProbabilityWeight"]
    returns the OneWeight/E for this specific event, i.e. weighted with an 
    E**-1 flux for constant weight in log bins
    """

    generator_sum = np.sum([
        from_simprod(ds_num) * ds_nfiles[i] for i, ds_num in enumerate(ds_nums)
    ])
    return total_weight / (generator_sum(
        prime_E, particle_type=prime_Type, cos_theta=prime_coszen) * prime_E)
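
A usage sketch with hypothetical scalar inputs standing in for the frame values named in the docstring; building the generator sum queries the simprod database for the default datasets:

ow = one_weight_builder_2012(prime_E=1e5,       # MCPrimary1 energy in GeV
                             prime_Type=14,     # PDG code, here NuMu
                             prime_coszen=0.3,
                             total_weight=1e-4)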
Example #15
    def _get_flux_and_norm(self, frame, flux_name, dataset, CORSIKA):
        if isinstance(dataset, str):
            dataset = int(dataset)

        # obtain needed values 
        ptype = frame['I3MCPrimary'].type
        energy = frame['I3MCPrimary'].energy

        if not CORSIKA:
            # obtain needed values 
            zenith = frame['I3MCPrimary'].dir.zenith
            one_weight = frame['I3MCWeightDict']['OneWeight']
            n_events = frame['I3MCWeightDict']['NEvents']

            # look up flux for given values and chosen flux model
            flux = NuFlux.makeFlux(flux_name).getFlux
            flux = flux(ptype, energy, cos(zenith)) * one_weight

            # check if neutrino or anti-neutrino is present
            # need to use neutrino-/anti-neutrino-ratio of chosen data set
            if 'Bar' not in str(ptype):
                family_ratio = 0.7
            else:
                family_ratio = 0.3

            # normalize weight to given amount of files, produced events and
            # particle/anti-particle ratio
            norm = (n_events * family_ratio) 
        else:
            # CORSIKA
            # look up flux for given values and chosen flux model
            flux = getattr(fluxes, flux_name)()
            flux = flux(energy, ptype)

            norm = from_simprod(dataset)
            norm = norm(energy, ptype)

        return flux, norm
Example #17
    files += f

print(len(files))

event_dtype = np.dtype([
    ("set", np.uint32),
    ("energy", np.float32),
    ("qtot", np.float32),
    ("ptype", np.uint32),
    ("weight", np.float32),
])

generator = None
for dataset_number_i, n_files_i in dsets.items():
    if generator is None:
        generator = n_files_i * from_simprod(dataset_number_i)
    else:
        print(dataset_number_i)
        generator += n_files_i * from_simprod(dataset_number_i)


def TruePtype(ptype):
    s = str(ptype)[:6]
    if s == '100002':
        ptype = 1000020040
    elif s == '100007':
        ptype = 1000070140
    elif s == '100008':
        ptype = 1000080160
    elif s == '100013':
        ptype = 1000130270
Example #18
def WeightEvents(tray,
                 name,
                 infiles,
                 dataset_type,
                 dataset_n_files,
                 dataset_n_events_per_run,
                 dataset_number,
                 muongun_equal_generator=False,
                 key='weights',
                 use_from_simprod=False,
                 add_mceq_weights=False,
                 mceq_kwargs={},
                 add_nuveto_pf=False,
                 nuveto_kwargs={},
                 add_mese_weights=False,
                 add_atmospheric_self_veto=False,
                 check_n_files=True):
    """Calculate weights and add to frame

    Parameters
    ----------
    tray : Icetray
        The IceCube Icetray
    name : str
        Name of the I3Segment
    infiles : list of str
        A list of the input file paths.
    dataset_type : str
        Defines the kind of data: 'nugen', 'genie', 'muongun', 'corsika'
    dataset_n_files : int
        Number of files in dataset. For MuonGun this is overwritten by the
        number of found generators. In this case, this value is only used
        to check if it matches the found n_files (if check is performed).
    dataset_n_events_per_run : int
        Number of events per run. Needed for MESE weights.
    dataset_number : int
        Corsika dataset number.
    muongun_equal_generator : bool, optional
        If True, it is assumed that all MuonGun generator objects are the same.
        In this case, only the first found MuonGun generator will be used
        and multiplied by the provided 'dataset_n_files'.
    key : str
        Defines the key to which the weight dictionary will be booked.
    use_from_simprod : bool, optional
        If True, weights will be calculated by obtaining a generator via
        from_simprod. If False, weights will be calculated based on the
        I3MCWeightDict for NuGen or CorsikaWeightMap (Corsika).
    add_mceq_weights : bool, optional
        If True, MCEq weights will be added. Make sure to add an existing
        cache file, otherwise this may take very long!
    mceq_kwargs : dict, optional
        Keyword arguments passed on to MCEq.
    add_nuveto_pf : bool, optional
        If True, nuVeto passing fractions will be added. Make sure to add
        an existing cache file, otherwise this may take very long!
    nuveto_kwargs : dict, optional
        Keyword arguments passed on to nuVeto.
    add_mese_weights : bool, optional
        If true, weights used for MESE 7yr cascade paper will be added.
        (As well as an additional filtering step)
    add_atmospheric_self_veto : bool, optional
        If True, the atmospheric self-veto passing fractions will be calculated
        and written to the frame.
    check_n_files : bool or list of str, optional
        If true, check if provided n_files argument seems reasonable.
        If list of str and if dataset_type is in the defined list:
        check if provided n_files arguments seems reasonable.
        The list of str defines the datatypes (in lower case) for which the
        n_files will be checked.

    Raises
    ------
    ValueError
        If dataset_type is not one of 'nugen', 'genie', 'muongun',
        'corsika'.
    """
    dataset_type = dataset_type.lower()

    if dataset_type == 'muongun':

        # get fluxes and generator
        fluxes, flux_names = fluxes_muongun.get_fluxes_and_names()
        generator, n_files = fluxes_muongun.harvest_generators(
            infiles,
            n_files=dataset_n_files,
            equal_generators=muongun_equal_generator)

    elif dataset_type == 'corsika':
        fluxes, flux_names = fluxes_corsika.get_fluxes_and_names()

        n_files = len([f for f in infiles if 'gcd' not in f.lower()])
        if use_from_simprod:
            generator = from_simprod(dataset_number) * dataset_n_files
        else:
            generator = None

    elif dataset_type in ['nugen', 'genie']:
        fluxes, flux_names = fluxes_neutrino.get_fluxes_and_names()

        generator = None
        n_files = len([f for f in infiles if 'gcd' not in f.lower()])

    else:
        raise ValueError('Unknown dataset_type: {!r}'.format(dataset_type))

    # check if found number of events seems reasonable
    perform_check = False
    if isinstance(check_n_files, bool):
        perform_check = check_n_files
    else:
        if dataset_type in check_n_files:
            perform_check = True
    if perform_check:
        assert n_files == dataset_n_files, \
            'N_files do not match: {!r} != {!r}'.format(n_files,
                                                        dataset_n_files)

    # Use the number of found generators for MuonGun
    if dataset_type == 'muongun':
        dataset_n_files = n_files

    tray.AddModule(
        AddWeightMetaData,
        'AddWeightMetaData',
        NFiles=dataset_n_files,
        NEventsPerRun=dataset_n_events_per_run,
        Key=key,
    )

    tray.AddSegment(
        do_the_weighting,
        'do_the_weighting',
        fluxes=fluxes,
        flux_names=flux_names,
        dataset_type=dataset_type,
        dataset_n_files=dataset_n_files,
        generator=generator,
        key=key,
    )

    if add_mceq_weights and dataset_type in ['nugen']:
        from ic3_labels.weights.modules import AddMCEqWeights

        tray.AddModule(AddMCEqWeights,
                       'AddMCEqWeights',
                       n_files=dataset_n_files,
                       **mceq_kwargs)

    if add_nuveto_pf and dataset_type in ['nugen']:
        from ic3_labels.weights.modules import AddNuVetoPassingFraction

        tray.AddModule(AddNuVetoPassingFraction, 'AddNuVetoPassingFraction',
                       **nuveto_kwargs)

    if add_mese_weights and dataset_type in ['muongun', 'nugen', 'genie']:
        from ic3_labels.weights.mese_weights import MESEWeights

        tray.AddModule(
            MESEWeights,
            'MESEWeights',
            DatasetType=dataset_type,
            DatasetNFiles=dataset_n_files,
            DatasetNEventsPerRun=dataset_n_events_per_run,
            OutputKey='{}_mese'.format(key),
        )

    if add_atmospheric_self_veto and dataset_type in ['nugen', 'genie']:
        from ic3_labels.weights import self_veto

        tray.AddModule(
            self_veto.AtmosphericSelfVetoModule,
            'AtmosphericSelfVetoModule',
            DatasetType=dataset_type,
        )
    #     return False
    #    else:
    return True


Example #19
def print_info(phy_frame):
    print(
        'run_id {} ev_id {} dep_E {} classification {}  signature {} track_length {}'
        .format(phy_frame['I3EventHeader'].run_id,
                phy_frame['I3EventHeader'].event_id, phy_frame['depE'].value,
                phy_frame['classification'].value,
                phy_frame['signature'].value, phy_frame['track_length'].value))
    return


generator = 1000 * weighting.from_simprod(
    11499) + 1785 * weighting.from_simprod(11362)
flux = GaisserH4a()


def add_weighted_primary(phy_frame):
    if not 'MCPrimary' in phy_frame.keys():
        get_weighted_primary(phy_frame)
    return


def corsika_weight(phy_frame):
    if 'I3MCWeightDict' in phy_frame:
        return
    energy = phy_frame['MCPrimary'].energy
    ptype = phy_frame['MCPrimary'].pdg_encoding
    weight = flux(energy, ptype) / generator(energy, ptype)
Example #20
        get_weighted_primary(phy_frame)
    return


def get_stream(phy_frame):
    if reco_q.is_data(phy_frame):
        return True
    return True


#    if (phy_frame['I3EventHeader'].sub_event_stream == 'Final') & (phy_frame['I3EventHeader'].sub_event_id==0):
#        return True
#    else:
#        return False

soft = weighting.from_simprod(11029)
hard_lowE = weighting.from_simprod(11069)
hard_highE = weighting.from_simprod(11070)
generator = 1000 * soft + 1000 * hard_lowE + 1000 * hard_highE
unit = I3Units.cm2 / I3Units.m2


def calc_gen_ow(frame):
    if 'I3MCWeightDict' not in frame:
        return True
    if reco_q.is_data(frame):
        return True
    gen_w = generator(frame['MCPrimary1'].energy,
                      frame['I3MCWeightDict']['PrimaryNeutrinoType'],
                      np.cos(frame['MCPrimary1'].dir.zenith))
    pint = frame['I3MCWeightDict']['TotalWeight']
Example #21
        return True
    else:
        return False


def avg_paraboloid(phy_frame):
    if 'SplineMPEParaboloidFitParams' not in phy_frame.keys():
        return
    err1 = phy_frame['SplineMPEParaboloidFitParams'].pbfErr1
    err2 = phy_frame['SplineMPEParaboloidFitParams'].pbfErr2
    avg_paraboloid = np.hypot(err1, err2) / np.sqrt(2)
    phy_frame.Put("avg_paraboloid", dataclasses.I3Double(avg_paraboloid))
    return


soft = weighting.from_simprod(11029)
hard_lowE = weighting.from_simprod(11069)
hard_highE = weighting.from_simprod(11070)
generator = 3190 * soft + 3920 * hard_lowE + 997 * hard_highE
unit = I3Units.cm2 / I3Units.m2


def calc_gen_ow(frame):
    if reco_q.is_data(frame):
        return True
    gen_w = generator(frame['MCPrimary1'].energy,
                      frame['I3MCWeightDict']['PrimaryNeutrinoType'],
                      np.cos(frame['MCPrimary1'].dir.zenith))
    pint = frame['I3MCWeightDict']['TotalWeight']
    ow = pint / gen_w / unit
    print('ow {}'.format(ow))
Example #22
from icecube import icetray, dataclasses, dataio
from icecube.hdfwriter import I3HDFWriter
from icecube.weighting import weighting
from I3Tray import I3Tray

from compare_oneweight import get_random_filename
import numpy

icetray.logging.set_level_for_unit('Python', 'INFO')

tray = I3Tray()

tray.AddModule('I3Reader', 'reader', filenamelist=[get_random_filename(opts.dataset)])
# do an end run around the cache. do not do this in production. ever.
opts.dataset = weighting.from_simprod(opts.dataset)

from icecube.weighting import CORSIKAWeightCalculator
from icecube.weighting import fluxes
tray.AddModule(CORSIKAWeightCalculator, 'GaisserH3aWeight', Dataset=opts.dataset, Flux=fluxes.GaisserH3a())

def check_weight(frame):
    assert 'GaisserH3aWeight' in frame
    weight = frame['GaisserH3aWeight'].value
    icetray.logging.log_info('GaisserH3aWeight: %f' % (weight))
    assert numpy.isfinite(weight)
    assert weight > 0

tray.Add(check_weight, Streams=[icetray.I3Frame.DAQ])

tray.AddModule('TrashCan', 'YesWeCan')
Example #23
        selection_mask *= cut_dict[key]

    df = df[selection_mask]

    # Calculate simulation event weights
    print('\nCalculating simulation event weights...\n')
    simlist = np.unique(df['sim'])
    num_files_dict = {
        '7006': 30000,
        '7007': 30000,
        '7579': 12000,
        '7784': 12000
    }
    for i, sim in enumerate(simlist):
        if i == 0:
            generator = num_files_dict[sim] * from_simprod(int(sim))
        else:
            generator += num_files_dict[sim] * from_simprod(int(sim))
    MC_energy = df['MC_energy'].values
    MC_type = df['MC_type'].values
    flux = GaisserH3a()
    df['weights_H3a'] = flux(MC_energy, MC_type) / \
        generator(MC_energy, MC_type)
    flux = GaisserH4a()
    df['weights_H4a'] = flux(MC_energy, MC_type) / \
        generator(MC_energy, MC_type)

    # Train classifier
    feature_list = [
        'reco_log_energy', 'InIce_charge', 'reco_cos_zenith', 'lap_chi2',
        'MC_log_energy', 'weights_H3a'
Example #24
### where the files are
corsika = '/home/fhuang/scratch/fhuang/corsika/Matt_L3_corsika_11058.hdf5'
iccdata = '/data/ana/LE/unblinded/DRAGON_NuTau_appearance/muons_icc/Matt_L5b_icc_data_IC86_2_3_4_new_reco_def2.hdf5'

### open them
cor = tables.open_file (corsika, 'r')
icc = tables.open_file (iccdata, 'r')

############################################
#### calculate all the weights (may as well)
############################################
### corsika weighting
##  flux model
flux = GaisserH4a()
##  generator
generator = from_simprod(11058)
generator *= 32000.
##  truth info
energy = np.array (cor.root.CorsikaWeightMap.cols.PrimaryEnergy[:])
ptype  = np.array (cor.root.CorsikaWeightMap.cols.PrimaryType[:])
##  get weights
cor_weights = flux(energy, ptype)/generator(energy, ptype)

### icc data weighting
##  all icc background are scaled by 0.146
dragon_livetime = 2.5 * 3600. * 365.24 * 24.
icc_length  = len (icc.root.IC86_Dunkman_L6_PegLeg_MultiNest8D_NumuCC.cols.energy)
icc_weights = np.ones (icc_length) / 0.146 / dragon_livetime

############################################
#### Start pickling L4, 5, 6
Example #25
        return False
    else:
        return True


def print_info(phy_frame):
    print(
        'run_id {} ev_id {} dep_E {} classification {}  signature {} track_length {}'
        .format(phy_frame['I3EventHeader'].run_id,
                phy_frame['I3EventHeader'].event_id, phy_frame['depE'].value,
                phy_frame['classification'].value,
                phy_frame['signature'].value, phy_frame['track_length'].value))
    return


low_e = weighting.from_simprod(11499)
high_e = weighting.from_simprod(11057)
generator = 1000 * low_e + 635 * high_e
flux = GaisserH4a()


def add_weighted_primary(phy_frame):
    if not 'MCPrimary' in phy_frame.keys():
        get_weighted_primary(phy_frame, MCPrimary='MCPrimary')
    return


def corsika_weight(phy_frame):
    if 'I3MCWeightDict' in phy_frame:
        return
    energy = phy_frame['MCPrimary'].energy