Example #1
def get_interaction_neutrino_rec(frame,
                                 primary,
                                 convex_hull=None,
                                 extend_boundary=0):
    """Get the first neutrino daughter of a primary neutrino, that interacted
    inside the convex hull.

    The I3MCTree is traversed to find the first interaction inside the convex
    hull.

    Parameters
    ----------
    frame : I3Frame
        Current I3Frame needed to retrieve I3MCTree
    primary : I3Particle
        Primary Nu Particle for which the cascade interaction is returned.
    convex_hull : scipy.spatial.ConvexHull, optional
        Defines the desired convex volume.
        If None, the IceCube detector volume is assumed.
    extend_boundary : float, optional
        Extend boundary of IceCube detector by this distance [in meters].
        This option is only used if convex_hull is None, e.g. if the IceCube
        detector is used.

    Returns
    -------
    I3Particle, None
        Returns None if no interaction exists inside the convex hull.
        Returns the found neutrino as an I3Particle.
    """
    if primary is None:
        return None

    mctree = frame['I3MCTree']

    # traverse I3MCTree until first interaction inside the convex hull is found
    daughters = mctree.get_daughters(primary)

    # No daughters found, so no interaction
    if len(daughters) == 0:
        return None

    # check if interaction point is inside
    if convex_hull is None:
        point_inside = geometry.is_in_detector_bounds(
            daughters[0].pos, extend_boundary=extend_boundary)
    else:
        point_inside = geometry.point_is_inside(
            convex_hull,
            (daughters[0].pos.x, daughters[0].pos.y, daughters[0].pos.z))

    if point_inside:
        # interaction is inside the convex hull: neutrino found!
        if primary.is_neutrino:
            return primary
        else:
            return None

    else:
        # daughters are not inside convex hull.
        # Either one of these daughters has secondary particles that have an
        # interaction inside, or there is no interaction within the convex hull

        interaction_neutrinos = []
        for n in daughters:
            # check if this neutrino has interaction inside the convex hull
            neutrino = get_interaction_neutrino_rec(frame, n, convex_hull,
                                                    extend_boundary)
            if neutrino is not None:
                interaction_neutrinos.append(neutrino)

        if len(interaction_neutrinos) == 0:
            # No neutrinos interacting in the convex hull could be found.
            return None

        if len(interaction_neutrinos) > 1:
            print(interaction_neutrinos)
            raise ValueError('Expected only one neutrino to interact!')

        # Found a neutrino that had an interaction inside the convex hull
        return interaction_neutrinos[0]
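
The recursion above only needs a boolean point-in-volume test against the convex hull. As a hedged, self-contained sketch (not the project's geometry module; the helper name point_is_inside_hull, the tolerance, and the toy cube hull are assumptions), such a test could look like this with SciPy alone:

import numpy as np
from scipy.spatial import ConvexHull

def point_is_inside_hull(hull, point, tol=1e-9):
    # A point is inside the hull if it lies on the non-positive side of
    # every facet; each row of hull.equations stores (normal, offset).
    return bool(np.all(np.dot(hull.equations[:, :-1], np.asarray(point))
                       + hull.equations[:, -1] <= tol))

# toy detector volume: a unit cube
corners = np.array([[x, y, z] for x in (0, 1) for y in (0, 1) for z in (0, 1)])
hull = ConvexHull(corners)
print(point_is_inside_hull(hull, (0.5, 0.5, 0.5)))  # True
print(point_is_inside_hull(hull, (2.0, 0.0, 0.0)))  # False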
Example #2
def get_labels(frame,
               convex_hull,
               domPosDict,
               primary,
               pulse_map_string='InIcePulses',
               mcpe_series_map_name='I3MCPESeriesMap',
               is_muongun=False):
    '''Function to get extensive labels for muons, primary and general event
    data.

    Parameters
    ----------
    frame : I3Frame
        The current frame.

    convex_hull : scipy.spatial.ConvexHull
        defining the desired convex volume

    domPosDict : dict
        Dictionary of form (string,key) : (x,y,z)
        for all DOMs.
        string and key are of type int

    primary : I3Particle
        Primary particle

    pulse_map_string : str
        Key of the pulse map in the frame for which the mask should be
        computed.

    mcpe_series_map_name : str
        Key of the MCPE series map in the frame.

    is_muongun : bool
        In case of a MuonGun dataset, the primary neutrino has
        an unknown type and a pdg_encoding of 0.
        Therefore, the I3ParticleID of the primary needs to
        be passed along to sub-functions.
        Technically, this could be done implicitly by setting
        the primary id. However, this would loosen the sanity
        checks. Therefore, an explicit decision to use MuonGun
        is preferred.

    Returns
    -------
    labels : I3MapStringDouble
        Dictionary with all labels
    '''

    if primary is None:
        raise ValueError('Primary does not exist!')

    assert primary.id is not None, 'MuonGunFix will not work if this is not true'

    # Check if MuonGun dataset
    if is_muongun:
        # This loosens the sanity checks, so it is better to use it
        # only if the dataset really is MuonGun.
        # It should work for all datasets, though,
        # as long as a primary exists.

        # make sure it is a MuonGun dataset
        assert primary.type_string == 'unknown', 'Expected unknown, got {}'.format(
            primary.type_string)
        assert primary.pdg_encoding == 0, 'Expected 0, got {}'.format(
            primary.pdg_encoding)

        # set primary particle id
        muongun_primary_neutrino_id = primary.id
    else:
        muongun_primary_neutrino_id = None

    # create empty labelDict
    labels = dataclasses.I3MapStringDouble()

    # get misc info
    misc_info = get_misc_information(frame,
                                     domPosDict,
                                     convex_hull,
                                     pulse_map_string=pulse_map_string,
                                     mcpe_series_map_name=mcpe_series_map_name)
    labels.update(misc_info)

    muons_inside = mu_utils.get_muons_inside(frame, convex_hull)
    labels['NoOfMuonsInside'] = len(muons_inside)

    # get muons
    mostEnergeticMuon = mu_utils.get_most_energetic_muon_inside(
        frame, convex_hull, muons_inside=muons_inside)
    highestEDepositMuon = mu_utils.get_highest_deposit_muon_inside(
        frame, convex_hull, muons_inside=muons_inside)
    mostVisibleMuon = mu_utils.get_most_visible_muon_inside(
        frame,
        convex_hull,
        pulse_map_string=pulse_map_string,
        mcpe_series_map_name=mcpe_series_map_name)
    primaryMuon = mu_utils.get_muon_of_inice_neutrino(
        frame, muongun_primary_neutrino_id=muongun_primary_neutrino_id)

    labels['PrimaryMuonExists'] = primaryMuon is not None
    labels['VisibleStartingTrack'] = False
    for m in [
            mostEnergeticMuon, highestEDepositMuon, mostVisibleMuon,
            primaryMuon
    ]:
        if m:
            if geometry.is_in_detector_bounds(m.pos, extend_boundary=60):
                labels['VisibleStartingTrack'] = True

    # get labels for most energetic muon
    mostEnergeticMuon_info = get_muon_information(
        frame,
        mostEnergeticMuon,
        domPosDict,
        convex_hull,
        pulse_map_string=pulse_map_string)
    for key in mostEnergeticMuon_info.keys():
        labels['MostEnergeticMuon' + key] = mostEnergeticMuon_info[key]

    # # get labels for highest deposit muon
    # if highestEDepositMuon == mostEnergeticMuon:
    #     highestEDepositMuon_info = mostEnergeticMuon_info
    # else:
    #     highestEDepositMuon_info = get_muon_information(frame,
    #             highestEDepositMuon, domPosDict, convex_hull,
    #             pulse_map_string=pulse_map_string)
    # for key in highestEDepositMuon_info.keys():
    #     labels['HighestEDepositMuon'+key] = highestEDepositMuon_info[key]

    # get labels for most visible muon
    if mostVisibleMuon == mostEnergeticMuon:
        mostVisibleMuon_info = mostEnergeticMuon_info
    else:
        mostVisibleMuon_info = get_muon_information(
            frame,
            mostVisibleMuon,
            domPosDict,
            convex_hull,
            pulse_map_string=pulse_map_string)
    for key in mostVisibleMuon_info.keys():
        labels['MostVisibleMuon' + key] = mostVisibleMuon_info[key]

    # get labels for muon from primary
    if primaryMuon == mostEnergeticMuon:
        primaryMuon_info = mostEnergeticMuon_info
    elif primaryMuon == mostVisibleMuon:
        primaryMuon_info = mostVisibleMuon_info
    else:
        primaryMuon_info = get_muon_information(
            frame,
            primaryMuon,
            domPosDict,
            convex_hull,
            pulse_map_string=pulse_map_string)
    for key in primaryMuon_info.keys():
        labels['PrimaryMuon' + key] = primaryMuon_info[key]

    # get labels for primary particle
    primary_info = get_primary_information(
        frame,
        primary,
        domPosDict,
        convex_hull,
        pulse_map_string=pulse_map_string,
        muongun_primary_neutrino_id=muongun_primary_neutrino_id)
    for key in primary_info.keys():
        labels['Primary' + key] = primary_info[key]

    return labels
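
Illustrative only: the label assembly above repeatedly copies per-muon info dictionaries into the output map under a name prefix. A minimal sketch of that prefixing pattern with plain dicts and made-up values (the real code writes into an I3MapStringDouble):

def add_prefixed(labels, prefix, info):
    # copy each entry of `info` into `labels` under a prefixed key
    for key, value in info.items():
        labels[prefix + key] = value
    return labels

labels = {}
add_prefixed(labels, 'MostEnergeticMuon', {'Energy': 1.2e4, 'NoOfHitDOMs': 42})
add_prefixed(labels, 'Primary', {'Energy': 2.5e4, 'Zenith': 1.2})
print(labels)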
Example #3
def get_interaction_neutrino(frame,
                             primary,
                             convex_hull=None,
                             extend_boundary=0,
                             sanity_check=False):
    """Get the first neutrino daughter of a primary neutrino, that interacted
    inside the convex hull.

    The I3MCTree is traversed to find the first interaction inside the convex
    hull.

    Parameters
    ----------
    frame : I3Frame
        Current I3Frame needed to retrieve I3MCTree
    primary : I3Particle
        Primary Nu Particle for which the cascade interaction is returned.
    convex_hull : scipy.spatial.ConvexHull, optional
        Defines the desired convex volume.
        If None, the IceCube detector volume is assumed.
    extend_boundary : float, optional
        Extend boundary of IceCube detector by this distance [in meters].
        This option is only used if convex_hull is None, e.g. if the IceCube
        detector is used.
    sanity_check : bool, optional
        If true, the neutrino is obtained by two different methods and cross
        checked to see if results match.

    Returns
    -------
    I3Particle, None
        Returns None if no interaction exists inside the convex hull.
        Returns the found neutrino as an I3Particle.

    Raises
    ------
    ValueError
        If the sanity check is enabled and the two methods disagree on the
        interaction neutrino.
    """

    mctree = frame['I3MCTree']

    # get first in ice neutrino
    nu_in_ice = None
    for p in mctree:
        if p.is_neutrino and p.location_type_string == 'InIce':
            nu_in_ice = p
            break

    if nu_in_ice is not None:

        # check if nu_in_ice has interaction inside convex hull
        daughters = mctree.get_daughters(nu_in_ice)
        assert len(daughters) > 0, 'Expected at least one daughter!'

        # check if point is inside
        if convex_hull is None:
            point_inside = geometry.is_in_detector_bounds(
                daughters[0].pos, extend_boundary=extend_boundary)
        else:
            point_inside = geometry.point_is_inside(
                convex_hull,
                (daughters[0].pos.x, daughters[0].pos.y, daughters[0].pos.z))
        if not point_inside:
            nu_in_ice = None

    # ---------------
    # Sanity Check
    # ---------------
    if sanity_check:
        nu_in_ice_rec = get_interaction_neutrino_rec(
            frame=frame,
            primary=primary,
            convex_hull=convex_hull,
            extend_boundary=extend_boundary)

        if nu_in_ice_rec != nu_in_ice:
            if (nu_in_ice_rec is None or nu_in_ice is None
                    or nu_in_ice_rec.id != nu_in_ice.id
                    or nu_in_ice_rec.minor_id != nu_in_ice.minor_id):
                raise ValueError('{} != {}'.format(nu_in_ice_rec, nu_in_ice))
    # ---------------

    return nu_in_ice
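
The scan for the first in-ice neutrino above is a "first element matching a predicate" search over the tree. A hedged stand-alone sketch with a toy particle type instead of the real I3MCTree (all names below are assumptions for illustration):

from collections import namedtuple

Particle = namedtuple('Particle', ['is_neutrino', 'location_type_string'])

tree = [Particle(False, 'InIce'),
        Particle(True, 'IceTop'),
        Particle(True, 'InIce')]

# first particle that is a neutrino and located InIce, else None
nu_in_ice = next((p for p in tree
                  if p.is_neutrino and p.location_type_string == 'InIce'),
                 None)
print(nu_in_ice)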
Example #4
def get_primary_information(frame,
                            primary,
                            dom_pos_dict,
                            convex_hull,
                            pulse_map_string='InIcePulses',
                            mcpe_series_map_name='I3MCPESeriesMap',
                            muongun_primary_neutrino_id=None):
    '''Function to get labels for the primary

    Parameters
    ----------
    frame : I3Frame
        The current frame.

    primary : I3Particle
        Primary particle

    dom_pos_dict : dict
        Dictionary of form (string,key) : (x,y,z)
        for all DOMs.
        string and key are of type int

    convex_hull : scipy.spatial.ConvexHull
        defining the desired convex volume

    pulse_map_string : str
        Key of the pulse map in the frame for which the pulses should be
        computed.

    mcpe_series_map_name : str
        Key of the MCPE series map in the frame.

    muongun_primary_neutrino_id : I3ParticleID
        In case of a MuonGun dataset, the primary neutrino has
        an unknown type and a pdg_encoding of 0.
        Therefore, the I3ParticleID of the primary needs to
        be passed along.

    Returns
    -------
    info_dict : dictionary
        Dictionary with all labels
    '''
    info_dict = {}

    # get labels depending on pulse map
    pulse_map = general.get_pulse_map(
        frame,
        primary,
        pulse_map_string=pulse_map_string,
        mcpe_series_map_name=mcpe_series_map_name)

    NoOfHitDOMs = len(pulse_map.keys())
    NoOfPulses = 0
    TotalCharge = 0.
    COG = np.array([0., 0., 0.])

    if NoOfHitDOMs > 0:
        for key in pulse_map.keys():
            for pulse in pulse_map[key]:
                NoOfPulses += 1
                TotalCharge += pulse.charge
                pos = np.array(dom_pos_dict[(key.string, key.om)])
                COG += pos * pulse.charge
        COG = COG / TotalCharge
    COG = dataclasses.I3Position(*COG)

    COGDistanceToBorder = geometry.distance_to_icecube_hull(COG)
    COGDistanceToDeepCore = geometry.distance_to_deepcore_hull(COG)

    # other labels
    daughters = frame['I3MCTree'].get_daughters(primary)
    codes = [p.pdg_encoding for p in daughters]
    if -13 in codes or 13 in codes:
        # CC Interaction: nu + N -> mu + hadrons
        IsCCInteraction = True
    else:
        # NC Interaction: nu + N -> nu + hadrons
        IsCCInteraction = False

    if geometry.is_in_detector_bounds(daughters[0].pos):
        # Interaction of Primary is in Detector
        IsStartingTrack = True
    else:
        # Interaction outside of Detector
        IsStartingTrack = False
    InDetectorEnergyLoss = get_energy_deposited_including_daughters(
        frame,
        convex_hull,
        primary,
        muongun_primary_neutrino_id=muongun_primary_neutrino_id)

    # add labels to info_dict
    info_dict['NoOfHitDOMs'] = NoOfHitDOMs
    info_dict['NoOfPulses'] = NoOfPulses
    info_dict['TotalCharge'] = TotalCharge

    info_dict['COGDistanceToBorder'] = COGDistanceToBorder
    info_dict['COGDistanceToDeepCore'] = COGDistanceToDeepCore
    info_dict['COGx'] = COG.x
    info_dict['COGy'] = COG.y
    info_dict['COGz'] = COG.z

    info_dict['Azimuth'] = primary.dir.azimuth
    info_dict['Zenith'] = primary.dir.zenith
    info_dict['Energy'] = primary.energy
    info_dict['InDetectorEnergyLoss'] = InDetectorEnergyLoss
    info_dict['IsCCInteraction'] = IsCCInteraction
    info_dict['IsStartingTrack'] = IsStartingTrack

    return info_dict
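
The charge-weighted center of gravity (COG) computed above reduces to a weighted mean of hit DOM positions. A minimal NumPy sketch with made-up positions and charges (not the real DOM geometry):

import numpy as np

dom_positions = np.array([[0., 0., -10.],
                          [10., 0., 0.],
                          [0., 10., 10.]])
charges = np.array([1.5, 0.5, 3.0])

total_charge = charges.sum()
cog = (dom_positions * charges[:, None]).sum(axis=0) / total_charge
print(total_charge, cog)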
Example #5
def get_total_deposited_energy(frame,
                               convex_hull=None,
                               extend_boundary=None,
                               cylinder_ext=None):
    """Get total deposited energy in an event.

    Traverses the I3MCTree and collects energies of particles.
    The particles are handled as follows:

        dark particles: ignore
        particles not InIce or in convex hull (if provided): ignore
        neutrinos: ignore
        taus and muons: ignore
            --> energy losses and decay products are collected
            --> ionisation energy losses are disregarded
            --> low energy muons created in cascades are disregarded
        electron, hadrons, ...: collect EM equivalent energy

    Note: the InIce volume is rather large. To provide additional and
    more stringent definitions of the detector volume, a convex hull, an
    extended IceCube boundary, or a simple cut on the radius can be applied.
    In this case, the InIce check will be performed in addition to:

        If convex_hull is not None: check if particle is in convex hull
        If extend_boundary is not None: check if particle is in extended
                                        IceCube boundary.
        If cylinder_ext is not None: check if particle is within the extended
                                     cylinder (z +- 500 + ext, r=500 + ext)


    Parameters
    ----------
    frame : I3Frame
        Current I3Frame.
    convex_hull : scipy.spatial.ConvexHull or None, optional
        Defines the desired convex volume to check whether an energy deposit
        was inside the detector volume.
    extend_boundary : float or None, optional
        Use a convex hull around the IceCube detector and extend it by this
        distance [in meters] to check if an energy deposit was in the detector
    cylinder_ext : float or None, optional
        If provided, energy losses with a radius in x-y > 500 + cylinder_ext
        or abs(z) > 500 + cylinder_ext will be discarded.

    Returns
    -------
    double
        The deposited energy.
    """
    deposited_energy = 0.

    for p in frame["I3MCTree"]:

        # skip dark particles
        if p.shape == dataclasses.I3Particle.ParticleShape.Dark:
            continue

        # skip neutrino: the energy is not visible
        if p.is_neutrino:
            continue

        # skip muons and taus:
        # --> energy losses and decay products are still collected
        # --> ionisation energy losses are disregarded
        # --> low energy muons created in cascades are disregarded
        if p.type in [
                dataclasses.I3Particle.ParticleType.MuPlus,
                dataclasses.I3Particle.ParticleType.MuMinus,
                dataclasses.I3Particle.ParticleType.TauMinus,
                dataclasses.I3Particle.ParticleType.TauPlus
        ]:
            continue

        # Check if the energy deposit was inside the detector.
        # Ignore it, if it was outside.

        if p.location_type != dataclasses.I3Particle.LocationType.InIce:
            # skip particles that are way outside of the detector volume
            continue

        # use a basic cylinder to determine if particle was inside
        if cylinder_ext is not None:
            if (np.abs(p.pos.z) > 500 + cylinder_ext
                    or np.sqrt(p.pos.x**2 + p.pos.y**2) > 500 + cylinder_ext):
                continue

        if convex_hull is not None:
            # use convex hull to determine if inside detector
            if not geometry.point_is_inside(convex_hull,
                                            (p.pos.x, p.pos.y, p.pos.z)):
                continue

        if extend_boundary is not None:
            # use IceCube boundary + extend_boundary [meters] to check
            if not geometry.is_in_detector_bounds(
                    p.pos, extend_boundary=extend_boundary):
                continue

        # scale energy of cascades to EM equivalent
        deposited_energy += convert_to_em_equivalent(p)

    return deposited_energy
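
The cylinder_ext cut above is a containment test against a cylinder of half height 500 m and radius 500 m, both grown by the extension. A self-contained sketch with toy numbers (the helper name is an assumption, not part of the module above):

import numpy as np

def inside_extended_cylinder(pos, cylinder_ext, half_height=500., radius=500.):
    # keep the point if |z| and the x-y radius are within the extended cylinder
    x, y, z = pos
    return bool(np.abs(z) <= half_height + cylinder_ext
                and np.hypot(x, y) <= radius + cylinder_ext)

print(inside_extended_cylinder((100., 200., -450.), cylinder_ext=100.))  # True
print(inside_extended_cylinder((700., 0., 0.), cylinder_ext=100.))       # False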
Example #6
def get_cascade_of_primary_nu(frame, primary,
                              convex_hull=None,
                              extend_boundary=200,
                              sanity_check=False):
    """Get cascade of a primary particle.

    The I3MCTree is traversed to find the first interaction inside the convex
    hull.

    Parameters
    ----------
    frame : I3Frame
        Current I3Frame needed to retrieve I3MCTree
    primary : I3Particle
        Primary Nu Particle for which the cascade interaction is returned.
    convex_hull : scipy.spatial.ConvexHull, optional
        Defines the desired convex volume.
        If None, the IceCube detector volume is assumed.
    extend_boundary : float, optional
        Extend boundary of IceCube detector by this distance [in meters].
        This option is only used if convex_hull is None, e.g. if the IceCube
        detector is used.
    sanity_check : bool, optional
        If true, the neutrino is obtained by two different methods and cross
        checked to see if results match.

    Returns
    -------
    I3Particle, None
        Returns None if no cascade interaction exists inside the convex hull.
        Returns the found cascade as an I3Particle.
        The returned I3Particle will have the vertex, direction and total
        visible energy (EM equivalent) of the cascade. In addition it will
        have the type of the interaction NEUTRINO. The visible energy is
        defined here as the sum of the EM equivalent energies of the daughter
        particles, unless these are neutrinos. Only energies of particles
        that have 'InIce' location_type are considered. This means that
        energies from hadron daughter particles get converted to the EM
        equivalent energy.
        (Does not account for energy carried away by neutrinos from tau decay)
    float
        The total EM equivalent energy of the EM cascade.
    float
        The total EM equivalent energy of the hadronic cascade.
    float
        The total EM equivalent energy in muons and taus (tracks).
    """
    neutrino = get_interaction_neutrino(frame, primary,
                                        convex_hull=convex_hull,
                                        extend_boundary=extend_boundary,
                                        sanity_check=sanity_check)

    if neutrino is None or not neutrino.is_neutrino:
        return None, None, None, None

    mctree = frame['I3MCTree']

    # traverse I3MCTree until first interaction inside the convex hull is found
    daughters = mctree.get_daughters(neutrino)

    # -----------------------
    # Sanity Checks
    # -----------------------
    assert len(daughters) > 0, 'Expected at least one daughter!'

    # check if point is inside
    if convex_hull is None:
        point_inside = geometry.is_in_detector_bounds(
                            daughters[0].pos, extend_boundary=extend_boundary)
    else:
        point_inside = geometry.point_is_inside(convex_hull,
                                                (daughters[0].pos.x,
                                                 daughters[0].pos.y,
                                                 daughters[0].pos.z))
    assert point_inside, 'Expected interaction to be inside defined volume!'
    # -----------------------

    # interaction is inside the convex hull/extension boundary: cascade found!

    # get cascade
    cascade = dataclasses.I3Particle(neutrino)
    cascade.shape = dataclasses.I3Particle.ParticleShape.Cascade
    cascade.dir = dataclasses.I3Direction(primary.dir)
    cascade.pos = dataclasses.I3Position(daughters[0].pos)
    cascade.time = daughters[0].time
    cascade.length = get_interaction_extension_length(frame, neutrino)

    # sum up energies for daughters if not neutrinos
    # tau can immediately decay in neutrinos which carry away energy
    # that would not be visible, this is currently not accounted for
    e_total, e_em, e_hadron, e_track = get_cascade_em_equivalent(
        mctree, neutrino)

    cascade.energy = e_total
    return cascade, e_em, e_hadron, e_track
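
Illustrative only: the visible cascade energy returned above is the sum of EM-equivalent energies of the non-neutrino daughters, split into EM, hadronic, and track components. A toy sketch of that bookkeeping (the dict-based daughters are assumptions; the real get_cascade_em_equivalent works on the I3MCTree):

daughters = [
    {'kind': 'em',       'em_energy': 12.0},
    {'kind': 'hadron',   'em_energy': 30.0},
    {'kind': 'track',    'em_energy': 5.0},
    {'kind': 'neutrino', 'em_energy': 0.0},  # invisible, excluded
]

e_em = sum(d['em_energy'] for d in daughters if d['kind'] == 'em')
e_hadron = sum(d['em_energy'] for d in daughters if d['kind'] == 'hadron')
e_track = sum(d['em_energy'] for d in daughters if d['kind'] == 'track')
e_total = e_em + e_hadron + e_track
print(e_total, e_em, e_hadron, e_track)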
Example #7
def get_track_energy_depositions(mc_tree,
                                 track,
                                 num_to_remove,
                                 correct_for_em_loss=True,
                                 energy_threshold=1.,
                                 extend_boundary=None):
    """Get a list of track energy updates and a number of highest energy
    cascades that were removed from the track.

    Note: this function has a lot of additional code and asserts to verify
    that the assumptions made hold. The I3MCTree is not well specified and
    may change between software revisions. In this case, the asserts will help
    in letting this crash loudly.
    The main driving assumption is that the corresponding track update particle
    has a minor particle ID +1 from the stochastic loss. This is checked via
    asserts on the delta time and position.

    Parameters
    ----------
    mc_tree : I3MCTree
        The I3MCTree.
    track : I3Particle
        The track particle (usually a muon or tau) for which to create
        the energy loss plots.
    num_to_remove : int
        Number of energy losses to remove. The n highest energy depositions
        will be removed from the track energy losses and instead be handled
        as separate cascades.
    correct_for_em_loss : bool, optional
        If True, energy depositions will be in terms of EM equivalent deposited
        energy.
        If False, the actual (but possibly invisible) energy deposition is
        used.
    energy_threshold : float, optional
        The energy threshold above which an energy loss is eligible to be
        removed from the track and treated as a separate cascade.
    extend_boundary : float, optional
        If provided, only energy losses within the IceCube detector boundary
        extended by this distance [in meters] are considered.

    Raises
    ------
    NotImplementedError
        If the track particle type is not a muon; other track types such as
        taus are not yet supported.

    Returns
    -------
    dict
        update_distances : array_like
            The distances for the energy updates wrt the muon vertex.
        update_energies : array_like
            The track energies at the energy update positions.
        cascades : list of I3Particle
            List of removed cascades. This list is sorted from highest to
            lowest energy.
            Note: this list may be shorter than `num_to_remove` if the number
            of energy losses of the muon is smaller than this number.
        track_updates : list of I3Particle
            List of track updates.
        relative_energy_losses : array_like
            The relative energy loss (momentum transfer q) of each cascade
            energy deposition. Same length as `cascades`.
    """

    # sanity check
    assert num_to_remove >= 0

    # Other tracks such as taus might require additional handling of edge
    # cases. Skip them for now.
    if track.type not in [
            dataclasses.I3Particle.MuMinus, dataclasses.I3Particle.MuPlus
    ]:
        raise NotImplementedError('Particle type {} not yet supported'.format(
            track.type))

    # get all daughters of track
    daughters = mc_tree.get_daughters(track)

    # gather all track updates
    # (these define rest track energy at a certain point)
    update_distances = []
    update_times = []
    update_energies = []
    update_ids = []
    track_updates = []
    stoch_daughters = []
    stoch_energies = []
    last_update_outside = None
    track_entered_volume = False
    for index, daughter in enumerate(daughters):

        # check if these points are inside defined volume
        if extend_boundary is not None:

            # due to slight deviations in particle positions of the
            # corresponding track updates for each stochastic loss it
            # can happen that the track update is just outside the
            # defined volume while the stochastic loss is just inside.
            # We want to avoid this and make sure that the track update
            # is always inside (it does not hurt much if only the
            # stochastic loss falls outside)
            if daughter.type == track.type:
                eps_boundary = 0.1
            else:
                eps_boundary = 0.

            # use IceCube boundary + extend_boundary [meters] to check
            if not geometry.is_in_detector_bounds(
                    daughter.pos,
                    extend_boundary=extend_boundary + eps_boundary):
                if daughter.type == track.type:
                    if not track_entered_volume:
                        last_update_outside = daughter
                continue

        track_entered_volume = True

        if daughter.type == track.type:

            # this is probably a track segment update
            update_distances.append((daughter.pos - track.pos).magnitude)
            update_energies.append(daughter.energy)
            update_times.append(daughter.time)
            update_ids.append(daughter.id.minorID)
            track_updates.append(daughter)
        else:
            stoch_daughters.append(daughter)
            stoch_energies.append(daughter.energy)

    update_distances = np.array(update_distances)
    update_energies = np.array(update_energies)
    update_times = np.array(update_times)
    update_ids = np.array(update_ids)

    # check that everything is sorted
    assert (np.diff(update_distances) >= 0).all()
    assert (np.diff(update_times) >= 0).all()
    assert (np.diff(update_ids) > 0).all()

    # find the n highest energy depositions and remove these
    indices = np.argsort(stoch_energies)
    sorted_stoch_daughters = [stoch_daughters[i] for i in indices]
    num_removed = min(
        num_to_remove,
        len([d for d in stoch_daughters if d.energy > energy_threshold]))

    if num_removed == 0:
        cascades = []
        cascades_left = sorted_stoch_daughters
    elif num_removed == len(sorted_stoch_daughters):
        cascades = sorted_stoch_daughters[::-1]
        cascades_left = []
    else:
        cascades_left = sorted_stoch_daughters[:-num_removed]
        cascades = sorted_stoch_daughters[-num_removed:][::-1]
    assert len(cascades) == num_removed

    # keep track of returned energy
    returned_energy = 0

    # keep track of unaccounted daughters, e.g. energy losses that do not have
    # a matching track update. This should only happen for the decay point
    unaccounted_daughters = []

    # compute relative energy loss (momentum transfer q) of each cascade
    relative_energy_losses = []

    if len(update_distances) > 0:
        # values for sanity check
        previous_energy = float(update_energies[-1])

        # fix the track updates by adding back the energy from the
        # removed cascades
        for cascade in cascades:

            # find the track update at the point of the stochastic loss
            index = get_update_index(update_times, update_energies, update_ids,
                                     cascade)

            # the index should only be None if this cascade is part of the
            # decay products, e.g. at the end of the track
            if index is None and np.allclose(
                    cascade.time, daughters[-1].time, atol=1e-2):
                unaccounted_daughters.append((cascade, True))

                # we would need to consider the continuous losses to estimate
                # the relative energy loss. Instead of doing this, we'll just
                # add a NaN for now.
                relative_energy_losses.append(float('nan'))
            else:
                assert index is not None
                assert np.allclose(update_times[index],
                                   cascade.time,
                                   atol=1e-2)
                assert np.allclose(update_distances[index],
                                   (cascade.pos - track.pos).magnitude,
                                   atol=1e-1)

                # the energy of the muon update is already reduced by the loss.
                # To obtain the muon energy prior to the loss, we need to add
                # it back
                relative_energy_losses.append(
                    cascade.energy /
                    (cascade.energy + track_updates[index].energy))

                # update all of the remaining track updates
                # (add energy back since we assume this did not get deposited)
                update_energies[index:] += cascade.energy

                # keep track of returned energy
                returned_energy += cascade.energy

        # sanity checks
        assert np.allclose(update_energies[-1] - returned_energy,
                           previous_energy)
        assert (np.diff(update_energies) <= 1e-4).all()

    else:

        # No track updates exist. We would need to consider the continuous
        # losses to estimate the relative energy loss. Instead of doing this,
        # we'll just add NaNs for now.
        for cascade in cascades:
            relative_energy_losses.append(float('nan'))

    relative_energy_losses = np.array(relative_energy_losses)
    assert len(relative_energy_losses) == len(cascades)

    # Now walk through the leftover stochastic energy losses and make sure
    # that they are all covered by the track updates, possibly correct
    # for EM equivalent light yield if `correct_for_em_loss` is set to True.
    for daughter in cascades_left:

        # distance to stochastic energy loss
        distance = (daughter.pos - track.pos).magnitude

        # find the track update at the point of the stochastic loss
        index = get_update_index(update_times, update_energies, update_ids,
                                 daughter)
        if index is not None:

            # perform some sanity checks
            assert np.allclose(update_times[index], daughter.time, atol=1e-2)
            assert np.allclose(
                update_distances[index],
                (daughter.pos - track.pos).magnitude,
                atol=0.1,
            )

            # sanity check to see if energy loss is included
            if index == 0:
                if last_update_outside is None:
                    # Sometimes there are no muons inserted previous to
                    # the first stochastic energy loss.
                    # use the track energy in this case
                    previous_energy = track.energy
                else:
                    previous_energy = last_update_outside.energy
            else:
                previous_energy = update_energies[index - 1]
            delta_energy = previous_energy - update_energies[index]
            assert delta_energy >= daughter.energy - 1e-3

            if correct_for_em_loss:
                em_energy = convert_to_em_equivalent(daughter)
                delta_energy = daughter.energy - em_energy

                assert delta_energy > -1e-7
                delta_energy = np.clip(delta_energy, 0., np.inf)

                # need to add the additional delta_energy back to
                # all of the remaining track updates
                # (add energy back since we assume this did not get deposited)
                update_energies[index:] += delta_energy
                # keep track of returned energy
                returned_energy += delta_energy

        else:
            # This seems to be an unaccounted stochastic energy loss
            # These should only be at end of track when muon decays
            # or in some unlucky cases in which the track update happens
            # to get cut away, while the stochastic energy is still inside.
            # However, we account for the latter case by increasing the
            # convex hull when checking for contained track updates.
            assert np.allclose(daughter.time, daughters[-1].time, atol=1e-2)
            unaccounted_daughters.append((daughter, False))

    # If there are unaccounted stochastic energy losses, make sure these
    # are the particle decay
    if len(unaccounted_daughters) > 0:
        assert len(unaccounted_daughters) == 3
        assert unaccounted_daughters[0][0].pos == \
            unaccounted_daughters[1][0].pos
        assert unaccounted_daughters[0][0].pos == \
            unaccounted_daughters[2][0].pos

        # add an update distance with the rest of the deposited energy
        if len(update_energies) == 0:

            # this should only be the case if the only energy losses in the
            # I3MCTree are the ones from the decay
            assert len(stoch_daughters) == 3
            previous_energy = track.energy
        else:
            previous_energy = update_energies[-1]

        energy_dep = previous_energy - returned_energy

        # subtract off energy carried away by neutrinos or not visible
        for daughter, is_accounted_for in unaccounted_daughters:
            if daughter.type in [
                    dataclasses.I3Particle.NuE,
                    dataclasses.I3Particle.NuMu,
                    dataclasses.I3Particle.NuTau,
                    dataclasses.I3Particle.NuEBar,
                    dataclasses.I3Particle.NuMuBar,
                    dataclasses.I3Particle.NuTauBar,
            ] or is_accounted_for:
                energy_dep -= daughter.energy

            elif correct_for_em_loss:
                em_energy = convert_to_em_equivalent(daughter)
                delta_energy = daughter.energy - em_energy
                energy_dep -= delta_energy

        assert energy_dep <= previous_energy

        update_distances = np.append(
            update_distances,
            (track.pos - unaccounted_daughters[0][0].pos).magnitude)
        update_energies = np.append(update_energies,
                                    previous_energy - energy_dep)

    # If there is only one track update in the detector, prepend the last one
    # before the detector
    if len(update_distances) == 1:

        # add last existing track update if it exists
        if last_update_outside is not None:

            distance = (track.pos - last_update_outside.pos).magnitude
            energy = last_update_outside.energy

            update_distances = np.insert(update_distances, 0, distance)
            update_energies = np.insert(update_energies, 0, energy)
            track_updates = [last_update_outside] + track_updates

        # otherwise add the starting track position and energy
        else:
            update_distances = np.insert(update_distances, 0, 0.)
            update_energies = np.insert(update_energies, 0, track.energy)
            track_updates = [track] + track_updates

    # energies should be monotonically decreasing except if updates are
    # extremely close to each other
    assert (np.diff(update_distances)[np.diff(update_energies) >= 0] <
            1e-1).all()

    # Fix monotonicity of energy updates that might have been broken due
    # to numerical issues
    energy_corrections = np.diff(update_energies)
    mask = energy_corrections <= 0.
    energy_corrections[mask] = 0.
    assert (np.abs(energy_corrections) <= 1e-2).all()
    update_energies[1:] -= energy_corrections

    assert (np.diff(update_energies) <= 0).all()
    assert (update_energies >= 0).all()
    assert (np.diff([c.energy for c in cascades]) < 0).all()

    return {
        'update_distances': update_distances,
        'update_energies': update_energies,
        'cascades': cascades,
        'track_updates': track_updates,
        'relative_energy_losses': relative_energy_losses,
    }
    def __call__(self, bias_data):
        """Apply Bias Function

        Parameters
        ----------
        bias_data : dict
            Dictionary of bias input data.
            Contents may include:
            {
                'frame': the current I3Frame,
            }

        Returns
        -------
        float
            Keep probability: probability with which this event should be kept.
        """

        frame = bias_data['frame']

        # get primary
        mc_tree = frame[self.mctree_name]
        primaries = mc_tree.get_primaries()
        assert len(primaries) == 1, 'Expected only 1 Primary!'

        # get muon
        muon = mu_utils.get_muon(
            frame,
            primaries[0],
            detector.icecube_hull,
            mctree_name=self.mctree_name,
        )

        if muon is None:

            # if muon did not hit the convex hull, or if no muon exists,
            # it will be None. In this case we set default values
            found_muon = False
            cos_zen = np.cos(primaries[0].dir.zenith)
            track_length = 0.
            max_rel_loss = 0.

        else:
            found_muon = True
            cos_zen = np.cos(muon.dir.zenith)
            track_length = mu_utils.get_muon_track_length_inside(
                muon, detector.icecube_hull)

            # get muon energy losses
            losses = [
                loss for loss in mc_tree.get_daughters(muon)
                if not mu_utils.is_muon(loss) and
                geometry.is_in_detector_bounds(loss.pos, extend_boundary=60)
            ]

            # compute relative energy losses
            rel_losses = []
            loss_energies = []
            for loss in losses:

                # get energy of muon prior to energy loss
                distance = (muon.pos - loss.pos).magnitude
                energy = mu_utils.get_muon_energy_at_distance(
                    frame, muon, np.clip(distance - 1, 0., float('inf')))

                # If the loss is at the muon decay point, the returned energy
                # might be NaN; assert this and set a default value of 1 GeV
                if not np.isfinite(energy):
                    assert np.abs(distance - muon.length) < 1, (energy, muon)
                    energy = 1

                rel_loss = loss.energy / energy
                if rel_loss > 1. or rel_loss < 0.:
                    msg = 'Found out of bounds rel_loss: {:3.3f}. '.format(
                        rel_loss)
                    msg += 'Clipping value to [0, 1]'
                    log_warn(msg)
                    rel_loss = np.clip(rel_loss, 0., 1.)

                loss_energies.append(loss.energy)
                rel_losses.append(rel_loss)
            if rel_losses:
                max_rel_loss = rel_losses[np.argmax(loss_energies)]
            else:
                max_rel_loss = 0.

        # bias based on zenith
        if self.cos_zenith_sigmoid_scale is None:
            zenith_keep_prob = 1.0
        else:
            zenith_keep_prob = self.sigmoid(
                -cos_zen,
                s=self.cos_zenith_sigmoid_scale,
                b=self.cos_zenith_sigmoid_bias,
            )

        # bias based on in detector track length
        if self.track_length_sigmoid_scale is None:
            track_length_prob = 1.0
        else:
            track_length_prob = self.sigmoid(
                track_length,
                s=self.track_length_sigmoid_scale,
                b=self.track_length_sigmoid_bias,
            )

        # bias based on maximum relative energy loss
        if self.muon_loss_sigmoid_scale is None:
            max_rel_loss_prob = 1.
        else:
            max_rel_loss_prob = self.sigmoid(
                max_rel_loss,
                s=self.muon_loss_sigmoid_scale,
                b=self.muon_loss_sigmoid_bias,
            )

        bias_info = {
            'found_muon': found_muon,
            'cos_zenith': cos_zen,
            'track_length_in_detector': track_length,
            'max_relative_energy_loss': max_rel_loss,
        }

        keep_prob = zenith_keep_prob * track_length_prob * max_rel_loss_prob
        return keep_prob, bias_info
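
The biasing in __call__ maps each observable through a sigmoid and multiplies the results into a single keep probability. A minimal sketch of that combination under an assumed sigmoid form 1 / (1 + exp(-s * (x - b))); the scale and bias values below are placeholders, not tuned settings:

import numpy as np

def sigmoid(x, s=1., b=0.):
    return 1. / (1. + np.exp(-s * (x - b)))

cos_zen, track_length, max_rel_loss = -0.3, 350., 0.4
keep_prob = (sigmoid(-cos_zen, s=2., b=0.)
             * sigmoid(track_length, s=0.01, b=200.)
             * sigmoid(max_rel_loss, s=10., b=0.2))
print(keep_prob)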