Example 1
class decoherence(PiStage):
    """
    PISA Pi stage representing oscillations in the presence of decoherence

    Parameters
    ----------

    Uses the standard parameters as required by a PISA pi stage (see `pisa/core/pi_stage.py`)

    Expected contents of `params` ParamSet:
        detector_depth : quantity (distance)
        earth_model : PREM file path
        prop_height : quantity (distance)
        YeI : quantity (dimensionless)
        YeO : quantity (dimensionless)
        YeM : quantity (dimensionless)
        theta12 : quantity (angle)
        theta13 : quantity (angle)
        theta23 : quantity (angle)
        deltam21 : quantity (mass^2)
        deltam31 : quantity (mass^2)
        deltacp : quantity (angle)
        gamma21 : quantity (energy)
        gamma31 : quantity (energy)
        gamma32 : quantity (energy)
    """
    def __init__(
        self,
        data=None,
        params=None,
        input_names=None,
        output_names=None,
        debug_mode=None,
        input_specs=None,
        calc_specs=None,
        output_specs=None,
    ):

        expected_params = (
            'detector_depth',
            'earth_model',
            'prop_height',
            'YeI',
            'YeO',
            'YeM',
            'theta12',
            'theta13',
            'theta23',
            'deltam21',
            'deltam31',
            'deltacp',
            'gamma21',
            'gamma31',
            'gamma32',
        )

        input_names = ()
        output_names = ()

        # what are the keys used from the inputs during apply
        input_apply_keys = (
            'weights',
            'sys_flux',
        )
        # what are keys added or altered in the calculation used during apply
        output_calc_keys = (
            'prob_e',
            'prob_mu',
        )
        # what keys are added or altered for the outputs during apply
        output_apply_keys = ('weights', )

        # init base class
        super(decoherence, self).__init__(
            data=data,
            params=params,
            expected_params=expected_params,
            input_names=input_names,
            output_names=output_names,
            debug_mode=debug_mode,
            input_specs=input_specs,
            calc_specs=calc_specs,
            output_specs=output_specs,
            input_apply_keys=input_apply_keys,
            output_calc_keys=output_calc_keys,
            output_apply_keys=output_apply_keys,
        )

        #Have not yet implemented matter effects
        if self.params.earth_model.value is not None:
            raise ValueError(
                "Matter effects not yet implemented for decoherence, must set 'earth_model' to None"
            )

        assert self.input_mode is not None
        assert self.calc_mode is not None
        assert self.output_mode is not None

        self.layers = None

        #Toggle between 2-flavor and 3-flavor models
        self.two_flavor = False

    def setup_function(self):

        # setup Earth model
        if self.params.earth_model.value is not None:
            earth_model = find_resource(self.params.earth_model.value)
            YeI = self.params.YeI.value.m_as('dimensionless')
            YeO = self.params.YeO.value.m_as('dimensionless')
            YeM = self.params.YeM.value.m_as('dimensionless')
        else:
            earth_model = None

        # setup the layers
        prop_height = self.params.prop_height.value.m_as('km')
        detector_depth = self.params.detector_depth.value.m_as('km')
        self.layers = Layers(earth_model, detector_depth, prop_height)
        if earth_model is not None:
            self.layers.setElecFrac(YeI, YeO, YeM)

        # set the correct data mode
        self.data.data_specs = self.calc_specs

        # --- calculate the layers ---
        if self.calc_mode == 'binned':
            # speed up calculation by adding links
            # as layers don't care about flavour
            self.data.link_containers('nu', [
                'nue_cc', 'numu_cc', 'nutau_cc', 'nue_nc', 'numu_nc',
                'nutau_nc', 'nuebar_cc', 'numubar_cc', 'nutaubar_cc',
                'nuebar_nc', 'numubar_nc', 'nutaubar_nc'
            ])

        for container in self.data:
            if self.params.earth_model.value is not None:
                self.layers.calcLayers(container['true_coszen'].get('host'))
                container['densities'] = self.layers.density.reshape(
                    (container.size, self.layers.max_layers))
                container['distances'] = self.layers.distance.reshape(
                    (container.size, self.layers.max_layers))
            else:
                self.layers.calcPathLength(
                    container['true_coszen'].get('host'))
                container['distances'] = self.layers.distance

        # don't forget to un-link everything again
        self.data.unlink_containers()

        # --- setup empty arrays ---
        if self.calc_mode == 'binned':
            self.data.link_containers('nu', [
                'nue_cc', 'numu_cc', 'nutau_cc', 'nue_nc', 'numu_nc',
                'nutau_nc'
            ])
            self.data.link_containers('nubar', [
                'nuebar_cc', 'numubar_cc', 'nutaubar_cc', 'nuebar_nc',
                'numubar_nc', 'nutaubar_nc'
            ])
        for container in self.data:
            container['probability'] = np.empty((container.size, 3, 3),
                                                dtype=FTYPE)
        self.data.unlink_containers()

        # setup more empty arrays
        for container in self.data:
            container['prob_e'] = np.empty((container.size), dtype=FTYPE)
            container['prob_mu'] = np.empty((container.size), dtype=FTYPE)

    @profile
    def compute_function(self):

        # set the correct data mode
        self.data.data_specs = self.calc_specs

        if self.calc_mode == 'binned':
            # speed up calculation by adding links
            self.data.link_containers('nu', [
                'nue_cc', 'numu_cc', 'nutau_cc', 'nue_nc', 'numu_nc',
                'nutau_nc'
            ])
            self.data.link_containers('nubar', [
                'nuebar_cc', 'numubar_cc', 'nutaubar_cc', 'nuebar_nc',
                'numubar_nc', 'nutaubar_nc'
            ])

        # --- update params ---
        self.decoh_params = DecoherenceParams(
            deltam21=self.params.deltam21.value,
            deltam31=self.params.deltam31.value,
            theta12=self.params.theta12.value,
            theta13=self.params.theta13.value,
            theta23=self.params.theta23.value,
            deltacp=self.params.deltacp.value,
            gamma21=self.params.gamma21.value,
            gamma31=self.params.gamma31.value,
            gamma32=self.params.gamma32.value)

        # Calculate oscillation probabilities
        for container in self.data:
            self.calc_probs(
                container['nubar'],
                container['true_energy'],
                #container['densities'],
                container['distances'],
                out=container['probability'],
            )

        # the following is flavour specific, hence unlink
        self.data.unlink_containers()

        for container in self.data:
            # initial electrons (0)
            fill_probs(
                container['probability'].get(WHERE),
                0,  # electron
                container['flav'],
                out=container['prob_e'].get(WHERE),
            )
            # initial muons (1)
            fill_probs(
                container['probability'].get(WHERE),
                1,  # muon
                container['flav'],
                out=container['prob_mu'].get(WHERE),
            )

            container['prob_e'].mark_changed(WHERE)
            container['prob_mu'].mark_changed(WHERE)

    @profile
    def apply_function(self):

        # update the outputted weights
        for container in self.data:
            apply_probs(container['sys_flux'].get(WHERE),
                        container['prob_e'].get(WHERE),
                        container['prob_mu'].get(WHERE),
                        out=container['weights'].get(WHERE))
            container['weights'].mark_changed(WHERE)

    def calc_probs(self, nubar, e_array, len_array, out):

        #Get the probability values output array
        prob_array = out.get(WHERE)

        #Attach units
        L = len_array.get(WHERE) * ureg["km"]
        E = e_array.get(WHERE) * ureg["GeV"]

        #nue
        calc_decoherence_probs(decoh_params=self.decoh_params,
                               flav="nue",
                               energy=E,
                               baseline=L,
                               prob_e=prob_array[:, 0, 0],
                               prob_mu=prob_array[:, 0, 1],
                               prob_tau=prob_array[:, 0, 2],
                               two_flavor=self.two_flavor)

        #numu
        calc_decoherence_probs(decoh_params=self.decoh_params,
                               flav="numu",
                               energy=E,
                               baseline=L,
                               prob_e=prob_array[:, 1, 0],
                               prob_mu=prob_array[:, 1, 1],
                               prob_tau=prob_array[:, 1, 2],
                               two_flavor=self.two_flavor)

        #nutau (obtained from the numu case by exchanging the mu and tau flavours)
        np.copyto(dst=prob_array[:, 2, 0], src=prob_array[:, 1, 0])
        np.copyto(dst=prob_array[:, 2, 1], src=prob_array[:, 1, 2])
        np.copyto(dst=prob_array[:, 2, 2], src=prob_array[:, 1, 1])

        #Register that arrays have changed
        out.mark_changed(WHERE)
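
For orientation, the two-flavor limit that the `two_flavor` toggle in this stage refers
to is commonly written as a damped survival probability. The sketch below is
illustrative only and is not the PISA implementation of `calc_decoherence_probs`; the
damping convention, the unit handling, and all names are assumptions.

import numpy as np

HBAR_C_EV_KM = 1.973e-10  # hbar*c in eV*km (approximate), makes gamma*L dimensionless


def two_flavor_decoherence_survival(energy_gev, baseline_km, theta23_rad, dm31_ev2, gamma_ev):
    """P(numu -> numu) with the oscillatory term damped by exp(-gamma*L)."""
    # Standard phase: 1.267 * dm^2[eV^2] * L[km] / E[GeV] corresponds to dm^2*L/(4E)
    osc_phase = 1.267 * dm31_ev2 * baseline_km / energy_gev
    damping = np.exp(-gamma_ev * baseline_km / HBAR_C_EV_KM)
    return 1.0 - 0.5 * np.sin(2.0 * theta23_rad) ** 2 * (1.0 - damping * np.cos(2.0 * osc_phase))
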
Example 2
class pi_prob3(PiStage):
    """
    prob3 osc PISA Pi class

    Parameters
    ----------
    detector_depth : quantity (distance)
    earth_model : PREM file path
    prop_height : quantity (distance)
    YeI : quantity (dimensionless)
    YeO : quantity (dimensionless)
    YeM : quantity (dimensionless)
    theta12 : quantity (angle)
    theta13 : quantity (angle)
    theta23 : quantity (angle)
    deltam21 : quantity (mass^2)
    deltam31 : quantity (mass^2)
    deltacp : quantity (angle)

    Notes
    -----

    """
    def __init__(
        self,
        data=None,
        params=None,
        input_names=None,
        output_names=None,
        debug_mode=None,
        input_specs=None,
        calc_specs=None,
        output_specs=None,
    ):

        expected_params = (
            'detector_depth',
            'earth_model',
            'prop_height',
            'YeI',
            'YeO',
            'YeM',
            'theta12',
            'theta13',
            'theta23',
            'deltam21',
            'deltam31',
            'deltacp',
        )

        input_names = ()
        output_names = ()

        # what are the keys used from the inputs during apply
        input_apply_keys = (
            'weights',
            'sys_flux',
        )
        # what are keys added or altered in the calculation used during apply
        output_calc_keys = (
            'prob_e',
            'prob_mu',
        )
        # what keys are added or altered for the outputs during apply
        output_apply_keys = ('weights', )

        # init base class
        super(pi_prob3, self).__init__(
            data=data,
            params=params,
            expected_params=expected_params,
            input_names=input_names,
            output_names=output_names,
            debug_mode=debug_mode,
            input_specs=input_specs,
            calc_specs=calc_specs,
            output_specs=output_specs,
            input_apply_keys=input_apply_keys,
            output_calc_keys=output_calc_keys,
            output_apply_keys=output_apply_keys,
        )

        assert self.input_mode is not None
        assert self.calc_mode is not None
        assert self.output_mode is not None

        self.layers = None
        self.osc_params = None

    def setup_function(self):

        # object for oscillation parameters
        self.osc_params = OscParams()

        # setup the layers
        #if self.params.earth_model.value is not None:
        earth_model = find_resource(self.params.earth_model.value)
        YeI = self.params.YeI.value.m_as('dimensionless')
        YeO = self.params.YeO.value.m_as('dimensionless')
        YeM = self.params.YeM.value.m_as('dimensionless')
        prop_height = self.params.prop_height.value.m_as('km')
        detector_depth = self.params.detector_depth.value.m_as('km')
        self.layers = Layers(earth_model, detector_depth, prop_height)
        self.layers.setElecFrac(YeI, YeO, YeM)

        # set the correct data mode
        self.data.data_specs = self.calc_specs

        # --- calculate the layers ---
        if self.calc_mode == 'binned':
            # speed up calculation by adding links
            # as layers don't care about flavour
            self.data.link_containers('nu', [
                'nue_cc', 'numu_cc', 'nutau_cc', 'nue_nc', 'numu_nc',
                'nutau_nc', 'nuebar_cc', 'numubar_cc', 'nutaubar_cc',
                'nuebar_nc', 'numubar_nc', 'nutaubar_nc'
            ])

        for container in self.data:
            self.layers.calcLayers(container['true_coszen'].get('host'))
            container['densities'] = self.layers.density.reshape(
                (container.size, self.layers.max_layers))
            container['distances'] = self.layers.distance.reshape(
                (container.size, self.layers.max_layers))

        # don't forget to un-link everything again
        self.data.unlink_containers()

        # --- setup empty arrays ---
        if self.calc_mode == 'binned':
            self.data.link_containers('nu', [
                'nue_cc', 'numu_cc', 'nutau_cc', 'nue_nc', 'numu_nc',
                'nutau_nc'
            ])
            self.data.link_containers('nubar', [
                'nuebar_cc', 'numubar_cc', 'nutaubar_cc', 'nuebar_nc',
                'numubar_nc', 'nutaubar_nc'
            ])
        for container in self.data:
            container['probability'] = np.empty((container.size, 3, 3),
                                                dtype=FTYPE)
        self.data.unlink_containers()

        # setup more empty arrays
        for container in self.data:
            container['prob_e'] = np.empty((container.size), dtype=FTYPE)
            container['prob_mu'] = np.empty((container.size), dtype=FTYPE)

    def calc_probs(self, nubar, e_array, rho_array, len_array, out):
        '''Wrapper to execute the oscillation probability calculation.'''
        propagate_array(
            self.osc_params.dm_matrix,  # pylint: disable = unexpected-keyword-arg, no-value-for-parameter
            self.osc_params.mix_matrix_complex,
            self.osc_params.nsi_eps,
            nubar,
            e_array.get(WHERE),
            rho_array.get(WHERE),
            len_array.get(WHERE),
            out=out.get(WHERE))
        out.mark_changed(WHERE)

    @profile
    def compute_function(self):

        # set the correct data mode
        self.data.data_specs = self.calc_specs

        if self.calc_mode == 'binned':
            # speed up calculation by adding links
            self.data.link_containers('nu', [
                'nue_cc', 'numu_cc', 'nutau_cc', 'nue_nc', 'numu_nc',
                'nutau_nc'
            ])
            self.data.link_containers('nubar', [
                'nuebar_cc', 'numubar_cc', 'nutaubar_cc', 'nuebar_nc',
                'numubar_nc', 'nutaubar_nc'
            ])

        # --- update mixing params ---
        self.osc_params.theta12 = self.params.theta12.value.m_as('rad')
        self.osc_params.theta13 = self.params.theta13.value.m_as('rad')
        self.osc_params.theta23 = self.params.theta23.value.m_as('rad')
        self.osc_params.dm21 = self.params.deltam21.value.m_as('eV**2')
        self.osc_params.dm31 = self.params.deltam31.value.m_as('eV**2')
        self.osc_params.deltacp = self.params.deltacp.value.m_as('rad')

        for container in self.data:
            self.calc_probs(
                container['nubar'],
                container['true_energy'],
                container['densities'],
                container['distances'],
                out=container['probability'],
            )

        # the following is flavour specific, hence unlink
        self.data.unlink_containers()

        for container in self.data:
            # initial electrons (0)
            fill_probs(
                container['probability'].get(WHERE),
                0,
                container['flav'],
                out=container['prob_e'].get(WHERE),
            )
            # initial muons (1)
            fill_probs(
                container['probability'].get(WHERE),
                1,
                container['flav'],
                out=container['prob_mu'].get(WHERE),
            )

            container['prob_e'].mark_changed(WHERE)
            container['prob_mu'].mark_changed(WHERE)

    @profile
    def apply_function(self):

        # update the outputted weights
        for container in self.data:
            apply_probs(container['sys_flux'].get(WHERE),
                        container['prob_e'].get(WHERE),
                        container['prob_mu'].get(WHERE),
                        out=container['weights'].get(WHERE))
            container['weights'].mark_changed(WHERE)
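
In both examples above, `apply_probs` folds the systematics-corrected flux with the
oscillation probabilities to update the per-event weights. The snippet below is a
hedged numpy sketch of that update, not the vectorized kernel PISA actually uses; the
two-column `sys_flux` layout (nu_e, nu_mu) and the function name are assumptions.

import numpy as np


def apply_probs_sketch(sys_flux, prob_e, prob_mu, weights):
    """Scale each weight by the oscillated flux arriving in the event's own flavour.

    sys_flux : shape (N, 2) array holding the (nu_e, nu_mu) flux per event
    prob_e, prob_mu, weights : shape (N,) arrays
    """
    weights *= sys_flux[:, 0] * prob_e + sys_flux[:, 1] * prob_mu
    return weights
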
Example 3
class pi_earth_absorption(PiStage):
    """
    earth absorption PISA Pi class

    Parameters
    ----------
    earth_model : str
        PREM file path
    xsec_file : str
        path to ROOT file containing cross-sections
    detector_depth : quantity (distance), optional
        detector depth
    prop_height : quantity (distance), optional
        height of neutrino production in the atmosphere
    
    Notes
    -----
    
    """
    def __init__(self,
                 earth_model,
                 xsec_file,
                 data=None,
                 params=None,
                 input_names=None,
                 output_names=None,
                 debug_mode=None,
                 input_specs=None,
                 calc_specs=None,
                 output_specs=None,
                 detector_depth=2. * ureg.km,
                 prop_height=20. * ureg.km):

        expected_params = ()
        input_names = ()
        output_names = ()

        input_apply_keys = ('weights', )
        # The weights are simply scaled by the earth survival probability
        output_calc_keys = ('survival_prob', )
        output_apply_keys = ('weights', )

        # init base class
        super(pi_earth_absorption, self).__init__(
            data=data,
            params=params,
            expected_params=expected_params,
            input_names=input_names,
            output_names=output_names,
            debug_mode=debug_mode,
            input_specs=input_specs,
            calc_specs=calc_specs,
            output_specs=output_specs,
            input_apply_keys=input_apply_keys,
            output_calc_keys=output_calc_keys,
            output_apply_keys=output_apply_keys,
        )

        assert self.input_mode is not None
        assert self.calc_mode is not None
        assert self.output_mode is not None

        self.layers = None
        self.xsroot = None
        self.earth_model = earth_model
        self.xsec_file = xsec_file
        self.detector_depth = detector_depth.m_as('km')
        self.prop_height = prop_height.m_as('km')
        # this does nothing for speed, but makes for convenient numpy style broadcasting
        # TODO: Use numba vectorization (not sure how that works with splines)
        self.calculate_xsections = np.vectorize(self.calculate_xsections)

    def setup_function(self):
        import ROOT
        # setup the layers
        earth_model = find_resource(self.earth_model)
        self.layers = Layers(earth_model, self.detector_depth,
                             self.prop_height)
        # This is a bit hacky, but setting the electron fraction to 1.
        # gives us the total density of matter, which is what we want.
        self.layers.setElecFrac(1., 1., 1.)

        # setup cross-sections
        self.xsroot = ROOT.TFile(self.xsec_file)
        # set the correct data mode
        self.data.data_specs = self.calc_specs

        # --- calculate the layers ---
        if self.calc_mode == 'binned':
            # layers don't care about flavor
            self.data.link_containers('nu', [
                'nue_cc', 'numu_cc', 'nutau_cc', 'nue_nc', 'numu_nc',
                'nutau_nc', 'nuebar_cc', 'numubar_cc', 'nutaubar_cc',
                'nuebar_nc', 'numubar_nc', 'nutaubar_nc'
            ])

        for container in self.data:
            self.layers.calcLayers(container['true_coszen'].get(WHERE))
            container['densities'] = self.layers.density.reshape(
                (container.size, self.layers.max_layers))
            container['distances'] = self.layers.distance.reshape(
                (container.size, self.layers.max_layers))
            container['rho_int'] = np.empty((container.size), dtype=FTYPE)
        # don't forget to un-link everything again
        self.data.unlink_containers()

        # --- setup cross section and survival probability ---
        if self.calc_mode == 'binned':
            # The cross-sections do not depend on nc/cc, so we can at least link those containers
            self.data.link_containers('nue', ['nue_cc', 'nue_nc'])
            self.data.link_containers('nuebar', ['nuebar_cc', 'nuebar_nc'])
            self.data.link_containers('numu', ['numu_cc', 'numu_nc'])
            self.data.link_containers('numubar', ['numubar_cc', 'numubar_nc'])
            self.data.link_containers('nutau', ['nutau_cc', 'nutau_nc'])
            self.data.link_containers('nutaubar',
                                      ['nutaubar_cc', 'nutaubar_nc'])
        for container in self.data:
            container['xsection'] = np.empty((container.size), dtype=FTYPE)
            container['survival_prob'] = np.empty((container.size),
                                                  dtype=FTYPE)
        self.data.unlink_containers()

    @profile
    def compute_function(self):
        # --- calculate the integrated density in the layers ---
        if self.calc_mode == 'binned':
            self.data.link_containers('nu', [
                'nue_cc', 'numu_cc', 'nutau_cc', 'nue_nc', 'numu_nc',
                'nutau_nc', 'nuebar_cc', 'numubar_cc', 'nutaubar_cc',
                'nuebar_nc', 'numubar_nc', 'nutaubar_nc'
            ])

        for container in self.data:
            calculate_integrated_rho(container['distances'].get(WHERE),
                                     container['densities'].get(WHERE),
                                     out=container['rho_int'].get(WHERE))
        # don't forget to un-link everything again
        self.data.unlink_containers()

        # --- calculate survival probability ---
        if self.calc_mode == 'binned':
            # The cross-sections do not depend on nc/cc, so we can at least link those containers
            self.data.link_containers('nue', ['nue_cc', 'nue_nc'])
            self.data.link_containers('nuebar', ['nuebar_cc', 'nuebar_nc'])
            self.data.link_containers('numu', ['numu_cc', 'numu_nc'])
            self.data.link_containers('numubar', ['numubar_cc', 'numubar_nc'])
            self.data.link_containers('nutau', ['nutau_cc', 'nutau_nc'])
            self.data.link_containers('nutaubar',
                                      ['nutaubar_cc', 'nutaubar_nc'])
        for container in self.data:
            container['xsection'] = self.calculate_xsections(
                container['flav'], container['nubar'],
                container['true_energy'].get(WHERE))
            calculate_survivalprob(container['rho_int'].get(WHERE),
                                   container['xsection'].get(WHERE),
                                   out=container['survival_prob'].get(WHERE))
            container['survival_prob'].mark_changed(WHERE)
        self.data.unlink_containers()

    @profile
    def apply_function(self):
        for container in self.data:
            vectorizer.multiply(container['survival_prob'],
                                out=container['weights'])

    def calculate_xsections(self, flav, nubar, energy):
        '''Calculates the cross-sections on isoscalar targets.
        The result is returned in cm^2. The cross-section per
        nucleon is obtained by taking the cross-section for O16
        and dividing it by 16.
        '''
        flavor = FLAV_BAR_STR_MAPPING[(flav, nubar)]
        return (self.xsroot.Get('nu_' + flavor +
                                '_O16').Get('tot_cc').Eval(energy) +
                self.xsroot.Get('nu_' + flavor + '_O16').Get('tot_nc').Eval(
                    energy)) * 10**(-38) / 16.  # this gives cm^2
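
Below is a hedged sketch of the two kernels used by the stage above, assuming
`calculate_integrated_rho` accumulates the column depth (density times path length
summed over layers) and `calculate_survivalprob` converts that column depth plus a
per-nucleon cross-section into an exponential attenuation. Names, signatures and unit
conventions are illustrative, not the PISA kernels themselves.

import numpy as np

N_A = 6.022e23  # Avogadro's number: nucleons per gram for an isoscalar target


def integrated_rho_sketch(distances_km, densities_gcm3):
    """Column depth in g/cm^2 from per-layer distances (km) and densities (g/cm^3)."""
    return np.sum(distances_km * 1.0e5 * densities_gcm3, axis=1)  # 1 km = 1e5 cm


def survival_prob_sketch(rho_int_gcm2, xsection_cm2):
    """Probability that a neutrino crosses the column depth without interacting."""
    return np.exp(-N_A * rho_int_gcm2 * xsection_cm2)
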
Example 4
class nusquids(Stage):
    """
    PISA Pi stage for weighting events due to the effect of neutrino oscillations, using
    nuSQuIDS as the oscillation probability calculator. One specialty here is that we
    have to specify an additional binning to determine where to place nodes for the
    exact calculation. The points where the actual probability is evaluated are
    determined by calc_mode as usual and may be much finer than node_mode, or even
    event-wise, since the interpolation step is fast.

    Parameters
    ----------

    Uses the standard parameters as required by a PISA pi stage
    (see `pisa/core/stage.py`)

    node_mode : MultiDimBinning
        Binning to determine where to place nodes at which the evaluation of interaction
        states occurs. The nodes are placed at the _corners_ of the binning to avoid
        extrapolation.

    use_decoherence : bool
        set to true to include neutrino decoherence in the oscillation probability
        calculation

    num_decoherence_gamma : int
        Number of decoherence gamma parameters to be considered in the decoherence
        model; must be either 1 or 3

    use_nsi : bool
        set to true to include Non-Standard Interactions (NSI) in the oscillation
        probability calculation

    num_neutrinos : int
        Number of neutrino flavors to include. This stage supports 3 or 4 flavors, but
        nuSQuIDS allows up to 6, so this stage could easily be expanded.

    earth_model : str
        Path to Earth model (PREM) file.

    detector_depth : quantity (distance)

    prop_height : quantity (distance) or str
        Height at which neutrinos are produced. If a quantity is given, the height is
        assumed to be the same for all neutrinos. An alternative is to pass
        `from_container`. In that case, the stage will search the input container for a
        key `prop_height` and take the height from there on a bin-wise or event-wise
        basis depending on `calc_specs`.

    prop_height_range : quantity (distance)
        Production height is averaged around the mean set by `prop_height` assuming
        a uniform distribution in [mean - range/2, mean + range/2]. The production
        heights are projected onto the direction of the neutrino, such that the
        averaging range is longer for shallow angles above the horizon.

    apply_lowpass_above_hor : bool
        Whether to apply the low-pass filter for evaluations above the horizon. If
        `True` (default), the low-pass filter is applied everywhere. If `False`, the
        filter is applied only below the horizon. Because propagation distances are
        very short above the horizon, fast oscillations no longer average out and the
        filter might wash out important features.

    apply_height_avg_below_hor : bool
        Whether to apply the production height averaging below the horizon. If `True`
        (default), the production height averaging is applied everywhere if a
        `prop_height_range` is set. If `False`, the height averaging is only applied
        above the horizon. Since the production height is only a very small fraction
        of the total propagation distance below the horizon, the height averaging is
        no longer important and a little bit of time can be saved by computing the
        slightly cheaper non-averaged probabilities.

    YeI : quantity (dimensionless)
        Inner electron fraction.

    YeO : quantity (dimensionless)
        Outer electron fraction.

    YeM : quantity (dimensionless)
        Mantle electron fraction.

    rel_err : float
        Relative error of the numerical integration

    abs_err : float
        Absolute error of the numerical integration

    prop_lowpass_cutoff : quantity (1/distance)
        Frequency cutoff for fast oscillations applied during numerical integration
        of the interaction state. The frequency is passed as oscillations per distance.
        A reasonable order of magnitude would allow ~100 oscillations over 12000 km.

    prop_lowpass_frac : quantity (dimensionless)
        This number determines how sharp the cut-off of the low-pass filter applied
        during numerical integration is. A value of 0.1 means that the filter begins
        to kick in when 90% of the cutoff frequency is reached and linearly decreases
        oscillation amplitudes until the cutoff is reached.

    eval_lowpass_cutoff : quantity (1/distance)
        Same as `prop_lowpass_cutoff`, but applied during evaluation of interpolated
        states, not during integration.

    eval_lowpass_frac : quantity (dimensionless)
        Same as `prop_lowpass_frac`, but applied during evaluation of interpolated
        states, not during integration.

    suppress_interpolation_warning : bool
        Suppress warning about negative probabilities that can indicate insufficient
        nodes in a problematic region of energy and coszen. Set this option only at your
        own risk after you optimized nodes and are sure that remaining negative
        probabilities won't be a problem!

    exact_mode : bool
        With this turned on, the probabilities are evaluated using the exact calculation
        for constant densities in every layer without numerical integration. This method
        is much faster than the numerical integration for a node, but you lose the
        option to project out probabilities from interaction picture states. In this
        mode, nuSQuIDS behaves essentially like GLoBES with the same speed trade-off.
        You cannot apply filters in this mode either. Its only recommended use is for
        pseudo-data generation, where you may want an exact event-by-event calculation
        that is allowed to take several minutes.

    vacuum : bool
        Do not include matter effects. Greatly increases evaluation speed.

    params : ParamSet or sequence with which to instantiate a ParamSet.
        Expected params .. ::
            theta12 : quantity (angle)
            theta13 : quantity (angle)
            theta23 : quantity (angle)
            deltam21 : quantity (mass^2)
            deltam31 : quantity (mass^2)
            deltacp : quantity (angle)
        Additional expected params if `num_neutrinos == 4` .. ::
            theta14 : quantity (angle)
            theta24 : quantity (angle)
            deltam41 : quantity (mass^2)
            deltacp14 : quantity (angle)
            deltacp24 : quantity (angle)

    Additional ParamSet params expected when using the `use_decoherence` argument .. ::
        gamma0 : quantity (energy)
        n : quantity (dimensionless)
        E0 : quantity (energy)

    """

    def __init__(
        self,
        earth_model=None,
        detector_depth=None,
        prop_height=None,
        prop_height_range=None,
        YeI=None,
        YeO=None,
        YeM=None,
        rel_err=None,
        abs_err=None,
        prop_lowpass_cutoff=None,
        prop_lowpass_frac=None,
        eval_lowpass_cutoff=None,
        eval_lowpass_frac=None,
        apply_lowpass_above_hor=True,
        apply_height_avg_below_hor=True,
        suppress_interpolation_warning=False,
        node_mode=None,
        use_decoherence=False,
        num_decoherence_gamma=1,
        use_nsi=False,
        num_neutrinos=3,
        use_taus=False,
        exact_mode=False,
        vacuum=False,
        **std_kwargs,
    ):

        # Checks
        if use_nsi:
            raise NotImplementedError("NSI not implemented")
        if type(prop_height) is not ureg.Quantity:
            raise NotImplementedError(
                "Getting propagation heights from containers is "
                "not yet implemented, saw {} type".format(type(prop_height))
            )

        # Store args
        self.num_neutrinos = int(num_neutrinos)
        assert (
            self.num_neutrinos < 5
        ), "currently only supports up to 4 flavor oscillations"
        self.use_nsi = use_nsi
        self.use_decoherence = use_decoherence
        self.num_decoherence_gamma = num_decoherence_gamma
        self.node_mode = node_mode
        self.vacuum = vacuum
        self.use_taus = use_taus
        self.earth_model = earth_model
        self.YeI = YeI.m_as("dimensionless")
        self.YeO = YeO.m_as("dimensionless")
        self.YeM = YeM.m_as("dimensionless")
        self.detector_depth = detector_depth.m_as("km")
        self.prop_height = prop_height.m_as("km")
        self.avg_height = False
        self.concurrent_threads = PISA_NUM_THREADS if TARGET == "parallel" else 1
        self.prop_height_range = None
        self.apply_height_avg_below_hor = apply_height_avg_below_hor
        if prop_height_range is not None:  # this is optional
            self.prop_height_range = prop_height_range.m_as("km")
            self.avg_height = True

        self.layers = None

        self.rel_err = rel_err.m_as("dimensionless") if rel_err is not None else 1.0e-10
        self.abs_err = abs_err.m_as("dimensionless") if abs_err is not None else 1.0e-10
        self.prop_lowpass_cutoff = (
            prop_lowpass_cutoff.m_as("1/km") if prop_lowpass_cutoff is not None else 0.0
        )
        self.prop_lowpass_frac = (
            prop_lowpass_frac.m_as("dimensionless")
            if prop_lowpass_frac is not None
            else 0.0
        )
        self.eval_lowpass_cutoff = (
            eval_lowpass_cutoff.m_as("1/km") if eval_lowpass_cutoff is not None else 0.0
        )
        self.eval_lowpass_frac = (
            eval_lowpass_frac.m_as("dimensionless")
            if eval_lowpass_frac is not None
            else 0.0
        )

        if self.prop_lowpass_frac > 1.0 or self.eval_lowpass_frac > 1.0:
            raise ValueError("lowpass filter fraction cannot be greater than one")

        if self.prop_lowpass_frac < 0.0 or self.eval_lowpass_frac < 0.0:
            raise ValueError("lowpass filter fraction cannot be smaller than zero")

        self.apply_lowpass_above_hor = apply_lowpass_above_hor

        self.nus_layer = None
        self.nus_layerbar = None

        # Define the layers class
        self.nusquids_layers_class = nsq.nuSQUIDSLayers

        # Define standard params
        expected_params = [
            "theta12",
            "theta13",
            "theta23",
            "deltam21",
            "deltam31",
            "deltacp",
        ]

        # Add decoherence parameters
        if self.use_decoherence:
            # Use derived nuSQuIDS classes
            import nuSQUIDSDecohPy

            self.nusquids_layers_class = nuSQUIDSDecohPy.nuSQUIDSDecohLayers
            # Checks
            assert (
                self.num_neutrinos == 3
            ), "Decoherence only supports 3 neutrinos currently"
            # Add decoherence params
            expected_params.extend(["gamma0"])
            expected_params.extend(["n"])
            expected_params.extend(["E0"])

        # We may want to reparametrize this with the difference between deltacp14 and
        # deltacp24, as the absolute value seems to play a small role (see
        # https://arxiv.org/pdf/2010.06321.pdf)
        if self.num_neutrinos == 4:
            expected_params.extend(
                [
                    "theta14",
                    "theta24",
                    "theta34",
                    "deltam41",
                    "deltacp14",
                    "deltacp24",
                ]
            )

        # init base class
        super().__init__(
            expected_params=expected_params,
            **std_kwargs,
        )

        # This is special: We have an additional "binning" to account for. It is in
        # principle possible to work in event mode even for the nodes, which would mean
        # that the full oscillation problem is solved for all events individually.
        # Together with the constant oscillation mode, this can be used to calculate
        # probabilities in exact mode in a time that is reasonable at least for
        # generating pseudodata.

        assert not (self.use_nsi and self.use_decoherence), (
            "NSI and decoherence not supported together, must use one or the other"
        )

        self.exact_mode = exact_mode

        if exact_mode:
            # No interpolation is happening in exact mode so any passed node_mode
            # will be ignored. Probabilities are calculated at calc_specs.
            if self.node_mode is not None:
                logging.warn(
                    "nuSQuIDS is configured in exact mode, the passed "
                    f"`node_mode`\n({self.node_mode})\n will be ignored!"
                )
            if self.prop_lowpass_cutoff > 0 or self.eval_lowpass_cutoff > 0:
                logging.warn(
                    "nuSQuIDS is configured in exact mode, low-pass filters "
                    "will be ignored"
                )
        else:
            if isinstance(self.calc_mode, MultiDimBinning):
                assert isinstance(self.node_mode, MultiDimBinning), (
                    "cannot use " "event-wise nodes with binned calculation"
                )

        self.e_node_mode = None
        self.e_mesh = None
        self.coszen_node_mode = None
        self.cosz_mesh = None

        # We don't want to spam the user with repeated warnings about the same issue.
        self.interpolation_warning_issued = suppress_interpolation_warning

    def set_osc_parameters(self, nus_layer):
        # nuSQuIDS uses zero-index for mixing angles
        nus_layer.Set_MixingAngle(0, 1, self.params.theta12.value.m_as("rad"))
        nus_layer.Set_MixingAngle(0, 2, self.params.theta13.value.m_as("rad"))
        nus_layer.Set_MixingAngle(1, 2, self.params.theta23.value.m_as("rad"))

        # mass differences in nuSQuIDS are always w.r.t. m_1
        nus_layer.Set_SquareMassDifference(1, self.params.deltam21.value.m_as("eV**2"))
        nus_layer.Set_SquareMassDifference(2, self.params.deltam31.value.m_as("eV**2"))

        nus_layer.Set_CPPhase(0, 2, self.params.deltacp.value.m_as("rad"))

        # set decoherence parameters
        if self.use_decoherence:
            nsq_units = nsq.Const()  # TODO Once only (make into a member)
            gamma0 = self.params.gamma0.value.m_as("eV") * nsq_units.eV
            gamma0_matrix_diagonal = np.array(
                [0.0, gamma0, gamma0, gamma0, gamma0, gamma0, gamma0, gamma0, gamma0]
            )  # "State selection" case (see arXiv:2007.00068 eqn 11) #TODO implement other models
            nus_layer.Set_DecoherenceGammaMatrixDiagonal(gamma0_matrix_diagonal)
            nus_layer.Set_DecoherenceGammaEnergyDependence(
                self.params.n.value.m_as("dimensionless")
            )
            nus_layer.Set_DecoherenceGammaEnergyScale(
                self.params.E0.value.m_as("eV") * nsq_units.eV
            )

        if self.num_neutrinos == 3:
            return

        nus_layer.Set_MixingAngle(0, 3, self.params.theta14.value.m_as("rad"))
        nus_layer.Set_MixingAngle(1, 3, self.params.theta24.value.m_as("rad"))
        nus_layer.Set_MixingAngle(2, 3, self.params.theta34.value.m_as("rad"))
        nus_layer.Set_SquareMassDifference(3, self.params.deltam41.value.m_as("eV**2"))
        nus_layer.Set_CPPhase(0, 3, self.params.deltacp14.value.m_as("rad"))
        nus_layer.Set_CPPhase(1, 3, self.params.deltacp24.value.m_as("rad"))
        # TODO: Implement NSI, decoherence

    def apply_prop_settings(self, nus_layer):
        nsq_units = nsq.Const()
        nus_layer.Set_rel_error(self.rel_err)
        nus_layer.Set_abs_error(self.abs_err)
        nus_layer.Set_EvolLowPassCutoff(self.prop_lowpass_cutoff / nsq_units.km)
        # The ramp of the low-pass filter starts to drop at (cutoff - scale)
        scale = self.prop_lowpass_frac * self.prop_lowpass_cutoff / nsq_units.km
        nus_layer.Set_EvolLowPassScale(scale)
        nus_layer.Set_AllowConstantDensityOscillationOnlyEvolution(self.exact_mode)
        nus_layer.Set_EvalThreads(self.concurrent_threads)

    def setup_function(self):

        earth_model = find_resource(self.earth_model)
        prop_height = self.prop_height
        detector_depth = self.detector_depth
        self.layers = Layers(earth_model, detector_depth, prop_height)
        # We must treat densities and electron fractions correctly here, so we set them
        # to 1 in the Layers module to get unweighted densities.
        self.layers.setElecFrac(1, 1, 1)

        nsq_units = nsq.Const()  # natural units for nusquids
        # Because we don't want to extrapolate, we check that all points at which we
        # want to evaluate probabilities are fully contained within the node specs. This
        # is of course not necessary in events mode.
        if isinstance(self.node_mode, MultiDimBinning) and not self.exact_mode:
            logging.debug("setting up nuSQuIDS nodes in binned mode")
            # we can prepare the calculator like this only in binned mode, see
            # compute_function for node_mode == "events"
            self.data.representation = self.calc_mode
            for container in self.data:
                for var in ["true_coszen", "true_energy"]:
                    unit = "dimensionless" if var == "true_coszen" else "GeV"
                    upper_bound = np.max(self.node_mode[var].bin_edges.m_as(unit))
                    lower_bound = np.min(self.node_mode[var].bin_edges.m_as(unit))
                    err_msg = (
                        "The outer edges of the node_mode must encompass "
                        "the entire range of calc_specs to avoid extrapolation"
                    )
                    if np.any(container[var] > upper_bound):
                        maxval = np.max(container[var])
                        raise ValueError(
                            err_msg + f"\nmax input: {maxval}, upper "
                            f"bound: {upper_bound}"
                        )
                    if np.any(container[var] < lower_bound):
                        minval = np.min(container[var])
                        raise ValueError(
                            err_msg + f"\nmin input: {minval}, lower "
                            f"bound: {lower_bound}"
                        )

            # Layers in nuSQuIDS are special: We need all the individual distances and
            # densities for the nodes to solve the interaction picture states, but on
            # the final calculation grid (or events) we only need the *total* traversed
            # distance. Because we are placing nodes at the bin edges rather than the
            # bin middle, this doesn't really fit with how containers store data, so we
            # are making arrays as variables that never go into the container.

            # These are stored because we need them later during interpolation
            self.coszen_node_mode = self.node_mode["true_coszen"].bin_edges.m_as(
                "dimensionless"
            )
            self.e_node_mode = self.node_mode["true_energy"].bin_edges.m_as("GeV")
            logging.debug(
                f"Setting up nodes at\n"
                f"cos_zen = \n{self.coszen_node_mode}\n"
                f"energy = \n{self.e_node_mode}\n"
            )
            # things are getting a bit meshy from here...
            self.e_mesh, self.cosz_mesh = np.meshgrid(
                self.e_node_mode, self.coszen_node_mode
            )
            e_nodes = self.e_mesh.ravel()
            coszen_nodes = self.cosz_mesh.ravel()

            # The lines below should not be necessary because we will always get at
            # least two numbers from the bin edges. However, if either energy or coszen
            # somehow was just a scalar, we would need to broadcast it out to the same
            # size. Keeping the code in here in case you want to use the stage in 1D.
            # convert lists to ndarrays and scalars to ndarrays with length 1
            e_nodes = np.atleast_1d(e_nodes)
            coszen_nodes = np.atleast_1d(coszen_nodes)
            # broadcast against each other and make a copy
            # (see https://numpy.org/doc/stable/reference/generated/numpy.broadcast_arrays.html)
            e_nodes, coszen_nodes = [
                np.array(a) for a in np.broadcast_arrays(e_nodes, coszen_nodes)
            ]

            assert len(e_nodes) == len(coszen_nodes)
            assert coszen_nodes.ndim == 1
            assert e_nodes.ndim == 1

            self.layers.calcLayers(coszen_nodes)
            distances = np.reshape(
                self.layers.distance, (len(e_nodes), self.layers.max_layers)
            )
            densities = np.reshape(
                self.layers.density, (len(e_nodes), self.layers.max_layers)
            )
            # HACK: We need the correct electron densities for each layer. We can
            # determine whether we are in the core or mantle based on the density.
            # Needless to say it isn't optimal to have these numbers hard-coded.
            ye = np.zeros_like(densities)
            ye[densities < 10] = self.YeM
            ye[(densities >= 10) & (densities < 13)] = self.YeO
            ye[densities >= 13] = self.YeI
            self.nus_layer = self.nusquids_layers_class(
                distances * nsq_units.km,
                densities,
                ye,
                e_nodes * nsq_units.GeV,
                self.num_neutrinos,
                nsq.NeutrinoType.both,
            )
            self.apply_prop_settings(self.nus_layer)

        # Now that we have our nusquids calculator set up on the node grid, we make
        # container output space for the probability output which may be on a finer grid
        # than the nodes or even working in events mode.
        self.data.representation = self.calc_mode

        # --- calculate the layers ---
        if isinstance(self.calc_mode, MultiDimBinning):
            # as layers don't care about flavour
            self.data.link_containers(
                "nu",
                [
                    "nue_cc",
                    "numu_cc",
                    "nutau_cc",
                    "nue_nc",
                    "numu_nc",
                    "nutau_nc",
                    "nuebar_cc",
                    "numubar_cc",
                    "nutaubar_cc",
                    "nuebar_nc",
                    "numubar_nc",
                    "nutaubar_nc",
                ],
            )

        # calculate the distance difference between minimum and maximum production
        # height, if applicable
        if self.avg_height:
            layers_min = Layers(
                earth_model,
                detector_depth,
                self.prop_height - self.prop_height_range / 2.0,
            )
            layers_min.setElecFrac(1, 1, 1)
            layers_max = Layers(
                earth_model,
                detector_depth,
                self.prop_height + self.prop_height_range / 2.0,
            )
            layers_max.setElecFrac(1, 1, 1)

        for container in self.data:
            self.layers.calcLayers(container["true_coszen"])
            distances = self.layers.distance.reshape((container.size, -1))
            tot_distances = np.sum(distances, axis=1)
            if self.avg_height:
                layers_min.calcLayers(container["true_coszen"])
                dists_min = layers_min.distance.reshape((container.size, -1))
                min_tot_dists = np.sum(dists_min, axis=1)

                layers_max.calcLayers(container["true_coszen"])
                dists_max = layers_max.distance.reshape((container.size, -1))
                max_tot_dists = np.sum(dists_max, axis=1)
                # nuSQuIDS assumes the original distance is the longest distance and
                # the averaging range is the difference between the minimum and maximum
                # distance.
                avg_ranges = max_tot_dists - min_tot_dists
                tot_distances = max_tot_dists
                assert np.all(avg_ranges > 0)
            # If the low-pass cutoff is zero, nusquids will not evaluate the filter.
            container["lowpass_cutoff"] = self.eval_lowpass_cutoff * np.ones(
                container.size
            )
            if not self.apply_lowpass_above_hor:
                container["lowpass_cutoff"] = np.where(
                    container["true_coszen"] >= 0, 0, container["lowpass_cutoff"]
                )
            if isinstance(self.node_mode, MultiDimBinning) and not self.exact_mode:
                # To project out probabilities we only need the *total* distance
                container["tot_distances"] = tot_distances
                if self.avg_height:
                    container["avg_ranges"] = avg_ranges
                else:
                    container["avg_ranges"] = np.zeros(container.size, dtype=FTYPE)
                if not self.apply_height_avg_below_hor:
                    container["avg_ranges"] = np.where(
                        container["true_coszen"] >= 0, container["avg_ranges"], 0.0
                    )
            elif self.node_mode == "events" or self.exact_mode:
                # in any other mode (events or exact) we store all densities and
                # distances in the container in calc_specs
                densities = self.layers.density.reshape((container.size, -1))
                container["densities"] = densities
                container["distances"] = distances

        self.data.unlink_containers()

        if isinstance(self.calc_mode, MultiDimBinning):
            self.data.link_containers("nue", ["nue_cc", "nue_nc"])
            self.data.link_containers("numu", ["numu_cc", "numu_nc"])
            self.data.link_containers("nutau", ["nutau_cc", "nutau_nc"])
            self.data.link_containers("nuebar", ["nuebar_cc", "nuebar_nc"])
            self.data.link_containers("numubar", ["numubar_cc", "numubar_nc"])
            self.data.link_containers("nutaubar", ["nutaubar_cc", "nutaubar_nc"])

        # setup more empty arrays
        for container in self.data:
            container["prob_e"] = np.empty((container.size), dtype=FTYPE)
            container["prob_mu"] = np.empty((container.size), dtype=FTYPE)
            if self.use_taus:
                container["prob_tau"] = np.empty((container.size), dtype=FTYPE)

        self.data.unlink_containers()

        if self.exact_mode:
            return

        # --- containers for interpolated states ---
        # This is not needed in exact mode
        if isinstance(self.calc_mode, MultiDimBinning):
            self.data.link_containers(
                "nu", ["nue_cc", "numu_cc", "nutau_cc", "nue_nc", "numu_nc", "nutau_nc"]
            )
            self.data.link_containers(
                "nubar",
                [
                    "nuebar_cc",
                    "numubar_cc",
                    "nutaubar_cc",
                    "nuebar_nc",
                    "numubar_nc",
                    "nutaubar_nc",
                ],
            )
        for container in self.data:
            container["interp_states_e"] = np.empty(
                (container.size, self.num_neutrinos ** 2),
                dtype=FTYPE,
            )
            container["interp_states_mu"] = np.empty(
                (container.size, self.num_neutrinos ** 2),
                dtype=FTYPE,
            )
            container["interp_states_tau"] = np.empty(
                (container.size, self.num_neutrinos ** 2),
                dtype=FTYPE,
            )
        self.data.unlink_containers()
        self.interpolation_warning_issued = False

    # @line_profile
    def calc_node_probs(self, nus_layer, flav_in, flav_out, n_nodes):
        """
        Evaluate oscillation probabilities at nodes. This does not require any
        interpolation.
        """
        ini_state = np.array([0] * self.num_neutrinos)
        ini_state[flav_in] = 1
        nus_layer.Set_initial_state(ini_state, nsq.Basis.flavor)
        if not self.vacuum:
            nus_layer.EvolveState()
        prob_nodes = nus_layer.EvalFlavorAtNodes(flav_out)
        return prob_nodes

    def calc_interpolated_states(self, evolved_states, e_out, cosz_out):
        """
        Calculate interpolated states at the energies and zenith angles requested.
        """
        nsq_units = nsq.Const()
        interp_states = np.zeros((e_out.size, evolved_states.shape[1]))

        assert np.all(e_out <= np.max(self.e_node_mode * nsq_units.GeV))
        assert np.all(e_out >= np.min(self.e_node_mode * nsq_units.GeV))
        assert np.all(cosz_out <= np.max(self.coszen_node_mode))
        assert np.all(cosz_out >= np.min(self.coszen_node_mode))

        for i in range(evolved_states.shape[1]):
            z = evolved_states[:, i].reshape(self.e_mesh.shape).T
            assert np.all(np.isfinite(z))
            # RectBivariateSpline takes in the 1D node position and assumes that they
            # are on a mesh.
            f = RectBivariateSpline(
                np.log10(self.e_node_mode * nsq_units.GeV),
                self.coszen_node_mode,
                z,
                kx=2,
                ky=2,
            )
            interp_states[..., i] = f(np.log10(e_out), cosz_out, grid=False)
        return interp_states

    def calc_probs_interp(
        self,
        flav_out,
        nubar,
        interp_states,
        out_distances,
        e_out,
        avg_ranges=0,
        lowpass_cutoff=0,
    ):
        """
        Project out probabilities from interpolated interaction picture states.
        """
        nsq_units = nsq.Const()

        prob_interp = np.zeros(e_out.size)
        scale = self.eval_lowpass_frac * lowpass_cutoff
        prob_interp = self.nus_layer.EvalWithState(
            flav_out,
            out_distances,
            e_out,
            interp_states,
            rho=int(nubar),
            avg_cutoff=0.0,
            avg_scale=0.0,
            # Range averaging is only computed in the places where t_range > 0, so
            # we don't need to introduce switches for averaged and non-averaged regions.
            lowpass_cutoff=lowpass_cutoff,
            lowpass_scale=scale,
            t_range=avg_ranges,
        )
        return prob_interp

    def compute_function_no_interpolation(self):
        """
        Version of the compute function that does not use any interpolation between
        nodes.
        """
        nsq_units = nsq.Const()
        # it is possible to work in binned calc mode while being in exact mode
        if isinstance(self.calc_mode, MultiDimBinning):
            self.data.link_containers("nue", ["nue_cc", "nue_nc"])
            self.data.link_containers("numu", ["numu_cc", "numu_nc"])
            self.data.link_containers("nutau", ["nutau_cc", "nutau_nc"])
            self.data.link_containers("nuebar", ["nuebar_cc", "nuebar_nc"])
            self.data.link_containers("numubar", ["numubar_cc", "numubar_nc"])
            self.data.link_containers("nutaubar", ["nutaubar_cc", "nutaubar_nc"])
        for container in self.data:
            nubar = container["nubar"] < 0
            flav = container["flav"]
            # HACK: We need the correct electron densities for each layer. We can
            # determine whether we are in the core or mantle based on the density.
            ye = np.zeros_like(container["densities"])
            ye[container["densities"] < 10] = self.YeM
            ye[
                (container["densities"] >= 10) & (container["densities"] < 13)
            ] = self.YeO
            ye[container["densities"] >= 13] = self.YeI
            nus_layer = self.nusquids_layers_class(
                container["distances"] * nsq_units.km,
                container["densities"],
                ye,
                container["true_energy"] * nsq_units.GeV,
                self.num_neutrinos,
                nsq.NeutrinoType.antineutrino if nubar else nsq.NeutrinoType.neutrino,
            )
            self.apply_prop_settings(nus_layer)
            self.set_osc_parameters(nus_layer)
            container["prob_e"] = self.calc_node_probs(
                nus_layer, 0, flav, container.size
            )
            container["prob_mu"] = self.calc_node_probs(
                nus_layer, 1, flav, container.size
            )
            container.mark_changed("prob_e")
            container.mark_changed("prob_mu")
            if self.use_taus:
                container["prob_tau"] = self.calc_node_probs(
                    nus_layer, 2, flav, container.size
                )
                container.mark_changed("prob_tau")
        self.data.unlink_containers()

    # @line_profile
    def compute_function_interpolated(self):
        """
        Version of the compute function that does use interpolation between nodes.
        """
        nsq_units = nsq.Const()
        # We need one evolution per initial flavor: nue, numu and, if taus are
        # included, nutau. Each evolution produces neutrino and antineutrino states
        # at the same time thanks to the "both" neutrino mode of nuSQuIDS.
        self.apply_prop_settings(self.nus_layer)
        self.set_osc_parameters(self.nus_layer)

        ini_state_nue = np.array([1, 0, 0] + [0] * (self.num_neutrinos - 3))
        ini_state_numu = np.array([0, 1, 0] + [0] * (self.num_neutrinos - 3))
        ini_state_nutau = np.array([0, 0, 1] + [0] * (self.num_neutrinos - 3))

        self.nus_layer.Set_initial_state(ini_state_nue, nsq.Basis.flavor)
        if not self.vacuum:
            self.nus_layer.EvolveState()
        evolved_states_nue = self.nus_layer.GetStates(0)
        evolved_states_nuebar = self.nus_layer.GetStates(1)

        self.nus_layer.Set_initial_state(ini_state_numu, nsq.Basis.flavor)
        if not self.vacuum:
            self.nus_layer.EvolveState()
        evolved_states_numu = self.nus_layer.GetStates(0)
        evolved_states_numubar = self.nus_layer.GetStates(1)

        if self.use_taus:
            self.nus_layer.Set_initial_state(ini_state_nutau, nsq.Basis.flavor)
            if not self.vacuum:
                self.nus_layer.EvolveState()
            evolved_states_nutau = self.nus_layer.GetStates(0)
            evolved_states_nutaubar = self.nus_layer.GetStates(1)

        # Now comes the step where we interpolate the interaction picture states
        # and project out oscillation probabilities. This can be done in either events
        # or binned mode.
        if isinstance(self.calc_mode, MultiDimBinning):
            self.data.link_containers(
                "nu", ["nue_cc", "numu_cc", "nutau_cc", "nue_nc", "numu_nc", "nutau_nc"]
            )
            self.data.link_containers(
                "nubar",
                [
                    "nuebar_cc",
                    "numubar_cc",
                    "nutaubar_cc",
                    "nuebar_nc",
                    "numubar_nc",
                    "nutaubar_nc",
                ],
            )
        for container in self.data:
            nubar = container["nubar"] < 0
            container["interp_states_e"] = self.calc_interpolated_states(
                evolved_states_nuebar if nubar else evolved_states_nue,
                container["true_energy"] * nsq_units.GeV,
                container["true_coszen"],
            )
            container["interp_states_mu"] = self.calc_interpolated_states(
                evolved_states_numubar if nubar else evolved_states_numu,
                container["true_energy"] * nsq_units.GeV,
                container["true_coszen"],
            )
            if self.use_taus:
                container["interp_states_tau"] = self.calc_interpolated_states(
                    evolved_states_nutaubar if nubar else evolved_states_nutau,
                    container["true_energy"] * nsq_units.GeV,
                    container["true_coszen"],
                )
        self.data.unlink_containers()

        if isinstance(self.calc_mode, MultiDimBinning):
            self.data.link_containers("nue", ["nue_cc", "nue_nc"])
            self.data.link_containers("numu", ["numu_cc", "numu_nc"])
            self.data.link_containers("nutau", ["nutau_cc", "nutau_nc"])
            self.data.link_containers("nuebar", ["nuebar_cc", "nuebar_nc"])
            self.data.link_containers("numubar", ["numubar_cc", "numubar_nc"])
            self.data.link_containers("nutaubar", ["nutaubar_cc", "nutaubar_nc"])

        for container in self.data:

            nubar = container["nubar"] < 0
            flav_out = container["flav"]
            input_flavs = ["e", "mu", "tau"] if self.use_taus else ["e", "mu"]

            for flav_in in input_flavs:
                container["prob_" + flav_in] = self.calc_probs_interp(
                    flav_out=flav_out,
                    nubar=nubar,
                    interp_states=container["interp_states_" + flav_in],
                    out_distances=container["tot_distances"] * nsq_units.km,
                    e_out=container["true_energy"] * nsq_units.GeV,
                    avg_ranges=container["avg_ranges"] * nsq_units.km,
                    lowpass_cutoff=container["lowpass_cutoff"] / nsq_units.km,
                )

                # It is possible to get slightly negative probabilities from imperfect
                # state interpolation between nodes.
                # It's impractical to avoid any probability dipping below zero in every
                # conceivable situation because that would require very dense node
                # spacing. We get around this by flooring the probability at zero.
                # However, dipping below zero by more than 1% may indicate that nodes
                # aren't spaced tightly enough to achieve an acceptable accuracy, so we
                # issue a warning.
                if (
                    np.any(container["prob_" + flav_in] < -0.01)
                    and not self.interpolation_warning_issued
                ):
                    mask = container["prob_" + flav_in] < -0.01
                    en_med = np.median(container["true_energy"][mask])
                    cz_med = np.median(container["true_coszen"][mask])
                    logging.warn(
                        f"Some probabilities in nu_{flav_in} -> {container.name} dip "
                        "below zero by more than 1%! This may indicate too few nodes "
                        f"in the problematic region. Median energy: {en_med}, median "
                        f"coszen: {cz_med}. This warning is only issued once."
                    )
                    self.interpolation_warning_issued = True
                container["prob_" + flav_in][container["prob_" + flav_in] < 0] = 0.0
            container.mark_changed("prob_e")
            container.mark_changed("prob_mu")
            if self.use_taus:
                container.mark_changed("prob_tau")
        self.data.unlink_containers()

    def compute_function(self):
        if self.node_mode == "events" or self.exact_mode:
            self.compute_function_no_interpolation()
        else:
            self.compute_function_interpolated()

    @profile
    def apply_function(self):
        for container in self.data:
            scales = (
                container["nu_flux"][:, 0] * container["prob_e"]
                + container["nu_flux"][:, 1] * container["prob_mu"]
            )
            if self.use_taus:
                scales += container["nu_flux"][:, 2] * container["prob_tau"]
            container["weights"] = container["weights"] * scales
Example n. 5
    def setup_function(self):

        earth_model = find_resource(self.earth_model)
        prop_height = self.prop_height
        detector_depth = self.detector_depth
        self.layers = Layers(earth_model, detector_depth, prop_height)
        # The electron fractions are applied per layer later on, so we set them to 1
        # in the Layers module here to obtain unweighted densities.
        self.layers.setElecFrac(1, 1, 1)

        nsq_units = nsq.Const()  # natural units for nusquids
        # Because we don't want to extrapolate, we check that all points at which we
        # want to evaluate probabilities are fully contained within the node specs. This
        # is of course not necessary in events mode.
        if isinstance(self.node_mode, MultiDimBinning) and not self.exact_mode:
            logging.debug("setting up nuSQuIDS nodes in binned mode")
            # we can prepare the calculator like this only in binned mode, see
            # compute_function for node_mode == "events"
            self.data.representation = self.calc_mode
            for container in self.data:
                for var in ["true_coszen", "true_energy"]:
                    unit = "dimensionless" if var == "true_coszen" else "GeV"
                    upper_bound = np.max(self.node_mode[var].bin_edges.m_as(unit))
                    lower_bound = np.min(self.node_mode[var].bin_edges.m_as(unit))
                    err_msg = (
                        "The outer edges of the node_mode must encompass "
                        "the entire range of calc_specs to avoid extrapolation"
                    )
                    if np.any(container[var] > upper_bound):
                        maxval = np.max(container[var])
                        raise ValueError(
                            err_msg + f"\nmax input: {maxval}, upper "
                            f"bound: {upper_bound}"
                        )
                    if np.any(container[var] < lower_bound):
                        minval = np.min(container[var])
                        raise ValueError(
                            err_msg + f"\nmin input: {minval}, lower "
                            f"bound: {lower_bound}"
                        )

            # Layers in nuSQuIDS are special: We need all the individual distances and
            # densities for the nodes to solve the interaction picture states, but on
            # the final calculation grid (or events) we only need the *total* traversed
            # distance. Because we are placing nodes at the bin edges rather than the
            # bin middle, this doesn't really fit with how containers store data, so we
            # are making arrays as variables that never go into the container.

            # These are stored because we need them later during interpolation
            self.coszen_node_mode = self.node_mode["true_coszen"].bin_edges.m_as(
                "dimensionless"
            )
            self.e_node_mode = self.node_mode["true_energy"].bin_edges.m_as("GeV")
            logging.debug(
                f"Setting up nodes at\n"
                f"cos_zen = \n{self.coszen_node_mode}\n"
                f"energy = \n{self.e_node_mode}\n"
            )
            # things are getting a bit meshy from here...
            self.e_mesh, self.cosz_mesh = np.meshgrid(
                self.e_node_mode, self.coszen_node_mode
            )
            e_nodes = self.e_mesh.ravel()
            coszen_nodes = self.cosz_mesh.ravel()

            # The lines below should not be necessary because we will always get at
            # least two numbers from the bin edges. However, if either energy or coszen
            # somehow was just a scalar, we would need to broadcast it out to the same
            # size. Keeping the code in here in case you want to use the stage in 1D.
            # convert lists to ndarrays and scalars to ndarrays with length 1
            e_nodes = np.atleast_1d(e_nodes)
            coszen_nodes = np.atleast_1d(coszen_nodes)
            # broadcast against each other and make a copy
            # (see https://numpy.org/doc/stable/reference/generated/numpy.broadcast_arrays.html)
            e_nodes, coszen_nodes = [
                np.array(a) for a in np.broadcast_arrays(e_nodes, coszen_nodes)
            ]

            assert len(e_nodes) == len(coszen_nodes)
            assert coszen_nodes.ndim == 1
            assert e_nodes.ndim == 1

            self.layers.calcLayers(coszen_nodes)
            distances = np.reshape(
                self.layers.distance, (len(e_nodes), self.layers.max_layers)
            )
            densities = np.reshape(
                self.layers.density, (len(e_nodes), self.layers.max_layers)
            )
            # HACK: We need the correct electron densities for each layer. We can
            # determine whether we are in the core or mantle based on the density.
            # Needless to say it isn't optimal to have these numbers hard-coded.
            ye = np.zeros_like(densities)
            ye[densities < 10] = self.YeM
            ye[(densities >= 10) & (densities < 13)] = self.YeO
            ye[densities >= 13] = self.YeI
            self.nus_layer = self.nusquids_layers_class(
                distances * nsq_units.km,
                densities,
                ye,
                e_nodes * nsq_units.GeV,
                self.num_neutrinos,
                nsq.NeutrinoType.both,
            )
            self.apply_prop_settings(self.nus_layer)

        # Now that we have our nusquids calculator set up on the node grid, we make
        # container output space for the probability output which may be on a finer grid
        # than the nodes or even working in events mode.
        self.data.representation = self.calc_mode

        # --- calculate the layers ---
        if isinstance(self.calc_mode, MultiDimBinning):
            # as layers don't care about flavour
            self.data.link_containers(
                "nu",
                [
                    "nue_cc",
                    "numu_cc",
                    "nutau_cc",
                    "nue_nc",
                    "numu_nc",
                    "nutau_nc",
                    "nuebar_cc",
                    "numubar_cc",
                    "nutaubar_cc",
                    "nuebar_nc",
                    "numubar_nc",
                    "nutaubar_nc",
                ],
            )

        # calculate the distance difference between minimum and maximum production
        # height, if applicable
        if self.avg_height:
            layers_min = Layers(
                earth_model,
                detector_depth,
                self.prop_height - self.prop_height_range / 2.0,
            )
            layers_min.setElecFrac(1, 1, 1)
            layers_max = Layers(
                earth_model,
                detector_depth,
                self.prop_height + self.prop_height_range / 2.0,
            )
            layers_max.setElecFrac(1, 1, 1)

        for container in self.data:
            self.layers.calcLayers(container["true_coszen"])
            distances = self.layers.distance.reshape((container.size, -1))
            tot_distances = np.sum(distances, axis=1)
            if self.avg_height:
                layers_min.calcLayers(container["true_coszen"])
                dists_min = layers_min.distance.reshape((container.size, -1))
                min_tot_dists = np.sum(dists_min, axis=1)

                layers_max.calcLayers(container["true_coszen"])
                dists_max = layers_max.distance.reshape((container.size, -1))
                max_tot_dists = np.sum(dists_max, axis=1)
                # nuSQuIDS assumes the original distance is the longest distance and
                # the averaging range is the difference between the minimum and maximum
                # distance.
                avg_ranges = max_tot_dists - min_tot_dists
                tot_distances = max_tot_dists
                assert np.all(avg_ranges > 0)
            # If the low-pass cutoff is zero, nusquids will not evaluate the filter.
            container["lowpass_cutoff"] = self.eval_lowpass_cutoff * np.ones(
                container.size
            )
            if not self.apply_lowpass_above_hor:
                container["lowpass_cutoff"] = np.where(
                    container["true_coszen"] >= 0, 0, container["lowpass_cutoff"]
                )
            if isinstance(self.node_mode, MultiDimBinning) and not self.exact_mode:
                # To project out probabilities we only need the *total* distance
                container["tot_distances"] = tot_distances
                if self.avg_height:
                    container["avg_ranges"] = avg_ranges
                else:
                    container["avg_ranges"] = np.zeros(container.size, dtype=FTYPE)
                if not self.apply_height_avg_below_hor:
                    container["avg_ranges"] = np.where(
                        container["true_coszen"] >= 0, container["avg_ranges"], 0.0
                    )
            elif self.node_mode == "events" or self.exact_mode:
                # in any other mode (events or exact) we store all densities and
                # distances in the container in calc_specs
                densities = self.layers.density.reshape((container.size, -1))
                container["densities"] = densities
                container["distances"] = distances

        self.data.unlink_containers()

        if isinstance(self.calc_mode, MultiDimBinning):
            self.data.link_containers("nue", ["nue_cc", "nue_nc"])
            self.data.link_containers("numu", ["numu_cc", "numu_nc"])
            self.data.link_containers("nutau", ["nutau_cc", "nutau_nc"])
            self.data.link_containers("nuebar", ["nuebar_cc", "nuebar_nc"])
            self.data.link_containers("numubar", ["numubar_cc", "numubar_nc"])
            self.data.link_containers("nutaubar", ["nutaubar_cc", "nutaubar_nc"])

        # setup more empty arrays
        for container in self.data:
            container["prob_e"] = np.empty((container.size), dtype=FTYPE)
            container["prob_mu"] = np.empty((container.size), dtype=FTYPE)
            if self.use_taus:
                container["prob_tau"] = np.empty((container.size), dtype=FTYPE)

        self.data.unlink_containers()

        if self.exact_mode:
            return

        # --- containers for interpolated states ---
        # This is not needed in exact mode
        if isinstance(self.calc_mode, MultiDimBinning):
            self.data.link_containers(
                "nu", ["nue_cc", "numu_cc", "nutau_cc", "nue_nc", "numu_nc", "nutau_nc"]
            )
            self.data.link_containers(
                "nubar",
                [
                    "nuebar_cc",
                    "numubar_cc",
                    "nutaubar_cc",
                    "nuebar_nc",
                    "numubar_nc",
                    "nutaubar_nc",
                ],
            )
        for container in self.data:
            container["interp_states_e"] = np.empty(
                (container.size, self.num_neutrinos ** 2),
                dtype=FTYPE,
            )
            container["interp_states_mu"] = np.empty(
                (container.size, self.num_neutrinos ** 2),
                dtype=FTYPE,
            )
            container["interp_states_tau"] = np.empty(
                (container.size, self.num_neutrinos ** 2),
                dtype=FTYPE,
            )
        self.data.unlink_containers()
        self.interpolation_warning_issued = False
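# --- Illustrative sketch (not part of the original stage) ---
# Standalone demonstration of the interpolation scheme referenced above:
# a quadratic RectBivariateSpline over a log10(energy) / cos(zenith) node mesh,
# evaluated off-grid with grid=False. The node grid, the toy "state" values and
# the evaluation points below are all made up for illustration.
import numpy as np
from scipy.interpolate import RectBivariateSpline

e_nodes = np.geomspace(1.0, 100.0, 21)   # GeV, analogous to e_node_mode
cz_nodes = np.linspace(-1.0, 1.0, 11)    # analogous to coszen_node_mode

# Toy values on the node mesh; the real stage gets these from nuSQuIDS states.
e_mesh, cz_mesh = np.meshgrid(e_nodes, cz_nodes)      # shapes (n_cz, n_e)
z = np.sin(3.0 * cz_mesh) * np.exp(-e_mesh / 50.0)
z = z.T                                               # (n_e, n_cz), as in the stage

spline = RectBivariateSpline(np.log10(e_nodes), cz_nodes, z, kx=2, ky=2)

# Evaluate at scattered (event-wise) points without forming a grid
e_out = np.array([2.5, 7.0, 42.0])
cz_out = np.array([-0.9, 0.1, 0.8])
print(spline(np.log10(e_out), cz_out, grid=False))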
Example n. 6
class nusquids(Stage):
    """
    PISA Pi stage for weighting events due to the effect of neutrino oscillations, using
    nuSQuIDS as the oscillation probability calculator. One specialty here is that we
    have to specify an additional binning to determine where to place nodes for the
    exact calculation. The points where the actual probabilities are evaluated are
    determined by calc_mode as usual and may be much finer than node_mode, or even
    event-wise, since the interpolation step is fast.

    Parameters
    ----------

    Uses the standard parameters as required by a PISA pi stage
    (see `pisa/core/stage.py`)
    
    node_mode : MultiDimBinning
        Binning to determine where to place nodes at which the evaluation of interaction
        states occurs. The nodes are placed at the _corners_ of the binning to avoid
        extrapolation.

    use_decoherence : bool
        set to true to include neutrino decoherence in the oscillation probability
        calculation

    num_decoherence_gamma : int
        number of decoherence gamma parameters to be considered in the decoherence
        model; must be either 1 or 3

    use_nsi : bool
        set to true to include Non-Standard Interactions (NSI) in the oscillation
        probability calculation

    num_neutrinos : int
        Number of neutrino flavors to include. This stage supports 3 or 4 flavors, but
        nuSQuIDS allows up to 6, so this stage could easily be expanded.
    
    earth_model : str
        Path to Earth model (PREM) file.
    
    detector_depth : quantity (distance)
    
    prop_height : quantity (distance) or str
        Height at which neutrinos are produced. If a quantity is given, the height is
        assumed to be the same for all neutrinos. An alternative is to pass
        `from_container`. In that case, the stage will search the input container for a
        key `prop_height` and take the height from there on a bin-wise or event-wise
        basis depending on `calc_specs`.
    
    prop_height_min : quantity (distance)
        Minimum production height (optional). If this value is passed, probabilities are
        averaged between the maximum production height in `prop_height` and this value
        under the assumption of a uniform production height distribution.

    YeI : quantity (dimensionless)
        Electron fraction in the inner core.

    YeO : quantity (dimensionless)
        Electron fraction in the outer core.

    YeM : quantity (dimensionless)
        Electron fraction in the mantle.

    rel_err : float
        Relative error of the numerical integration

    abs_err : float
        Absolute error of the numerical integration
    
    prop_lowpass_cutoff : quantity (1/distance)
        Frequency cutoff for fast oscillations applied during numerical integration
        of the interaction state. The frequency is passed as oscillations per distance.
        A reasonable order of magnitude would allow ~100 oscillations over 12000 km.
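        For example, 100 oscillations over 12000 km correspond to a cutoff of roughly
        100 / 12000 km ~ 8e-3 / km.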
    
    prop_lowpass_frac : quantity (dimensionless)
        This number determines how sharply the low-pass filter cut-off is applied
        during numerical integration. A value of 0.1 means that the filter begins
        to kick in when 90% of the cutoff frequency is reached and linearly
        decreases oscillation amplitudes until the cutoff is reached.
    
    eval_lowpass_cutoff : quantity (1/distance)
        Same as `prop_lowpass_cutoff`, but applied during evaluation of interpolated 
        states, not during integration.
    
    eval_lowpass_frac : quantity (dimensionless)
        Same as `prop_lowpass_frac`, but applied during evaluation of interpolated 
        states, not during integration.
    
    exact_mode : bool
        With this turned on, the probabilities are evaluated using the exact calculation
        for constant densities in every layer without numerical integration. This method
        is much faster than the numerical integration for a node, but you lose the
        option to project out probabilities from interaction picture states. In this
        mode, nuSQuIDS behaves essentially like GLoBES with the same speed trade-off.
        You cannot apply filters in this mode either. Its only recommended use is for
        pseudo-data generation, where you may want an exact event-by-event calculation
        that is allowed to take several minutes.

    params : ParamSet or sequence with which to instantiate a ParamSet.
        Expected params .. ::
            theta12 : quantity (angle)
            theta13 : quantity (angle)
            theta23 : quantity (angle)
            deltam21 : quantity (mass^2)
            deltam31 : quantity (mass^2)
            deltacp : quantity (angle)
        Additional expected params if `num_neutrinos == 4` .. ::
            theta14 : quantity (angle)
            theta24 : quantity (angle)
            deltam41 : quantity (mass^2)
            deltacp14 : quantity (angle)
            deltacp24 : quantity (angle)

    Additional ParamSet params expected when using the `use_decoherence` argument:
        n_energy : quantity (dimensionless)
        * If using `num_decoherence_gamma` == 1:
            gamma : quantity (energy)
        * If using `num_decoherence_gamma` == 3:
            gamma12 : quantity (energy)
            gamma13 : quantity (energy)
            gamma23 : quantity (energy)

    """
    def __init__(
        self,
        earth_model=None,
        detector_depth=None,
        prop_height=None,
        prop_height_min=None,
        YeI=None,
        YeO=None,
        YeM=None,
        rel_err=None,
        abs_err=None,
        prop_lowpass_cutoff=None,
        prop_lowpass_frac=None,
        eval_lowpass_cutoff=None,
        eval_lowpass_frac=None,
        node_mode=None,
        use_decoherence=False,
        num_decoherence_gamma=1,
        use_nsi=False,
        num_neutrinos=3,
        exact_mode=False,
        **std_kwargs,
    ):

        if use_nsi:
            raise NotImplementedError("NSI not implemented")
        if use_decoherence:
            raise NotImplementedError("Decoherence not implemented")
        if type(prop_height) is not ureg.Quantity:
            raise NotImplementedError(
                "Getting propagation heights from containers is "
                "not yet implemented")
        self.num_neutrinos = int(num_neutrinos)
        assert self.num_neutrinos < 5, "currently only supports up to 4 flavor oscillations"
        self.use_nsi = use_nsi
        self.use_decoherence = use_decoherence
        self.num_decoherence_gamma = num_decoherence_gamma
        self.node_mode = node_mode

        self.earth_model = earth_model
        self.YeI = YeI.m_as("dimensionless")
        self.YeO = YeO.m_as("dimensionless")
        self.YeM = YeM.m_as("dimensionless")
        self.detector_depth = detector_depth.m_as("km")
        self.prop_height = prop_height.m_as("km")
        self.avg_height = False
        self.prop_height_min = None
        if prop_height_min is not None:  # this is optional
            self.prop_height_min = prop_height_min.m_as("km")
            self.avg_height = True

        self.layers = None

        self.rel_err = rel_err.m_as(
            "dimensionless") if rel_err is not None else 1.0e-10
        self.abs_err = abs_err.m_as(
            "dimensionless") if abs_err is not None else 1.0e-10
        self.prop_lowpass_cutoff = (prop_lowpass_cutoff.m_as("1/km")
                                    if prop_lowpass_cutoff is not None else 0.)
        self.prop_lowpass_frac = (prop_lowpass_frac.m_as("dimensionless")
                                  if prop_lowpass_frac is not None else 0.)
        self.eval_lowpass_cutoff = (eval_lowpass_cutoff.m_as("1/km")
                                    if eval_lowpass_cutoff is not None else 0.)
        self.eval_lowpass_frac = (eval_lowpass_frac.m_as("dimensionless")
                                  if eval_lowpass_frac is not None else 0.)

        if self.prop_lowpass_frac > 1. or self.eval_lowpass_frac > 1.:
            raise ValueError(
                "lowpass filter fraction cannot be greater than one")

        if self.prop_lowpass_frac < 0. or self.eval_lowpass_frac < 0.:
            raise ValueError(
                "lowpass filter fraction cannot be smaller than zero")

        self.nus_layer = None
        self.nus_layerbar = None

        # Define standard params
        expected_params = [
            "theta12",
            "theta13",
            "theta23",
            "deltam21",
            "deltam31",
            "deltacp",
        ]

        # Add decoherence parameters
        assert self.num_decoherence_gamma in [
            1, 3
        ], ("Must choose either 1 or 3 "
            "decoherence gamma parameters")
        if self.use_decoherence:
            if self.num_decoherence_gamma == 1:
                expected_params.extend(["gamma"])
            elif self.num_decoherence_gamma == 3:
                expected_params.extend(["gamma21", "gamma31", "gamma32"])
            expected_params.extend(["n_energy"])

        # We may want to reparametrize this with the difference between deltacp14 and
        # deltacp24, as the absolute value seems to play a small role (see
        # https://arxiv.org/pdf/2010.06321.pdf)
        if self.num_neutrinos == 4:
            expected_params.extend([
                "theta14",
                "theta24",
                "theta34",
                "deltam41",
                "deltacp14",
                "deltacp24",
            ])

        # init base class
        super().__init__(
            expected_params=expected_params,
            **std_kwargs,
        )

        # This is special: We have an additional "binning" to account for. It is in
        # principle possible to work in event mode even for the nodes, which would mean
        # that the full oscillation problem is solved for all events individually.
        # Together with the constant oscillation mode, this can be used to calculate
        # probabilities in exact mode in a time that is reasonable at least for
        # generating pseudodata.

        assert not (self.use_nsi and self.use_decoherence), (
            "NSI and decoherence not "
            "suported together, must use one or the other")

        self.exact_mode = exact_mode

        if exact_mode:
            # No interpolation is happening in exact mode so any passed node_mode
            # will be ignored. Probabilities are calculated at calc_specs.
            if self.node_mode is not None:
                logging.warn(
                    "nuSQuIDS is configured in exact mode, the passed "
                    f"`node_mode`\n({self.node_mode})\n will be ignored!")
            if self.prop_lowpass_cutoff > 0 or self.eval_lowpass_cutoff > 0:
                logging.warn(
                    "nuSQuIDS is configured in exact mode, low-pass filters "
                    "will be ignored")
        else:
            if isinstance(self.calc_mode, MultiDimBinning):
                assert isinstance(self.node_mode, MultiDimBinning), (
                    "cannot use "
                    "event-wise nodes with binned calculation")

        self.e_node_mode = None
        self.e_mesh = None
        self.coszen_node_mode = None
        self.cosz_mesh = None

    def set_osc_parameters(self, nus_layer):
        # nuSQuIDS uses zero-index for mixing angles
        nus_layer.Set_MixingAngle(0, 1, self.params.theta12.value.m_as("rad"))
        nus_layer.Set_MixingAngle(0, 2, self.params.theta13.value.m_as("rad"))
        nus_layer.Set_MixingAngle(1, 2, self.params.theta23.value.m_as("rad"))

        # mass differences in nuSQuIDS are always w.r.t. m_1
        nus_layer.Set_SquareMassDifference(
            1, self.params.deltam21.value.m_as("eV**2"))
        nus_layer.Set_SquareMassDifference(
            2, self.params.deltam31.value.m_as("eV**2"))

        nus_layer.Set_CPPhase(0, 2, self.params.deltacp.value.m_as("rad"))

        if self.num_neutrinos == 3: return

        nus_layer.Set_MixingAngle(0, 3, self.params.theta14.value.m_as("rad"))
        nus_layer.Set_MixingAngle(1, 3, self.params.theta24.value.m_as("rad"))
        nus_layer.Set_MixingAngle(2, 3, self.params.theta34.value.m_as("rad"))
        nus_layer.Set_SquareMassDifference(
            3, self.params.deltam41.value.m_as("eV**2"))
        nus_layer.Set_CPPhase(0, 3, self.params.deltacp14.value.m_as("rad"))
        nus_layer.Set_CPPhase(1, 3, self.params.deltacp24.value.m_as("rad"))
        # TODO: Implement NSI, decoherence

    def apply_prop_settings(self, nus_layer):
        nsq_units = nsq.Const()
        nus_layer.Set_rel_error(self.rel_err)
        nus_layer.Set_abs_error(self.abs_err)
        nus_layer.Set_EvolLowPassCutoff(self.prop_lowpass_cutoff /
                                        nsq_units.km)
        # The ramp of the low-pass filter starts to drop at (cutoff - scale)
        scale = self.prop_lowpass_frac * self.prop_lowpass_cutoff / nsq_units.km
        nus_layer.Set_EvolLowPassScale(scale)
        nus_layer.Set_AllowConstantDensityOscillationOnlyEvolution(
            self.exact_mode)
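    # Worked example for the filter ramp above (illustrative numbers, not from the
    # source): with a cutoff c = 0.01 / km and prop_lowpass_frac f = 0.1, the scale
    # is f * c = 0.001 / km, so the amplitude suppression ramps linearly from
    # (c - scale) = 0.9 * c up to the cutoff c, beyond which fast oscillations are
    # filtered out entirely.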

    def setup_function(self):

        earth_model = find_resource(self.earth_model)
        prop_height = self.prop_height
        detector_depth = self.detector_depth
        self.layers = Layers(earth_model, detector_depth, prop_height)
        self.layers.setElecFrac(self.YeI, self.YeO, self.YeM)

        nsq_units = nsq.Const()  # natural units for nusquids
        # Because we don't want to extrapolate, we check that all points at which we
        # want to evaluate probabilities are fully contained within the node specs. This
        # is of course not necessary in events mode.
        if isinstance(self.node_mode, MultiDimBinning) and not self.exact_mode:
            logging.debug("setting up nuSQuIDS nodes in binned mode")
            # we can prepare the calculator like this only in binned mode, see
            # compute_function for node_mode == "events"
            self.data.representation = self.calc_mode
            for container in self.data:
                for var in ["true_coszen", "true_energy"]:
                    upper_bound = np.max(self.node_mode[var].bin_edges)
                    lower_bound = np.min(self.node_mode[var].bin_edges)
                    err_msg = (
                        "The outer edges of the node_mode must encompass "
                        "the entire range of calc_specs to avoid extrapolation"
                    )
                    if np.any(container[var] > upper_bound):
                        maxval = np.max(container[var])
                        raise ValueError(err_msg +
                                         f"\nmax input: {maxval}, upper "
                                         f"bound: {upper_bound}")
                    if np.any(container[var] < lower_bound):
                        minval = np.min(container[var])
                        raise ValueError(err_msg +
                                         f"\nmin input: {minval}, lower "
                                         f"bound: {lower_bound}")

            # Layers in nuSQuIDS are special: We need all the individual distances and
            # densities for the nodes to solve the interaction picture states, but on
            # the final calculation grid (or events) we only need the *total* traversed
            # distance. Because we are placing nodes at the bin edges rather than the
            # bin middle, this doesn't really fit with how containers store data, so we
            # are making arrays as variables that never go into the container.

            # These are stored because we need them later during interpolation
            self.coszen_node_mode = self.node_mode[
                "true_coszen"].bin_edges.m_as("dimensionless")
            self.e_node_mode = self.node_mode["true_energy"].bin_edges.m_as(
                "GeV")
            logging.debug(f"Setting up nodes at\n"
                          f"cos_zen = \n{self.coszen_node_mode}\n"
                          f"energy = \n{self.e_node_mode}\n")
            # things are getting a bit meshy from here...
            self.e_mesh, self.cosz_mesh = np.meshgrid(self.e_node_mode,
                                                      self.coszen_node_mode)
            e_nodes = self.e_mesh.ravel()
            coszen_nodes = self.cosz_mesh.ravel()

            # The lines below should not be necessary because we will always get at
            # least two numbers from the bin edges. However, if either energy or coszen
            # somehow was just a scalar, we would need to broadcast it out to the same
            # size. Keeping the code in here in case you want to use the stage in 1D.
            # convert lists to ndarrays and scalars to ndarrays with length 1
            e_nodes = np.atleast_1d(e_nodes)
            coszen_nodes = np.atleast_1d(coszen_nodes)
            # broadcast against each other and make a copy
            # (see https://numpy.org/doc/stable/reference/generated/numpy.broadcast_arrays.html)
            e_nodes, coszen_nodes = [
                np.array(a)
                for a in np.broadcast_arrays(e_nodes, coszen_nodes)
            ]

            assert len(e_nodes) == len(coszen_nodes)
            assert coszen_nodes.ndim == 1
            assert e_nodes.ndim == 1

            self.layers.calcLayers(coszen_nodes)
            distances = np.reshape(self.layers.distance,
                                   (len(e_nodes), self.layers.max_layers))
            densities = np.reshape(self.layers.density,
                                   (len(e_nodes), self.layers.max_layers))
            # electron fraction is already included by multiplying the densities with
            # them in the Layers module, so we pass 1. to nuSQuIDS (unless energies are
            # very high, this should be equivalent).
            ye = np.broadcast_to(np.array([1.]),
                                 (len(e_nodes), self.layers.max_layers))
            self.nus_layer = nsq.nuSQUIDSLayers(
                distances * nsq_units.km,
                densities,
                ye,
                e_nodes * nsq_units.GeV,
                self.num_neutrinos,
                nsq.NeutrinoType.both,
            )
            self.apply_prop_settings(self.nus_layer)

        # Now that we have our nusquids calculator set up on the node grid, we make
        # container output space for the probability output which may be on a finer grid
        # than the nodes or even working in events mode.
        self.data.representation = self.calc_mode

        # --- calculate the layers ---
        if isinstance(self.calc_mode, MultiDimBinning):
            # as layers don't care about flavour
            self.data.link_containers("nu", [
                "nue_cc", "numu_cc", "nutau_cc", "nue_nc", "numu_nc",
                "nutau_nc", "nuebar_cc", "numubar_cc", "nutaubar_cc",
                "nuebar_nc", "numubar_nc", "nutaubar_nc"
            ])
        # calculate the distance difference between minimum and maximum production
        # height, if applicable
        if self.avg_height:
            layers_min = Layers(earth_model, detector_depth,
                                self.prop_height_min)
            layers_min.setElecFrac(self.YeI, self.YeO, self.YeM)
        for container in self.data:
            self.layers.calcLayers(container["true_coszen"])
            distances = self.layers.distance.reshape(
                (container.size, self.layers.max_layers))
            tot_distances = np.sum(distances, axis=1)
            if self.avg_height:
                layers_min.calcLayers(container["true_coszen"])
                dists_min = layers_min.distance.reshape(
                    (container.size, self.layers.max_layers))
                min_tot_dists = np.sum(dists_min, axis=1)
                # nuSQuIDS assumes the original distance is the longest distance and
                # the averaging range is the difference between the minimum and maximum
                # distance.
                avg_ranges = tot_distances - min_tot_dists
                assert np.all(avg_ranges > 0)
            if isinstance(self.node_mode,
                          MultiDimBinning) and not self.exact_mode:
                # To project out probabilities we only need the *total* distance
                container["tot_distances"] = tot_distances
                # for the binned node_mode we already calculated layers above
                if self.avg_height:
                    container["avg_ranges"] = avg_ranges
            elif self.node_mode == "events" or self.exact_mode:
                # in any other mode (events or exact) we store all densities and
                # distances in the container in calc_specs
                densities = self.layers.density.reshape(
                    (container.size, self.layers.max_layers))
                container["densities"] = densities
                container["distances"] = distances

        self.data.unlink_containers()

        if isinstance(self.calc_mode, MultiDimBinning):
            self.data.link_containers("nue", ["nue_cc", "nue_nc"])
            self.data.link_containers("numu", ["numu_cc", "numu_nc"])
            self.data.link_containers("nutau", ["nutau_cc", "nutau_nc"])
            self.data.link_containers("nuebar", ["nuebar_cc", "nuebar_nc"])
            self.data.link_containers("numubar", ["numubar_cc", "numubar_nc"])
            self.data.link_containers("nutaubar",
                                      ["nutaubar_cc", "nutaubar_nc"])

        # setup more empty arrays
        for container in self.data:
            container["prob_e"] = np.empty((container.size), dtype=FTYPE)
            container["prob_mu"] = np.empty((container.size), dtype=FTYPE)
        self.data.unlink_containers()

        if self.exact_mode: return

        # --- containers for interpolated states ---
        # This is not needed in exact mode
        if isinstance(self.calc_mode, MultiDimBinning):
            self.data.link_containers("nu", [
                "nue_cc", "numu_cc", "nutau_cc", "nue_nc", "numu_nc",
                "nutau_nc"
            ])
            self.data.link_containers("nubar", [
                "nuebar_cc", "numubar_cc", "nutaubar_cc", "nuebar_nc",
                "numubar_nc", "nutaubar_nc"
            ])
        for container in self.data:
            container["interp_states_e"] = np.empty(
                (container.size, self.num_neutrinos**2),
                dtype=FTYPE,
            )
            container["interp_states_mu"] = np.empty(
                (container.size, self.num_neutrinos**2),
                dtype=FTYPE,
            )
        self.data.unlink_containers()

    # @line_profile
    def calc_node_probs(self, nus_layer, flav_in, flav_out, n_nodes):
        """
        Evaluate oscillation probabilities at nodes. This does not require any
        interpolation.
        """
        ini_state = np.array([0] * self.num_neutrinos)
        ini_state[flav_in] = 1
        nus_layer.Set_initial_state(ini_state, nsq.Basis.flavor)
        nus_layer.EvolveState()
        prob_nodes = nus_layer.EvalFlavorAtNodes(flav_out)
        return prob_nodes

    def calc_interpolated_states(self, evolved_states, e_out, cosz_out):
        """
        Calculate interpolated states at the energies and zenith angles requested.
        """
        nsq_units = nsq.Const()
        interp_states = np.zeros((e_out.size, evolved_states.shape[1]))

        assert np.all(e_out <= np.max(self.e_node_mode * nsq_units.GeV))
        assert np.all(e_out >= np.min(self.e_node_mode * nsq_units.GeV))
        assert np.all(cosz_out <= np.max(self.coszen_node_mode))
        assert np.all(cosz_out >= np.min(self.coszen_node_mode))

        for i in range(evolved_states.shape[1]):
            z = evolved_states[:, i].reshape(self.e_mesh.shape).T
            assert np.all(np.isfinite(z))
            # RectBivariateSpline takes the 1D node positions and assumes that they
            # lie on a regular mesh.
            f = RectBivariateSpline(
                np.log10(self.e_node_mode * nsq_units.GeV),
                self.coszen_node_mode,
                z,
                kx=2,
                ky=2,
            )
            interp_states[..., i] = f(np.log10(e_out), cosz_out, grid=False)
        return interp_states

    def calc_probs_interp(self,
                          flav_out,
                          nubar,
                          interp_states,
                          out_distances,
                          e_out,
                          avg_ranges=0):
        """
        Project out probabilities from interpolated interaction picture states.
        """
        nsq_units = nsq.Const()

        prob_interp = np.zeros(e_out.size)
        scale = self.eval_lowpass_frac * self.eval_lowpass_cutoff / nsq_units.km
        prob_interp = self.nus_layer.EvalWithState(
            flav_out,
            out_distances,
            e_out,
            interp_states,
            avr_scale=0.,
            rho=int(nubar),
            lowpass_cutoff=self.eval_lowpass_cutoff / nsq_units.km,
            lowpass_scale=scale,
            t_range=avg_ranges)
        return prob_interp

    def compute_function_no_interpolation(self):
        """
        Version of the compute function that does not use any interpolation between
        nodes.
        """
        nsq_units = nsq.Const()
        # it is possible to work in binned calc mode while being in exact mode
        if isinstance(self.calc_mode, MultiDimBinning):
            self.data.link_containers("nue", ["nue_cc", "nue_nc"])
            self.data.link_containers("numu", ["numu_cc", "numu_nc"])
            self.data.link_containers("nutau", ["nutau_cc", "nutau_nc"])
            self.data.link_containers("nuebar", ["nuebar_cc", "nuebar_nc"])
            self.data.link_containers("numubar", ["numubar_cc", "numubar_nc"])
            self.data.link_containers("nutaubar",
                                      ["nutaubar_cc", "nutaubar_nc"])
        for container in self.data:
            nubar = container["nubar"] < 0
            flav = container["flav"]
            # electron fraction is already included by multiplying the densities
            # with them in the Layers module, so we pass 1. to nuSQuIDS (unless
            # energies are very high, this should be equivalent).
            ye = np.broadcast_to(np.array([1.]),
                                 (container.size, self.layers.max_layers))
            nus_layer = nsq.nuSQUIDSLayers(
                container["distances"] * nsq_units.km,
                container["densities"],
                ye,
                container["true_energy"] * nsq_units.GeV,
                self.num_neutrinos,
                nsq.NeutrinoType.antineutrino
                if nubar else nsq.NeutrinoType.neutrino,
            )
            self.apply_prop_settings(nus_layer)
            self.set_osc_parameters(nus_layer)
            container["prob_e"] = self.calc_node_probs(nus_layer, 0, flav,
                                                       container.size)
            container["prob_mu"] = self.calc_node_probs(
                nus_layer, 1, flav, container.size)

            container.mark_changed("prob_e")
            container.mark_changed("prob_mu")
        self.data.unlink_containers()

    @profile
    def compute_function_interpolated(self):
        """
        Version of the compute function that does use interpolation between nodes.
        """
        nsq_units = nsq.Const()
        # We need to make two evolutions, one for numu and the other for nue.
        # These produce neutrino and antineutrino states at the same time thanks to
        # the "both" neutrino mode of nuSQuIDS.
        self.apply_prop_settings(self.nus_layer)
        self.set_osc_parameters(self.nus_layer)

        ini_state_nue = np.array([1, 0] + [0] * (self.num_neutrinos - 2))
        ini_state_numu = np.array([0, 1] + [0] * (self.num_neutrinos - 2))

        self.nus_layer.Set_initial_state(ini_state_nue, nsq.Basis.flavor)
        self.nus_layer.EvolveState()
        evolved_states_nue = self.nus_layer.GetStates(0)
        evolved_states_nuebar = self.nus_layer.GetStates(1)

        self.nus_layer.Set_initial_state(ini_state_numu, nsq.Basis.flavor)
        self.nus_layer.EvolveState()
        evolved_states_numu = self.nus_layer.GetStates(0)
        evolved_states_numubar = self.nus_layer.GetStates(1)

        # Now comes the step where we interpolate the interaction picture states
        # and project out oscillation probabilities. This can be done in either events
        # or binned mode.
        if isinstance(self.calc_mode, MultiDimBinning):
            self.data.link_containers("nu", [
                "nue_cc", "numu_cc", "nutau_cc", "nue_nc", "numu_nc",
                "nutau_nc"
            ])
            self.data.link_containers("nubar", [
                "nuebar_cc", "numubar_cc", "nutaubar_cc", "nuebar_nc",
                "numubar_nc", "nutaubar_nc"
            ])
        for container in self.data:
            nubar = container["nubar"] < 0
            container["interp_states_e"] = self.calc_interpolated_states(
                evolved_states_nuebar if nubar else evolved_states_nue,
                container["true_energy"] * nsq_units.GeV,
                container["true_coszen"])
            container["interp_states_mu"] = self.calc_interpolated_states(
                evolved_states_numubar if nubar else evolved_states_numu,
                container["true_energy"] * nsq_units.GeV,
                container["true_coszen"])
        self.data.unlink_containers()

        if isinstance(self.calc_mode, MultiDimBinning):
            self.data.link_containers("nue", ["nue_cc", "nue_nc"])
            self.data.link_containers("numu", ["numu_cc", "numu_nc"])
            self.data.link_containers("nutau", ["nutau_cc", "nutau_nc"])
            self.data.link_containers("nuebar", ["nuebar_cc", "nuebar_nc"])
            self.data.link_containers("numubar", ["numubar_cc", "numubar_nc"])
            self.data.link_containers("nutaubar",
                                      ["nutaubar_cc", "nutaubar_nc"])

        for container in self.data:
            nubar = container["nubar"] < 0
            flav_out = container["flav"]
            for flav_in in ["e", "mu"]:
                container["prob_" + flav_in] = self.calc_probs_interp(
                    flav_out, nubar, container["interp_states_" + flav_in],
                    container["tot_distances"] * nsq_units.km,
                    container["true_energy"] * nsq_units.GeV,
                    container["avg_ranges"] *
                    nsq_units.km if self.avg_height else 0.)
            container.mark_changed("prob_e")
            container.mark_changed("prob_mu")
        self.data.unlink_containers()

    def compute_function(self):
        if self.node_mode == "events" or self.exact_mode:
            self.compute_function_no_interpolation()
        else:
            self.compute_function_interpolated()

    @profile
    def apply_function(self):
        for container in self.data:
            scales = (
                container["nu_flux"][:, 0] * container["prob_e"]
                + container["nu_flux"][:, 1] * container["prob_mu"]
            )
            container["weights"] = container["weights"] * scales
Example n. 7
    def setup_function(self):

        earth_model = find_resource(self.earth_model)
        prop_height = self.prop_height
        detector_depth = self.detector_depth
        self.layers = Layers(earth_model, detector_depth, prop_height)
        self.layers.setElecFrac(self.YeI, self.YeO, self.YeM)

        nsq_units = nsq.Const()  # natural units for nusquids
        # Because we don't want to extrapolate, we check that all points at which we
        # want to evaluate probabilities are fully contained within the node specs. This
        # is of course not necessary in events mode.
        if self.node_mode == "binned" and not self.exact_mode:
            logging.debug("setting up nuSQuIDS nodes in binned mode")
            # we can prepare the calculator like this only in binned mode, see
            # compute_function for node_mode == "events"
            self.data.data_specs = self.calc_specs
            for container in self.data:
                for var in ["true_coszen", "true_energy"]:
                    upper_bound = np.max(self.node_specs[var].bin_edges)
                    lower_bound = np.min(self.node_specs[var].bin_edges)
                    err_msg = (
                        "The outer edges of the node_specs must encompass "
                        "the entire range of calc_specs to avoid extrapolation"
                    )
                    if np.any(container[var].get(WHERE) > upper_bound):
                        maxval = np.max(container[var].get(WHERE))
                        raise ValueError(err_msg +
                                         f"\nmax input: {maxval}, upper "
                                         f"bound: {upper_bound}")
                    if np.any(container[var].get(WHERE) < lower_bound):
                        minval = np.min(container[var].get(WHERE))
                        raise ValueError(err_msg +
                                         f"\nmin input: {minval}, lower "
                                         f"bound: {lower_bound}")

            # Layers in nuSQuIDS are special: We need all the individual distances and
            # densities for the nodes to solve the interaction picture states, but on
            # the final calculation grid (or events) we only need the *total* traversed
            # distance. Because we are placing nodes at the bin edges rather than the
            # bin middle, this doesn't really fit with how containers store data, so we
            # are making arrays as variables that never go into the container.

            # These are stored because we need them later during interpolation
            self.coszen_node_specs = self.node_specs[
                "true_coszen"].bin_edges.m_as("dimensionless")
            self.e_node_specs = self.node_specs["true_energy"].bin_edges.m_as(
                "GeV")
            logging.debug(f"Setting up nodes at\n"
                          f"cos_zen = \n{self.coszen_node_specs}\n"
                          f"energy = \n{self.e_node_specs}\n")
            # things are getting a bit meshy from here...
            self.e_mesh, self.cosz_mesh = np.meshgrid(self.e_node_specs,
                                                      self.coszen_node_specs)
            e_nodes = self.e_mesh.ravel()
            coszen_nodes = self.cosz_mesh.ravel()

            # The lines below should not be necessary because we will always get at
            # least two numbers from the bin edges. However, if either energy or coszen
            # somehow was just a scalar, we would need to broadcast it out to the same
            # size. Keeping the code in here in case you want to use the stage in 1D.
            # convert lists to ndarrays and scalars to ndarrays with length 1
            e_nodes = np.atleast_1d(e_nodes)
            coszen_nodes = np.atleast_1d(coszen_nodes)
            # broadcast against each other and make a copy
            # (see https://numpy.org/doc/stable/reference/generated/numpy.broadcast_arrays.html)
            e_nodes, coszen_nodes = [
                np.array(a)
                for a in np.broadcast_arrays(e_nodes, coszen_nodes)
            ]

            assert len(e_nodes) == len(coszen_nodes)
            assert coszen_nodes.ndim == 1
            assert e_nodes.ndim == 1

            self.layers.calcLayers(coszen_nodes)
            distances = np.reshape(self.layers.distance,
                                   (len(e_nodes), self.layers.max_layers))
            densities = np.reshape(self.layers.density,
                                   (len(e_nodes), self.layers.max_layers))
            # electron fraction is already included by multiplying the densities with
            # them in the Layers module, so we pass 1. to nuSQuIDS (unless energies are
            # very high, this should be equivalent).
            ye = np.broadcast_to(np.array([1.]),
                                 (len(e_nodes), self.layers.max_layers))
            self.nus_layer = nsq.nuSQUIDSLayers(
                distances * nsq_units.km,
                densities,
                ye,
                e_nodes * nsq_units.GeV,
                self.num_neutrinos,
                nsq.NeutrinoType.both,
            )
            self.apply_prop_settings(self.nus_layer)

        # Now that we have our nusquids calculator set up on the node grid, we make
        # container output space for the probability output which may be on a finer grid
        # than the nodes or even working in events mode.
        self.data.data_specs = self.calc_specs

        # --- calculate the layers ---
        if self.calc_mode == "binned":
            # as layers don't care about flavour
            self.data.link_containers("nu", [
                "nue_cc", "numu_cc", "nutau_cc", "nue_nc", "numu_nc",
                "nutau_nc", "nuebar_cc", "numubar_cc", "nutaubar_cc",
                "nuebar_nc", "numubar_nc", "nutaubar_nc"
            ])
        # calculate the distance difference between minimum and maximum production
        # height, if applicable
        if self.avg_height:
            layers_min = Layers(earth_model, detector_depth,
                                self.prop_height_min)
            layers_min.setElecFrac(self.YeI, self.YeO, self.YeM)
        for container in self.data:
            self.layers.calcLayers(container["true_coszen"].get("host"))
            distances = self.layers.distance.reshape(
                (container.size, self.layers.max_layers))
            tot_distances = np.sum(distances, axis=1)
            if self.avg_height:
                layers_min.calcLayers(container["true_coszen"].get("host"))
                dists_min = layers_min.distance.reshape(
                    (container.size, self.layers.max_layers))
                min_tot_dists = np.sum(dists_min, axis=1)
                # nuSQuIDS assumes the original distance is the longest distance and
                # the averaging range is the difference between the minimum and maximum
                # distance.
                avg_ranges = tot_distances - min_tot_dists
                assert np.all(avg_ranges > 0)
            if self.node_mode == "binned" and not self.exact_mode:
                # To project out probabilities we only need the *total* distance
                container["tot_distances"] = tot_distances
                # for the binned node_mode we already calculated layers above
                if self.avg_height:
                    container["avg_ranges"] = avg_ranges
            elif self.node_mode == "events" or self.exact_mode:
                # in any other mode (events or exact) we store all densities and
                # distances in the container in calc_specs
                densities = self.layers.density.reshape(
                    (container.size, self.layers.max_layers))
                container["densities"] = densities
                container["distances"] = distances

        self.data.unlink_containers()

        if self.calc_mode == "binned":
            self.data.link_containers("nue", ["nue_cc", "nue_nc"])
            self.data.link_containers("numu", ["numu_cc", "numu_nc"])
            self.data.link_containers("nutau", ["nutau_cc", "nutau_nc"])
            self.data.link_containers("nuebar", ["nuebar_cc", "nuebar_nc"])
            self.data.link_containers("numubar", ["numubar_cc", "numubar_nc"])
            self.data.link_containers("nutaubar",
                                      ["nutaubar_cc", "nutaubar_nc"])

        # setup more empty arrays
        for container in self.data:
            container["prob_e"] = np.empty((container.size), dtype=FTYPE)
            container["prob_mu"] = np.empty((container.size), dtype=FTYPE)
        self.data.unlink_containers()

        if self.exact_mode: return

        # --- containers for interpolated states ---
        # This is not needed in exact mode
        if self.calc_mode == "binned":
            self.data.link_containers("nu", [
                "nue_cc", "numu_cc", "nutau_cc", "nue_nc", "numu_nc",
                "nutau_nc"
            ])
            self.data.link_containers("nubar", [
                "nuebar_cc", "numubar_cc", "nutaubar_cc", "nuebar_nc",
                "numubar_nc", "nutaubar_nc"
            ])
        for container in self.data:
            container["interp_states_e"] = np.empty(
                (container.size, self.num_neutrinos**2),
                dtype=FTYPE,
            )
            container["interp_states_mu"] = np.empty(
                (container.size, self.num_neutrinos**2),
                dtype=FTYPE,
            )
        self.data.unlink_containers()
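The node-grid bookkeeping above boils down to a few standard NumPy operations. Below is a minimal standalone sketch (pure NumPy, independent of PISA and nuSQuIDS; the toy edge values are invented for illustration) of the meshing, flattening, and scalar-safety broadcasting used in the setup function:

import numpy as np

# toy node edges standing in for self.e_node_specs / self.coszen_node_specs (hypothetical values)
e_nodes_1d = np.array([1.0, 10.0, 100.0])   # GeV
cz_nodes_1d = np.array([-1.0, -0.5, 0.0])   # cos(zenith)

# mesh the two axes and flatten, exactly as in the setup function above
e_mesh, cz_mesh = np.meshgrid(e_nodes_1d, cz_nodes_1d)
e_nodes = e_mesh.ravel()        # shape (9,)
coszen_nodes = cz_mesh.ravel()  # shape (9,)

# scalar-safety path: atleast_1d plus broadcast_arrays yields two equal-length,
# writable 1-D arrays even if one of the inputs had been a single number
e_nodes = np.atleast_1d(e_nodes)
coszen_nodes = np.atleast_1d(coszen_nodes)
e_nodes, coszen_nodes = [np.array(a) for a in np.broadcast_arrays(e_nodes, coszen_nodes)]

assert e_nodes.shape == coszen_nodes.shape == (9,)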
Example n. 8
class globes(Stage):
    """
    GLoBES PISA Pi class

    Parameters
    ----------
    earth_model : PREM file path
    globes_wrapper : path to GLoBES wrapper
    detector_depth : quantity (distance)
    prop_height : quantity (distance)
    params : ParamSet or sequence with which to instantiate a ParamSet.
        Expected params .. ::

            theta12 : quantity (angle)
            theta13 : quantity (angle)
            theta23 : quantity (angle)
            deltam21 : quantity (mass^2)
            deltam31 : quantity (mass^2)
            deltam41 : quantity (mass^2)
            theta24 : quantity (angle)
            theta34 : quantity (angle)
            deltacp : quantity (angle)

    """
    def __init__(
        self,
        earth_model,
        globes_wrapper,
        detector_depth=2.*ureg.km,
        prop_height=20.*ureg.km,
        **std_kwargs,
    ):

        expected_params = (
            'theta12',
            'theta13',
            'theta23',
            'deltam21',
            'deltam31',
            'deltam41',
            'theta24',
            'theta34',
            'deltacp',
        )

        # init base class
        super().__init__(
            expected_params=expected_params,
            **std_kwargs,
        )

        self.layers = None
        self.osc_params = None
        self.earth_model = earth_model
        self.globes_wrapper = globes_wrapper
        self.detector_depth = detector_depth
        self.prop_height = prop_height

        self.globes_calc = None

    @profile
    def setup_function(self):
        sys.path.append(self.globes_wrapper)
        import GLoBES
        ### you need to start GLoBES from the folder containing a dummy experiment
        # therefore we go to the folder, load GLoBES and then go back
        curdir = os.getcwd()
        os.chdir(self.globes_wrapper)
        self.globes_calc = GLoBES.GLoBESCalculator("calc")
        os.chdir(curdir)
        self.globes_calc.InitSteriles(2)
        # object for oscillation parameters
        self.osc_params = OscParams()
        earth_model = find_resource(self.earth_model)
        prop_height = self.prop_height.m_as('km')
        detector_depth = self.detector_depth.m_as('km')
        self.layers = Layers(earth_model, detector_depth, prop_height)
        # The electron fractions are taken into account internally by GLoBES/SNU.
        # See the SNU patch for details. It uses the density to decide
        # whether it is in the core or in the mantle. Therefore, we just multiply by
        # one to give GLoBES the raw densities.
        self.layers.setElecFrac(1., 1., 1.)

        # set the correct data mode
        self.data.representation = self.calc_mode

        # --- calculate the layers ---
        if self.data.is_map:
            # speed up calculation by adding links
            # as layers don't care about flavour
            self.data.link_containers('nu', ['nue_cc', 'numu_cc', 'nutau_cc',
                                             'nue_nc', 'numu_nc', 'nutau_nc',
                                             'nuebar_cc', 'numubar_cc', 'nutaubar_cc',
                                             'nuebar_nc', 'numubar_nc', 'nutaubar_nc'])

        for container in self.data:
            self.layers.calcLayers(container['true_coszen'])
            container['densities'] = self.layers.density.reshape((container.size, self.layers.max_layers))
            container['distances'] = self.layers.distance.reshape((container.size, self.layers.max_layers))

        # don't forget to un-link everything again
        self.data.unlink_containers()

        # setup probability containers
        for container in self.data:
            container['prob_e'] = np.empty((container.size), dtype=FTYPE)
            container['prob_mu'] = np.empty((container.size), dtype=FTYPE)
            container['prob_nonsterile'] = np.empty((container.size), dtype=FTYPE)
            if '_cc' in container.name:
                container['prob_nonsterile'] = np.ones(container.size)
            elif '_nc' in container.name:
                if 'nue' in container.name:
                    container['prob_e'] = np.ones(container.size)
                    container['prob_mu'] = np.zeros(container.size)
                elif 'numu' in container.name:
                    container['prob_e'] = np.zeros(container.size)
                    container['prob_mu'] = np.ones(container.size)
                elif 'nutau' in container.name:
                    container['prob_e'] = np.zeros(container.size)
                    container['prob_mu'] = np.zeros(container.size)
                else:
                    raise Exception('unknown container name: %s' % container.name)

    def calc_prob_e_mu(self, flav, nubar, energy, rho_array, len_array):
        '''Calculates probability for an electron/muon neutrino to oscillate into
        the flavour of a given event, including effects from sterile neutrinos.
        '''
        # We use the layers module to calculate lengths and densities.
        # The output must be converted into a regular python list.
        self.globes_calc.SetManualDensities(list(len_array), list(rho_array))
        # this calls the calculator without the calculation of layers
        # The flavour convention in GLoBES is that
        #  e = 1, mu = 2, tau = 3
        # while in PISA it's
        #  e = 0, mu = 1, tau = 2
        # which is why we add +1 to the flavour.
        # Nubar follows the same convention in PISA and GLoBES:
        #  +1 = particle, -1 = antiparticle
        nue_to_nux = self.globes_calc.MatterProbabilityPrevBaseline(1, flav+1, nubar, energy)
        numu_to_nux = self.globes_calc.MatterProbabilityPrevBaseline(2, flav+1, nubar, energy)
        return (nue_to_nux, numu_to_nux)

    def calc_prob_nonsterile(self, flav, nubar, energy, rho_array, len_array):
        '''Calculates the probability of a given neutrino to oscillate into
        another non-sterile flavour.
        '''
        # We use the layers module to calculate lengths and densities.
        # The output must be converted into a regular python list.
        self.globes_calc.SetManualDensities(list(len_array), list(rho_array))
        # this calls the calculator without the calculation of layers
        # The flavour convention in GLoBES is that
        #  e = 1, mu = 2, tau = 3
        # while in PISA it's
        #  e = 0, mu = 1, tau = 2
        # which is why we add +1 to the flavour.
        # Nubar follows the same convention in PISA and GLoBES:
        #  +1 = particle, -1 = antiparticle
        nux_to_nue = self.globes_calc.MatterProbabilityPrevBaseline(flav+1, 1, nubar, energy)
        nux_to_numu = self.globes_calc.MatterProbabilityPrevBaseline(flav+1, 2, nubar, energy)
        nux_to_nutau = self.globes_calc.MatterProbabilityPrevBaseline(flav+1, 3, nubar, energy)
        nux_to_nonsterile = nux_to_nue + nux_to_numu + nux_to_nutau
        return nux_to_nonsterile

    @profile
    def compute_function(self):
        # --- update mixing params ---
        params = [self.params.theta12.value.m_as('rad'),
                  self.params.theta13.value.m_as('rad'),
                  self.params.theta23.value.m_as('rad'),
                  self.params.deltacp.value.m_as('rad'),
                  self.params.deltam21.value.m_as('eV**2'),
                  self.params.deltam31.value.m_as('eV**2'),
                  self.params.deltam41.value.m_as('eV**2'),
                  0.0,
                  self.params.theta24.value.m_as('rad'),
                  self.params.theta34.value.m_as('rad'),
                  0.0,
                  0.0
                 ]
        self.globes_calc.SetParametersArr(params)
        # set the correct data mode
        self.data.representation = self.calc_mode

        for container in self.data:
            # standard oscillations are only applied to charged current events,
            # while the loss due to oscillation into sterile neutrinos is only
            # applied to neutral current events.
            # Accessing single entries from containers is very slow.
            # For this reason, we make a copy of the content we need that is
            # a simple numpy array.
            flav = container['flav']
            nubar = container['nubar']
            energies = np.array(container['true_energy'])
            densities = np.array(container['densities'])
            distances = np.array(container['distances'])
            prob_e = np.zeros(container.size)
            prob_mu = np.zeros(container.size)
            prob_nonsterile = np.zeros(container.size)
            if '_cc' in container.name:
                for i in range(container.size):
                    prob_e[i], prob_mu[i] = self.calc_prob_e_mu(flav,
                                                                nubar,
                                                                energies[i],
                                                                densities[i],
                                                                distances[i]
                                                               )
                container['prob_e'] = prob_e
                container['prob_mu'] = prob_mu
            elif '_nc' in container.name:
                for i in range(container.size):
                    prob_nonsterile[i] = self.calc_prob_nonsterile(flav,
                                                                   nubar,
                                                                   energies[i],
                                                                   densities[i],
                                                                   distances[i]
                                                                  )
                container['prob_nonsterile'] = prob_nonsterile
            else:
                raise Exception('unknown container name: %s' % container.name)
            container.mark_changed('prob_e')
            container.mark_changed('prob_mu')
            container.mark_changed('prob_nonsterile')

    @profile
    def apply_function(self):
        # update the outputted weights
        for container in self.data:
            apply_probs(container['nu_flux'],
                        container['prob_e'],
                        container['prob_mu'],
                        container['prob_nonsterile'],
                        out=container['weights'])
            container.mark_changed('weights')
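The calc_prob_e_mu and calc_prob_nonsterile methods above rely on the flavour-index conventions spelled out in their comments: PISA counts flavours from 0 (e=0, mu=1, tau=2) while GLoBES counts from 1 (e=1, mu=2, tau=3), and both use +1/-1 for neutrino/antineutrino. A tiny illustrative sketch (the helper below is hypothetical and not part of PISA or GLoBES):

# hypothetical helper illustrating the PISA <-> GLoBES flavour index conventions
PISA_FLAVOURS = {0: 'e', 1: 'mu', 2: 'tau'}

def pisa_to_globes_flav(pisa_flav):
    """Convert a PISA flavour index (e=0, mu=1, tau=2) to GLoBES (e=1, mu=2, tau=3)."""
    if pisa_flav not in PISA_FLAVOURS:
        raise ValueError('unknown PISA flavour index: %s' % pisa_flav)
    return pisa_flav + 1

# the nubar flag is shared by both codes: +1 = neutrino, -1 = antineutrino
assert pisa_to_globes_flav(0) == 1  # nue
assert pisa_to_globes_flav(2) == 3  # nutau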
Example n. 9
class pi_prob3(PiStage):
    """
    Prob3-like oscillation PISA Pi class

    Parameters
    ----------
    params
        Expected params .. ::

            detector_depth : quantity (distance)
            earth_model : PREM file path
            prop_height : quantity (distance)
            YeI : quantity (dimensionless)
            YeO : quantity (dimensionless)
            YeM : quantity (dimensionless)
            theta12 : quantity (angle)
            theta13 : quantity (angle)
            theta23 : quantity (angle)
            deltam21 : quantity (mass^2)
            deltam31 : quantity (mass^2)
            deltacp : quantity (angle)
            eps_scale : quantity (dimensionless)
            eps_prime : quantity (dimensionless)
            phi12 : quantity (angle)
            phi13 : quantity (angle)
            phi23 : quantity (angle)
            alpha1 : quantity (angle)
            alpha2 : quantity (angle)
            deltansi : quantity (angle)
            eps_ee : quantity (dimensionless)
            eps_emu_magn : quantity (dimensionless)
            eps_emu_phase : quantity (angle)
            eps_etau_magn : quantity (dimensionless)
            eps_etau_phase : quantity (angle)
            eps_mumu : quantity (dimensionless)
            eps_mutau_magn : quantity (dimensionless)
            eps_mutau_phase : quantity (angle)
            eps_tautau : quantity (dimensionless)

    **kwargs
        Other kwargs are handled by PiStage

    """
  
    def __init__(
        self,
        nsi_type=None,
        reparam_mix_matrix=False,
        data=None,
        params=None,
        input_names=None,
        output_names=None,
        debug_mode=None,
        input_specs=None,
        calc_specs=None,
        output_specs=None,
    ):

        expected_params = (
            'detector_depth',
            'earth_model',
            'prop_height',
            'YeI',
            'YeO',
            'YeM',
            'theta12',
            'theta13',
            'theta23',
            'deltam21',
            'deltam31',
            'deltacp',
        )

        # Check whether and if so with which NSI parameters we are to work.
        if nsi_type is not None:
            choices = ['standard', 'vacuum-like']
            nsi_type = nsi_type.strip().lower()
            if nsi_type not in choices:
                raise ValueError(
                    'Chosen NSI type "%s" not available! Choose one of %s.'
                    % (nsi_type, choices)
                )
        self.nsi_type = nsi_type
        """Type of NSI to assume."""

        self.reparam_mix_matrix = reparam_mix_matrix
        """Use a PMNS mixing matrix parameterisation that differs from
           the standard one by an overall phase matrix
           diag(e^(i*delta_CP), 1, 1). This has no impact on
           oscillation probabilities in the *absence* of NSI."""

        if self.nsi_type is None:
            nsi_params = ()
        elif self.nsi_type == 'vacuum-like':
            nsi_params = ('eps_scale',
                          'eps_prime',
                          'phi12',
                          'phi13',
                          'phi23',
                          'alpha1',
                          'alpha2',
                          'deltansi'
            )
        elif self.nsi_type == 'standard':
            nsi_params = ('eps_ee',
                          'eps_emu_magn',
                          'eps_emu_phase',
                          'eps_etau_magn',
                          'eps_etau_phase',
                          'eps_mumu',
                          'eps_mutau_magn',
                          'eps_mutau_phase',
                          'eps_tautau'
            )
        expected_params = expected_params + nsi_params

        input_names = ()
        output_names = ()

        # what are the keys used from the inputs during apply
        input_apply_keys = ('weights', 'nu_flux')

        # what are keys added or altered in the calculation used during apply
        output_calc_keys = ('prob_e', 'prob_mu')

        # what keys are added or altered for the outputs during apply
        output_apply_keys = ('weights',)

        # init base class
        super().__init__(
            data=data,
            params=params,
            expected_params=expected_params,
            input_names=input_names,
            output_names=output_names,
            debug_mode=debug_mode,
            input_specs=input_specs,
            calc_specs=calc_specs,
            output_specs=output_specs,
            input_apply_keys=input_apply_keys,
            output_calc_keys=output_calc_keys,
            output_apply_keys=output_apply_keys,
        )

        assert self.input_mode is not None
        assert self.calc_mode is not None
        assert self.output_mode is not None

        self.layers = None
        self.osc_params = None
        self.nsi_params = None
        # Note that the interaction potential (Hamiltonian) just scales with the
        # electron density N_e for propagation through the Earth,
        # even (to a very good approximation) in the presence of generalised interactions
        # (NSI), which is why we can simply treat it as a constant here.
        self.gen_mat_pot_matrix_complex = None
        """Interaction Hamiltonian without the factor sqrt(2)*G_F*N_e."""
        self.YeI = None
        self.YeO = None
        self.YeM = None

    def setup_function(self):

        # object for oscillation parameters
        self.osc_params = OscParams()
        if self.reparam_mix_matrix:
            logging.debug(
                'Working with reparameterized version of the mixing matrix.'
            )
        else:
            logging.debug(
                'Working with standard parameterization of mixing matrix.'
            )
        if self.nsi_type == 'vacuum-like':
            logging.debug('Working in vacuum-like NSI parameterization.')
            self.nsi_params = VacuumLikeNSIParams()
        elif self.nsi_type == 'standard':
            logging.debug('Working in standard NSI parameterization.')
            self.nsi_params = StdNSIParams()

        # setup the layers
        #if self.params.earth_model.value is not None:
        earth_model = find_resource(self.params.earth_model.value)
        self.YeI = self.params.YeI.value.m_as('dimensionless')
        self.YeO = self.params.YeO.value.m_as('dimensionless')
        self.YeM = self.params.YeM.value.m_as('dimensionless')
        prop_height = self.params.prop_height.value.m_as('km')
        detector_depth = self.params.detector_depth.value.m_as('km')
        self.layers = Layers(earth_model, detector_depth, prop_height)
        self.layers.setElecFrac(self.YeI, self.YeO, self.YeM)

        # set the correct data mode
        self.data.data_specs = self.calc_specs

        # --- calculate the layers ---
        if self.calc_mode == 'binned':
            # speed up calculation by adding links
            # as layers don't care about flavour
            self.data.link_containers('nu', ['nue_cc', 'numu_cc', 'nutau_cc',
                                             'nue_nc', 'numu_nc', 'nutau_nc',
                                             'nuebar_cc', 'numubar_cc', 'nutaubar_cc',
                                             'nuebar_nc', 'numubar_nc', 'nutaubar_nc'])

        for container in self.data:
            self.layers.calcLayers(container['true_coszen'].get('host'))
            container['densities'] = self.layers.density.reshape((container.size, self.layers.max_layers))
            container['distances'] = self.layers.distance.reshape((container.size, self.layers.max_layers))

        # don't forget to un-link everything again
        self.data.unlink_containers()

        # --- setup empty arrays ---
        if self.calc_mode == 'binned':
            self.data.link_containers('nu', ['nue_cc', 'numu_cc', 'nutau_cc',
                                             'nue_nc', 'numu_nc', 'nutau_nc'])
            self.data.link_containers('nubar', ['nuebar_cc', 'numubar_cc', 'nutaubar_cc',
                                                'nuebar_nc', 'numubar_nc', 'nutaubar_nc'])
        for container in self.data:
            container['probability'] = np.empty((container.size, 3, 3), dtype=FTYPE)
        self.data.unlink_containers()

        # setup more empty arrays
        for container in self.data:
            container['prob_e'] = np.empty((container.size), dtype=FTYPE)
            container['prob_mu'] = np.empty((container.size), dtype=FTYPE)

    def calc_probs(self, nubar, e_array, rho_array, len_array, out):
        ''' wrapper to execute osc. calc '''
        if self.reparam_mix_matrix:
            mix_matrix = self.osc_params.mix_matrix_reparam_complex
        else:
            mix_matrix = self.osc_params.mix_matrix_complex
        propagate_array(self.osc_params.dm_matrix, # pylint: disable = unexpected-keyword-arg, no-value-for-parameter
                        mix_matrix,
                        self.gen_mat_pot_matrix_complex,
                        nubar,
                        e_array.get(WHERE),
                        rho_array.get(WHERE),
                        len_array.get(WHERE),
                        out=out.get(WHERE)
                       )
        out.mark_changed(WHERE)

    @profile
    def compute_function(self):

        # set the correct data mode
        self.data.data_specs = self.calc_specs

        if self.calc_mode == 'binned':
            # speed up calculation by adding links
            self.data.link_containers('nu', ['nue_cc', 'numu_cc', 'nutau_cc',
                                             'nue_nc', 'numu_nc', 'nutau_nc'])
            self.data.link_containers('nubar', ['nuebar_cc', 'numubar_cc', 'nutaubar_cc',
                                                'nuebar_nc', 'numubar_nc', 'nutaubar_nc'])

        # this can be done in a more clever way (don't have to recalculate all paths)
        YeI = self.params.YeI.value.m_as('dimensionless')
        YeO = self.params.YeO.value.m_as('dimensionless')
        YeM = self.params.YeM.value.m_as('dimensionless')
        if YeI != self.YeI or YeO != self.YeO or YeM != self.YeM:
            self.YeI = YeI; self.YeO = YeO; self.YeM = YeM
            self.layers.setElecFrac(self.YeI, self.YeO, self.YeM)
            for container in self.data:
                self.layers.calcLayers(container['true_coszen'].get('host'))
                container['densities'] = self.layers.density.reshape((container.size, self.layers.max_layers))
                container['distances'] = self.layers.distance.reshape((container.size, self.layers.max_layers))

        # some safety checks on units
        # trying to avoid the issue of angles with no dimension being silently assumed to be radians;
        # here we enforce that the user must specify a valid angle unit
        for angle_param in [self.params.theta12, self.params.theta13, self.params.theta23, self.params.deltacp] :
            assert angle_param.value.units != ureg.dimensionless, "Param %s is dimensionless, but should have angle units [rad, degree]" % angle_param.name
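        # e.g. with pint, (30.0 * ureg.degree).m_as('rad') evaluates to ~0.5236, whereas a bare
        # dimensionless 30.0 would otherwise risk being silently interpreted as radians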

        # --- update mixing params ---
        self.osc_params.theta12 = self.params.theta12.value.m_as('rad')
        self.osc_params.theta13 = self.params.theta13.value.m_as('rad')
        self.osc_params.theta23 = self.params.theta23.value.m_as('rad')
        self.osc_params.dm21 = self.params.deltam21.value.m_as('eV**2')
        self.osc_params.dm31 = self.params.deltam31.value.m_as('eV**2')
        self.osc_params.deltacp = self.params.deltacp.value.m_as('rad')
        if self.nsi_type == 'vacuum-like':
            self.nsi_params.eps_scale = self.params.eps_scale.value.m_as('dimensionless')
            self.nsi_params.eps_prime = self.params.eps_prime.value.m_as('dimensionless')
            self.nsi_params.phi12 = self.params.phi12.value.m_as('rad')
            self.nsi_params.phi13 = self.params.phi13.value.m_as('rad')
            self.nsi_params.phi23 = self.params.phi23.value.m_as('rad')
            self.nsi_params.alpha1 = self.params.alpha1.value.m_as('rad')
            self.nsi_params.alpha2 = self.params.alpha2.value.m_as('rad')
            self.nsi_params.deltansi = self.params.deltansi.value.m_as('rad')
        elif self.nsi_type == 'standard':
            self.nsi_params.eps_ee = self.params.eps_ee.value.m_as('dimensionless')
            self.nsi_params.eps_emu = (
                (self.params.eps_emu_magn.value.m_as('dimensionless'),
                self.params.eps_emu_phase.value.m_as('rad'))
            )
            self.nsi_params.eps_etau = (
                (self.params.eps_etau_magn.value.m_as('dimensionless'),
                self.params.eps_etau_phase.value.m_as('rad'))
            )
            self.nsi_params.eps_mumu = self.params.eps_mumu.value.m_as('dimensionless')
            self.nsi_params.eps_mutau = (
                (self.params.eps_mutau_magn.value.m_as('dimensionless'),
                self.params.eps_mutau_phase.value.m_as('rad'))
            )
            self.nsi_params.eps_tautau = self.params.eps_tautau.value.m_as('dimensionless')

        # now we can proceed to calculate the generalised matter potential matrix
        std_mat_pot_matrix = np.zeros((3, 3), dtype=FTYPE) + 1.j * np.zeros((3, 3), dtype=FTYPE)
        std_mat_pot_matrix[0, 0] += 1.0

        # add effective nsi coupling matrix
        if self.nsi_type is not None:
            logging.debug('NSI matrix:\n%s' % self.nsi_params.eps_matrix)
            self.gen_mat_pot_matrix_complex = (
                std_mat_pot_matrix + self.nsi_params.eps_matrix
            )
            logging.debug('Using generalised matter potential:\n%s'
                          % self.gen_mat_pot_matrix_complex)
        else:
            self.gen_mat_pot_matrix_complex = std_mat_pot_matrix
            logging.debug('Using standard matter potential:\n%s'
                          % self.gen_mat_pot_matrix_complex)

        for container in self.data:
            self.calc_probs(container['nubar'],
                            container['true_energy'],
                            container['densities'],
                            container['distances'],
                            out=container['probability'],
                           )

        # the following is flavour specific, hence unlink
        self.data.unlink_containers()

        for container in self.data:
            # initial electrons (0)
            fill_probs(container['probability'].get(WHERE),
                       0,
                       container['flav'],
                       out=container['prob_e'].get(WHERE),
                      )
            # initial muons (1)
            fill_probs(container['probability'].get(WHERE),
                       1,
                       container['flav'],
                       out=container['prob_mu'].get(WHERE),
                      )

            container['prob_e'].mark_changed(WHERE)
            container['prob_mu'].mark_changed(WHERE)

    @profile
    def apply_function(self):

        # update the outputted weights
        for container in self.data:
            apply_probs(container['nu_flux'].get(WHERE),
                        container['prob_e'].get(WHERE),
                        container['prob_mu'].get(WHERE),
                        out=container['weights'].get(WHERE))
            container['weights'].mark_changed(WHERE)
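The generalised matter potential assembled in compute_function above is the standard charged-current potential, which has only its ee entry equal to one, plus a Hermitian NSI coupling matrix. A minimal NumPy sketch of that construction (the epsilon values below are invented for illustration; in the stage they come from the NSI parameter objects such as nsi_params.eps_matrix):

import numpy as np

# standard matter potential: only the nu_e-nu_e entry is non-zero (CC coherent forward scattering)
std_mat_pot = np.zeros((3, 3), dtype=complex)
std_mat_pot[0, 0] = 1.0

# toy NSI couplings (magnitudes and phases chosen arbitrarily)
eps_ee, eps_mumu, eps_tautau = 0.1, 0.0, -0.05
eps_emu = 0.02 * np.exp(1j * 0.3)
eps_etau = 0.01 * np.exp(1j * 1.2)
eps_mutau = 0.03 * np.exp(1j * 2.0)

# Hermitian epsilon matrix, analogous in structure to nsi_params.eps_matrix
eps_matrix = np.array([
    [eps_ee,            eps_emu,            eps_etau],
    [np.conj(eps_emu),  eps_mumu,           eps_mutau],
    [np.conj(eps_etau), np.conj(eps_mutau), eps_tautau],
])

# generalised matter potential used in place of the standard one
gen_mat_pot = std_mat_pot + eps_matrix
assert np.allclose(gen_mat_pot, gen_mat_pot.conj().T)  # still Hermitian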
Example n. 10
class pi_globes(PiStage):
    """
    GLoBES PISA Pi class

    Parameters
    ----------
    detector_depth : quantity (distance)
    earth_model : PREM file path
    globes_wrapper : path to GLoBES wrapper
    prop_height : quantity (distance)
    params : ParamSet or sequence with which to instantiate a ParamSet.
      Expected params are:
        theta12 : quantity (angle)
        theta13 : quantity (angle)
        theta23 : quantity (angle)
        deltam21 : quantity (mass^2)
        deltam31 : quantity (mass^2)
        deltam41 : quantity (mass^2)
        theta24 : quantity (angle)
        theta34 : quantity (angle)
        deltacp : quantity (angle)

    """
    def __init__(self,
                 earth_model,
                 globes_wrapper,
                 data=None,
                 params=None,
                 input_names=None,
                 output_names=None,
                 debug_mode=None,
                 input_specs=None,
                 calc_specs=None,
                 output_specs=None,
                 detector_depth=2.*ureg.km,
                 prop_height=20.*ureg.km
                ):

        expected_params = (
                           'theta12',
                           'theta13',
                           'theta23',
                           'deltam21',
                           'deltam31',
                           'deltam41',
                           'theta24',
                           'theta34',
                           'deltacp',
                          )

        input_names = ()
        output_names = ()

        # what are the keys used from the inputs during apply
        input_apply_keys = ('weights',
                            'sys_flux',
                           )
        # what are keys added or altered in the calculation used during apply
        output_calc_keys = ('prob_e',
                            'prob_mu',
                            'prob_nonsterile',
                           )
        # what keys are added or altered for the outputs during apply
        output_apply_keys = ('weights',
                      )

        # init base class
        super().__init__(
            data=data,
            params=params,
            expected_params=expected_params,
            input_names=input_names,
            output_names=output_names,
            debug_mode=debug_mode,
            input_specs=input_specs,
            calc_specs=calc_specs,
            output_specs=output_specs,
            input_apply_keys=input_apply_keys,
            output_calc_keys=output_calc_keys,
            output_apply_keys=output_apply_keys,
        )

        assert self.input_mode is not None
        assert self.calc_mode is not None
        assert self.output_mode is not None

        self.layers = None
        self.osc_params = None
        self.earth_model = earth_model
        self.globes_wrapper = globes_wrapper
        self.detector_depth = detector_depth
        self.prop_height = prop_height

        self.globes_calc = None
        # This does nothing for speed, but just allows one to use numpy broadcasting on the
        # arguments of the function. The internal implementation is basically a for-loop.
        # The signature is chosen so that the function for a single event expects an array
        # of n layer distances and densities.
        self.calc_prob_e_mu = np.vectorize(self.calc_prob_e_mu, signature='(),(),(),(n),(n)->(),()')
        self.calc_prob_nonsterile = np.vectorize(self.calc_prob_nonsterile,
                                                 signature='(),(),(),(n),(n)->()')

    def setup_function(self):
        sys.path.append(self.globes_wrapper)
        import GLoBES
        ### you need to start GLoBES from the folder containing a dummy experiment
        # therefore we go to the folder, load GLoBES and then go back
        curdir = os.getcwd()
        os.chdir(self.globes_wrapper)
        self.globes_calc = GLoBES.GLoBESCalculator("calc")
        os.chdir(curdir)
        self.globes_calc.InitSteriles(2)
        # object for oscillation parameters
        self.osc_params = OscParams()
        earth_model = find_resource(self.earth_model)
        prop_height = self.prop_height.m_as('km')
        detector_depth = self.detector_depth.m_as('km')
        self.layers = Layers(earth_model, detector_depth, prop_height)
        # The electron fractions are taken into account internally by GLoBES/SNU.
        # See the SNU patch for details. It uses the density to decide
        # whether it is in the core or in the mantle. Therefore, we just multiply by
        # one to give GLoBES the raw densities.
        self.layers.setElecFrac(1., 1., 1.)

        # set the correct data mode
        self.data.data_specs = self.calc_specs

        # --- calculate the layers ---
        if self.calc_mode == 'binned':
            # speed up calculation by adding links
            # as layers don't care about flavour
            self.data.link_containers('nu', ['nue_cc', 'numu_cc', 'nutau_cc',
                                             'nue_nc', 'numu_nc', 'nutau_nc',
                                             'nuebar_cc', 'numubar_cc', 'nutaubar_cc',
                                             'nuebar_nc', 'numubar_nc', 'nutaubar_nc'])

        for container in self.data:
            self.layers.calcLayers(container['true_coszen'].get('host'))
            container['densities'] = self.layers.density.reshape((container.size, self.layers.max_layers))
            container['distances'] = self.layers.distance.reshape((container.size, self.layers.max_layers))

        # don't forget to un-link everything again
        self.data.unlink_containers()

        # setup probability containers
        for container in self.data:
            container['prob_e'] = np.empty((container.size), dtype=FTYPE)
            container['prob_mu'] = np.empty((container.size), dtype=FTYPE)
            container['prob_nonsterile'] = np.empty((container.size), dtype=FTYPE)

    def calc_prob_e_mu(self, flav, nubar, energy, rho_array, len_array):
        '''Calculates probability for an electron/muon neutrino to oscillate into
        the flavour of a given event, including effects from sterile neutrinos.
        '''
        # We use the layers module to calculate lengths and densities.
        # The output must be converted into a regular python list.
        self.globes_calc.SetManualDensities(list(len_array), list(rho_array))
        # this calls the calculator without the calculation of layers
        # The flavour convention in GLoBES is that
        #  e = 1, mu = 2, tau = 3
        # while in PISA it's
        #  e = 0, mu = 1, tau = 2
        # which is why we add +1 to the flavour.
        # Nubar follows the same convention in PISA and GLoBES:
        #  +1 = particle, -1 = antiparticle
        nue_to_nux = self.globes_calc.MatterProbabilityPrevBaseline(1, flav+1, nubar, energy)
        numu_to_nux = self.globes_calc.MatterProbabilityPrevBaseline(2, flav+1, nubar, energy)
        return (nue_to_nux, numu_to_nux)

    def calc_prob_nonsterile(self, flav, nubar, energy, rho_array, len_array):
        '''Calculates the probability of a given neutrino to oscillate into
        another non-sterile flavour.
        '''
        # We use the layers module to calculate lengths and densities.
        # The output must be converted into a regular python list.
        self.globes_calc.SetManualDensities(list(len_array), list(rho_array))
        # this calls the calculator without the calculation of layers
        # The flavour convention in GLoBES is that
        #  e = 1, mu = 2, tau = 3
        # while in PISA it's
        #  e = 0, mu = 1, tau = 2
        # which is why we add +1 to the flavour.
        # Nubar follows the same convention in PISA and GLoBES:
        #  +1 = particle, -1 = antiparticle
        nux_to_nue = self.globes_calc.MatterProbabilityPrevBaseline(flav+1, 1, nubar, energy)
        nux_to_numu = self.globes_calc.MatterProbabilityPrevBaseline(flav+1, 2, nubar, energy)
        nux_to_nutau = self.globes_calc.MatterProbabilityPrevBaseline(flav+1, 3, nubar, energy)
        nux_to_nonsterile = nux_to_nue + nux_to_numu + nux_to_nutau
        return nux_to_nonsterile

    @profile
    def compute_function(self):

        # --- update mixing params ---
        params = np.array([self.params.theta12.value.m_as('rad'),
                           self.params.theta13.value.m_as('rad'),
                           self.params.theta23.value.m_as('rad'),
                           self.params.deltacp.value.m_as('rad'),
                           self.params.deltam21.value.m_as('eV**2'),
                           self.params.deltam31.value.m_as('eV**2'),
                           self.params.deltam41.value.m_as('eV**2'),
                           0.0,
                           self.params.theta24.value.m_as('rad'),
                           self.params.theta34.value.m_as('rad'),
                           0.0,
                           0.0
                           ], dtype=float)
        self.globes_calc.SetParametersArr(params)
        # set the correct data mode
        self.data.data_specs = self.calc_specs

        for container in self.data:
            # standard oscillations are only applied to charged current events,
            # while the loss due to oscillation into sterile neutrinos is only
            # applied to neutral current events.
            if '_cc' in container.name:
                prob_e, prob_mu = self.calc_prob_e_mu(container['flav'],
                                                      container['nubar'],
                                                      container['true_energy'],
                                                      container['densities'],
                                                      container['distances']
                                                     )
                container['prob_e'] = prob_e
                container['prob_mu'] = prob_mu
                container['prob_nonsterile'] = np.ones_like(prob_e)
            elif '_nc' in container.name:
                prob_nonsterile = self.calc_prob_nonsterile(container['flav'],
                                                            container['nubar'],
                                                            container['true_energy'],
                                                            container['densities'],
                                                            container['distances']
                                                           )
                if 'nue' in container.name:
                    container['prob_e'] = np.ones_like(prob_nonsterile)
                    container['prob_mu'] = np.zeros_like(prob_nonsterile)
                elif 'numu' in container.name:
                    container['prob_e'] = np.zeros_like(prob_nonsterile)
                    container['prob_mu'] = np.ones_like(prob_nonsterile)
                elif 'nutau' in container.name:
                    container['prob_e'] = np.zeros_like(prob_nonsterile)
                    container['prob_mu'] = np.zeros_like(prob_nonsterile)
                else:
                    raise Exception('unknown container name: %s' % container.name)
                container['prob_nonsterile'] = prob_nonsterile
            else:
                raise Exception('unknown container name: %s' % container.name)
            container['prob_e'].mark_changed(WHERE)
            container['prob_mu'].mark_changed(WHERE)
            container['prob_nonsterile'].mark_changed(WHERE)

    @profile
    def apply_function(self):
        # update the outputted weights
        for container in self.data:
            apply_probs(container['sys_flux'].get(WHERE),
                        container['prob_e'].get(WHERE),
                        container['prob_mu'].get(WHERE),
                        container['prob_nonsterile'].get(WHERE),
                        out=container['weights'].get(WHERE))
            container['weights'].mark_changed(WHERE)
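The np.vectorize calls in pi_globes.__init__ above use a generalized-ufunc signature so that the per-event calc_prob_* methods, each taking scalars plus two length-n layer arrays, can be applied to whole containers at once. A standalone sketch of that calling convention (the toy function below is hypothetical and carries no physics):

import numpy as np

def _toy_prob_e_mu(flav, nubar, energy, rho_array, len_array):
    """Per-event dummy: scalars plus two length-n layer arrays in, two scalars out."""
    # flav and nubar are unused here; a real stage would pass them to the oscillation calculator
    baseline = np.sum(len_array)   # total traversed distance
    mean_rho = np.mean(rho_array)  # mean density along the path
    return 1.0 / (1.0 + baseline / energy), mean_rho / (1.0 + mean_rho)

# same signature string as in pi_globes: three scalars and two (n)-arrays in, two scalars out
toy_vectorized = np.vectorize(_toy_prob_e_mu, signature='(),(),(),(n),(n)->(),()')

n_events, n_layers = 4, 3
prob_e, prob_mu = toy_vectorized(
    np.zeros(n_events, dtype=int),         # flav
    np.ones(n_events, dtype=int),          # nubar
    np.full(n_events, 10.0),               # true_energy
    np.ones((n_events, n_layers)),         # densities
    np.full((n_events, n_layers), 100.0),  # distances
)
assert prob_e.shape == prob_mu.shape == (n_events,)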