Example 1
    def _init_coupling_mat(self):
        """Initialises the coupling matrix directly in sparse (csr) format.
        """
        info(0, 'Initiating coupling matrix in ({:}) format'.format('CSR'))

        from scipy.sparse import csr_matrix
        if using_cupy:
            # For GPU we initialize the csr matrix on the host and then cast to GPU
            from cupyx.scipy.sparse import csr_matrix as cp_csr_matrix
            self.coupling_mat_np = csr_matrix(
                (self._batch_vec.astype(np.float32),
                 (self._batch_rows, self._batch_cols)),
                copy=True)
            self.coupling_mat = cp_csr_matrix(self.coupling_mat_np, copy=True)
            self._batch_vec = self.coupling_mat.data
            del self.coupling_mat_np
        else:
            self.coupling_mat = csr_matrix(
                (self._batch_vec, (self._batch_rows, self._batch_cols)),
                copy=True)

        # create an index to sort by rows and then columns,
        # which is the same ordering CSR has internally
        # lexsort sorts by last argument first!!!
        self.sortidx = np.lexsort((self._batch_cols, self._batch_rows))

        self._batch_rows = self._batch_rows[self.sortidx]
        self._batch_cols = self._batch_cols[self.sortidx]

        # Reorder batch matrix according to order in coupling_mat
        if using_cupy:
            self._batch_matrix = cupy.array(
                self._batch_matrix[self.sortidx, :], dtype=np.float32)
        else:
            self._batch_matrix = self._batch_matrix[self.sortidx, :]
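
A standalone sketch (toy data, not part of the class) of why the lexsort index matches the internal CSR ordering: scipy stores CSR entries row by row with column indices sorted within each row, which is exactly a sort by rows first and columns second, assuming no duplicate (row, col) pairs.

import numpy as np
from scipy.sparse import csr_matrix

rows = np.array([2, 0, 1, 0, 2])
cols = np.array([1, 2, 0, 0, 0])
vals = np.array([10., 20., 30., 40., 50.])

mat = csr_matrix((vals, (rows, cols)))
sortidx = np.lexsort((cols, rows))   # sorts by rows first, then by cols
assert np.array_equal(mat.data, vals[sortidx])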
Example 2
    def _update_rates(self, z, force_update=False):
        """Batch compute all nonel and inclusive rates if z changes.

        The result is always stored in the same vectors, since '_init_rate_matstruc'
        makes use of views to link ranges of the vector to locations in the matrix.

        Args:
            z (float): Redshift value at which the photon field is taken.

        Returns:
            (bool): True if the fields were indeed updated, False if nothing happened.
        """
        if self._ratemat_zcache != z or force_update:
            info(5, 'Updating batch rate vectors.')

            if using_cupy:
                if isinstance(self._batch_matrix, np.ndarray):
                    self._init_coupling_mat()
                cupy.dot(self._batch_matrix,
                         cupy.array(self.photon_vector(z), dtype=np.float32),
                         out=self._batch_vec)
            else:
                np.dot(self._batch_matrix,
                       self.photon_vector(z),
                       out=self._batch_vec)
            self._ratemat_zcache = z
            return True
        else:
            return False
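
The out= keyword matters here: as the docstring notes, other parts of the solver hold views into self._batch_vec (in the GPU branch of the previous example it is literally the data array of the sparse coupling matrix), so the product has to be written in place rather than rebinding the name. A minimal standalone illustration:

import numpy as np

A = np.arange(6.).reshape(2, 3)
v = np.ones(3)
w = np.zeros(2)
view = w[:]                  # stands in for another object referencing the same buffer
np.dot(A, v, out=w)          # in-place write: `view` sees the updated values
assert np.array_equal(view, np.array([3., 12.]))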
Example 3
def get_particle_channels(mo, mo_energy, da_energy):
    """
    Loops over a all daughers for a given mother and generates
    a list of redistribution matrices on the grid:
     np.outer( da_energy , 1 / mo_energy )
    
    Args:
      mo (int): id of the mother particle
      mo_energy (float): energy grid of the mother particle
      da_energy (float): energy grid of the daughter particle (same for all daughters)
    Returns:
      list of np.array: list of redistribution functions on on xgrid 
    """
    info(10, 'Generating decay redistribution for', mo)
    dbentry = spec_data[mo]
    x_grid = np.outer(da_energy, (1 / mo_energy))

    redist = {}
    for branching, daughters in dbentry['branchings']:
        for da in daughters:
            # daughter is a nucleus, we have lorentz factor conservation
            if da > 99:
                res = np.zeros(x_grid.shape)
                res[x_grid == 1.] = 1.
            else:
                res = get_decay_matrix(mo, da, x_grid)
            redist[da] = branching * res

    return x_grid, redist
Example 4
    def __init__(self, prince_run=None, with_dense_jac=True, *args, **kwargs):
        info(3, 'creating instance')
        self.with_dense_jac = with_dense_jac

        #: Reference to PhotonField object
        self.photon_field = prince_run.photon_field

        #: Reference to CrossSection object
        self.cross_sections = prince_run.cross_sections

        #: Reference to species manager
        self.spec_man = prince_run.spec_man

        # Initialize grids
        self.e_photon = prince_run.ph_grid
        self.e_cosmicray = prince_run.cr_grid

        # Initialize cache of redshift value
        self._ph_vec_zcache = None
        self._ratemat_zcache = None

        # Initialize the matrices for batch computation
        self._batch_rows = None
        self._batch_cols = None
        self._batch_matrix = None
        self._batch_vec = None
        self.coupling_mat = None
        self.dense_coupling_mat = None

        self._estimate_batch_matrix()
        self._init_matrices()
        self._init_coupling_mat()
Example 5
    def _arange_on_xgrid(self, incl_cs):
        """Returns the inclusive cross section on an xgrid at x=1."""

        egr, cs = None, None

        if isinstance(incl_cs, tuple):
            egr, cs = incl_cs
        else:
            cs = incl_cs

        nxbins = len(self.xbins) - 1
        if len(cs.shape) > 1 and cs.shape[0] != nxbins:
            raise Exception(
                'One dimensional cross section expected, instead got',
                cs.shape, '\n', cs)
        elif len(cs.shape) == 2 and cs.shape[0] == nxbins:
            info(20, 'Supplied 2D distribution seems to be distributed in x.')
            if isinstance(incl_cs, tuple):
                return egr, cs
            return cs

        csec = np.zeros((nxbins, cs.shape[0]))
        # NOTE: The factor 2 in the following line is a workaround to account for the later linear interpolation
        #       This is needed because integrating a linear spline amounts to a trapezoidal rule,
        #       which gives only half the area of the bin the value is placed in
        corr_factor = 2 * self.xwidths[-1] / (self.xcenters[-1] -
                                              self.xcenters[-2])
        csec[-1, :] = cs / self.xwidths[-1] * corr_factor
        info(
            4,
            'Warning! Workaround to account for linear interpolation in x, factor 2 added!'
        )
        if isinstance(incl_cs, tuple):
            return egr, csec
        return csec
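
A toy check (standalone, not part of the class) of the factor-2 workaround described in the comment: if a 1D cross section value is placed in the last x bin with that normalization, a trapezoidal integral over the x centers, which is what integrating a linear spline amounts to, recovers the original value.

import numpy as np

xbins = np.linspace(0., 1., 11)
xcenters = 0.5 * (xbins[1:] + xbins[:-1])
xwidths = np.diff(xbins)

cs = 3.7                                                   # toy cross section value
corr_factor = 2 * xwidths[-1] / (xcenters[-1] - xcenters[-2])
csec = np.zeros_like(xcenters)
csec[-1] = cs / xwidths[-1] * corr_factor

assert np.isclose(np.trapz(csec, xcenters), cs)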
Example 6
    def _estimate_batch_matrix(self):
        '''Estimate the dimensions of the batch matrix.'''
        dcr = self.e_cosmicray.d
        dph = self.e_photon.d

        batch_dim = 0
        for specid in self.spec_man.known_species:
            if specid < 100:
                continue
            # Add the main diagonal self-couplings (absorption)
            batch_dim += dcr
            for rtup in self.cross_sections.reactions[specid]:
                # Off main diagonal couplings (reinjection)
                if rtup in self.cross_sections.known_bc_channels:
                    batch_dim += dcr
                elif rtup in self.cross_sections.known_diff_channels:
                    # Only half of the elements can be non-zero (energy conservation)
                    batch_dim += int(dcr**2 / 2) + 1

        info(2, 'Batch matrix dimensions are {0}x{1}'.format(batch_dim, dph))
        self._batch_matrix = np.zeros((batch_dim, dph))
        self._batch_rows = []
        self._batch_cols = []
        info(
            3,
            'Memory usage: {0} MB'.format(self._batch_matrix.nbytes / 1024**2))
Example 7
def nu_from_beta_decay_old(x_grid, mother, daughter):
    """
    Energy distribution of a neutrinos from beta-decays of mother to daughter

    Args:
      x (float): energy fraction transferred to the secondary
      mother (int): id of mother
      daughter (int): id of daughter
    Returns:
      float: probability density at x
    """

    info(10, 'Calculating neutrino energy from beta decay', mother, daughter)

    mass_el = spec_data[20]['mass']
    mass_mo = spec_data[mother]['mass']
    mass_da = spec_data[daughter]['mass']

    Z_mo = spec_data[mother]['charge']
    Z_da = spec_data[daughter]['charge']

    print(mother, daughter)
    if mother == 100 and daughter == 101:
        # for this channel the masses are already nucleon masses
        qval = mass_mo - mass_da - mass_el
    elif Z_da == Z_mo - 1:  # beta+ decay
        qval = mass_mo - mass_da - 2 * mass_el
    elif Z_da == Z_mo + 1:  # beta- decay
        qval = mass_mo - mass_da
    else:
        raise Exception('Not an allowed beta decay channel: {:} -> {:}'.format(
            mother, daughter))

    E0 = qval + mass_el
    print('Qval', qval, 'E0', E0)
    ye = mass_el / E0
    y_grid = x_grid * mass_mo / 2 / E0

    # norm factor, normalizing the formula to 1
    norm = 1. / 60. * (np.sqrt(1. - ye**2) * (2 - 9 * ye**2 - 8 * ye**4) +
                       15 * ye**4 * np.log(ye / (1 - np.sqrt(1 - ye**2))))

    cond = y_grid < 1 - ye
    print((1 - ye) * 2 * E0 / mass_mo)
    yshort = y_grid[cond]

    result = np.zeros(y_grid.shape)
    # factor for substitution y -> x
    subst = mass_mo / 2 / E0

    # total formula
    result[cond] = subst / norm * yshort**2 * (1 - yshort) * np.sqrt(
        (1 - yshort)**2 - ye**2)

    result[x_grid > 1] *= 0.

    return result
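
As a cross-check of the closed-form normalization used above (illustrative only): `norm` equals the integral of y**2 * (1 - y) * sqrt((1 - y)**2 - ye**2) over 0 <= y <= 1 - ye, so the rest-frame spectrum integrates to one.

import numpy as np

ye = 0.1
norm = 1. / 60. * (np.sqrt(1. - ye**2) * (2 - 9 * ye**2 - 8 * ye**4) +
                   15 * ye**4 * np.log(ye / (1 - np.sqrt(1 - ye**2))))

y = np.linspace(0., 1. - ye, 20001)
integrand = y**2 * (1 - y) * np.sqrt(np.clip((1 - y)**2 - ye**2, 0., None))
assert np.isclose(np.trapz(integrand, y), norm, rtol=1e-4)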
Example 8
    def resp(self):
        """Return the ResponseFunction corresponding to this cross section.

        Will only create the ResponseFunction once.
        """
        if not hasattr(self, '_resp'):
            info(2, 'First Call, creating instance of ResponseFunction now')
            from .response import ResponseFunction
            self._resp = ResponseFunction(self)
        return self._resp
Example 9
    def __init__(self, prince_run, energy='grid', *args, **kwargs):
        info(3, 'creating instance')
        #: Reference to species manager
        self.spec_man = prince_run.spec_man

        # Initialize grids
        self.e_cosmicray = prince_run.cr_grid
        self.dim_states = prince_run.dim_states
        self.dim_bins = prince_run.dim_bins
        # Init adiabatic loss vector
        self.energy_vector = self._init_energy_vec(energy)
Example 10
    def _update_jacobian(self, z):
        info(5, 'Updating jacobian matrix at redshift', z)

        # enable photohadronic losses, or use a zero matrix
        if self.enable_photohad_losses:
            self.jacobian = self.had_int_rates.get_hadr_jacobian(
                z, self.dldz(z), force_update=True)
        else:
            self.jacobian = self.had_int_rates.get_hadr_jacobian(
                z, self.dldz(z), force_update=True)
            self.jacobian.data *= 0.

        self.last_hadr_jac = None
Example 11
    def _precompute_interpolators(self):
        """Interpolate each response function and store interpolators.

        Uses :func:`prince_cr.util.get_interp_object` as interpolator.
        This might result in too many knots and can be subject to
        future optimization.
        """

        info(2, 'Computing interpolators for response functions')

        info(5, 'Nonelastic response functions f(y)')
        self.nonel_intp = {}
        for mother in self.nonel_idcs:
            self.nonel_intp[mother] = get_interp_object(
                *self.get_channel(mother))

        info(5, 'Inclusive (boost conserving) response functions g(y)')
        self.incl_intp = {}
        for mother, daughter in self.incl_idcs:
            self.incl_intp[(mother, daughter)] = get_interp_object(
                *self.get_channel(mother, daughter))

        info(5, 'Inclusive (redistributed) response functions h(y)')
        from scipy.integrate import cumtrapz

        self.incl_diff_intp = {}
        self.incl_diff_intp_integral = {}
        for mother, daughter in self.incl_diff_idcs:
            ygr, rfunc = self.get_channel(mother, daughter)
            self.incl_diff_intp[(mother, daughter)] = get_2Dinterp_object(
                self.xcenters, ygr, rfunc, self.cross_section.xbins)

            # 2D antiderivative of the response function, used for bin averages
            integral = cumtrapz(rfunc, ygr, axis=1, initial=0)
            integral = cumtrapz(integral, self.xcenters, axis=0, initial=0)

            self.incl_diff_intp_integral[(mother, daughter)] = get_2Dinterp_object(
                self.xcenters, ygr, integral, self.cross_section.xbins)
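
The two nested cumtrapz calls build a 2D antiderivative F of the response function; bin integrals can then be recovered from differences of F at the four bin corners, which is how incl_diff_intp_integral is used later in _init_matrices. A toy check with a separable integrand (cumtrapz is named cumulative_trapezoid in newer SciPy):

import numpy as np
from scipy.integrate import cumtrapz

# toy integrand f(x, y) = exp(-x) * y on a rectangular grid
x = np.linspace(0., 2., 401)
y = np.linspace(0., 3., 601)
f = np.exp(-x)[:, None] * y[None, :]

# 2D antiderivative F(x, y) = int_0^x int_0^y f, built as above
F = cumtrapz(f, y, axis=1, initial=0)
F = cumtrapz(F, x, axis=0, initial=0)

# integral over [x[il], x[iu]] x [y[jl], y[ju]] from the four corners of F
il, iu, jl, ju = 50, 300, 100, 500
num = F[iu, ju] - F[il, ju] - F[iu, jl] + F[il, jl]
ana = (np.exp(-x[il]) - np.exp(-x[iu])) * (y[ju]**2 - y[jl]**2) / 2.
assert abs(num - ana) / ana < 1e-4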
Example 12
    def incl(self, mother, daughter):
        r"""Returns inclusive cross section.

        Inclusive cross section for daughter in photo-nuclear
        interactions of `mother`.

        Args:
            mother (int): Mother nucleus(on)
            daughter (int): Daughter nucleus(on)

        Returns:
            (numpy.array, numpy.array): self._egrid_tab (:math:`\epsilon_r`),
            inclusive cross section in :math:`cm^2`
        """

        _, Z, N = get_AZN(mother)

        if daughter <= 101:
            # raise Exception('Boost conserving cross section called ' +
            #                 'for redistributed particle')
            from scipy.integrate import trapz

            _, cs_diff = self.incl_diff(mother, daughter)
            cs_incl = trapz(cs_diff,
                            x=self.xcenters,
                            dx=bin_widths(self.xbins),
                            axis=0)
            return self.egrid, cs_incl[self._range]

        elif daughter >= 200 and daughter not in [mother - 101, mother - 100]:
            info(10, 'mother, daughter', mother, daughter, 'out of range')
            return self.egrid[[0, -1]], np.array([0., 0.])

        if daughter in [mother - 101]:
            cgrid = Z * self.cs_proton_grid
            # create incl. diff. indices for all particles created in p-gamma
            for da in self.redist_proton:
                self.incl_diff_idcs.append((mother, da))
            return self.egrid, cgrid[self._range]
        elif daughter in [mother - 100]:
            cgrid = N * self.cs_neutron_grid
            # create incl. diff. channel indices for all particles created in n-gamma
            for da in self.redist_neutron:
                self.incl_diff_idcs.append((mother, da))
            return self.egrid, cgrid[self._range]
        else:
            raise Exception(
                'Channel {:} to {:} not allowed in this superposition model'.
                format(mother, daughter))
Example 13
    def _join_nonel(self, mother):
        """Returns the non-elastic cross section of the joined models.
        """

        info(5, 'Joining nonelastic cross sections for', mother)

        egrid = []
        nonel = []
        for model in self.model_refs:
            e, csec = model.nonel(mother)
            egrid.append(e)
            nonel.append(csec)

        #return np.concatenate(nonel)
        return np.concatenate(egrid), np.concatenate(nonel)
Example 14
    def _join_incl(self, mother, daughter):
        """Returns joined incl cross sections."""

        info(5, 'Joining inclusive cross sections for channel',
             (mother, daughter))
        egrid = []
        incl = []

        for model in self.model_refs:
            e, csec = model.incl(mother, daughter)
            egrid.append(e)
            incl.append(csec)
        #print np.concatenate(egrid), np.concatenate(incl)
        #print '---'*30
        #return np.concatenate(incl)
        return np.concatenate(egrid), np.concatenate(incl)
Example 15
    def _join_incl_diff(self, mother, daughter):
        """Returns joined incl diff cross sections.

        The function assumes the same `x` bins for all models.
        """

        info(5, 'Joining inclusive differential cross sections for channel',
             (mother, daughter))

        egrid = []
        incl_diff = []

        # Get an x grid from a model which supports it
        for model in self.model_refs:
            if model.supports_redistributions:
                self.xbins = model.xbins
                break
        if self.xbins is None:
            raise Exception('Redistributions requested but none of the ' +
                            'models supports it')

        for model in self.model_refs:
            egr, csec = None, None
            if config.debug_level > 1:
                if not np.allclose(self.xbins, model.xbins):
                    raise Exception('Unequal x bins. Aborting...',
                                    self.xbins.shape, model.xbins)
            if (mother, daughter) in model.incl_diff_idcs:
                egr, csec = model.incl_diff(mother, daughter)
                info(10, model.mname, mother, daughter, 'is differential.')

            elif (mother, daughter) in model.incl_idcs:
                # try to use incl and extend by zeros
                egr, csec_1d = model.incl(mother, daughter)
                print(mother, daughter, csec_1d.shape)
                # no x-distribution given, so x = 1
                csec = self._arange_on_xgrid(csec_1d)
                info(1, model.mname, mother, daughter,
                     'not differential, x=1.')
            else:
                info(
                    5, 'Model', model.mname, 'does not provide cross',
                    'sections for channel {0}/{1}. Setting to zero.'.format(
                        mother, daughter))
                # Tried with reduced energy grids to save memory, but
                # matrix addition in decay chains becomes opaque
                # egr = np.array((model.egrid[0], model.egrid[-1]))
                # csec = np.zeros((len(self.xbins) - 1, 2))
                egr = model.egrid
                csec = np.zeros((len(self.xbins) - 1, model.egrid.size))

            egrid.append(egr)
            incl_diff.append(csec)

        #return np.concatenate(incl_diff, axis=1)
        return np.concatenate(egrid), np.concatenate(incl_diff, axis=1)
Example 16
    def _compute_injection_grid(self):
        """Precompute the injection for all species on a single grid.
        
        Assumes that the injection is factorized in E and z"""
        self.injection_grid = np.zeros(self.dim_states)

        for pid in self.ncoids:
            if pid in self.params:
                params = self.params[pid]
            else:
                # fall back to the common parameter set (assumed default)
                params = self.params

            info(
                4, 'Injecting particle {:} with parameters {:}'.format(
                    pid, params))
            inj_spec = self.spec_man.ncoid2sref[pid]
            self.injection_grid[inj_spec.sl] = self.injection_spectrum(
                pid, self.cr_grid, params)
Example 17
    def set_range(self, e_min=None, e_max=None):
        """Set energy range within which to return tabulated data.

        Args:
            e_min (float): minimal energy in GeV
            e_max (float): maximal energy in GeV
        """
        if e_min is None:
            e_min = np.min(self._egrid_tab)
        if e_max is None:
            e_max = np.max(self._egrid_tab)

        info(5, "Setting range to {0:3.2e} - {1:3.2e}".format(e_min, e_max))
        self._range = np.where((self._egrid_tab >= e_min)
                               & (self._egrid_tab <= e_max))[0]
        info(
            2, "Range set to {0:3.2e} - {1:3.2e}".format(
                np.min(self._egrid_tab[self._range]),
                np.max(self._egrid_tab[self._range])))
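
A minimal standalone illustration of the masking above: np.where on the boolean condition returns the integer indices of the tabulated grid points that fall inside [e_min, e_max].

import numpy as np

egrid_tab = np.logspace(0, 12, 13)            # toy energy grid in GeV
e_min, e_max = 1e3, 1e9
_range = np.where((egrid_tab >= e_min) & (egrid_tab <= e_max))[0]
print(egrid_tab[_range][[0, -1]])             # -> [1.e+03 1.e+09]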
Example 18
    def _load(self):
        from prince_cr.data import db_handler
        info(2, "Load tabulated cross sections")
        photo_nuclear_tables = db_handler.photo_meson_db('SOPHIA')
        info(2, "Loading SOPHIA cross sections from file.")

        egrid = photo_nuclear_tables["energy_grid"]
        xbins = photo_nuclear_tables["xbins"]
        info(2, "Egrid loading finished")

        # Integer indices of mothers and inclusive channels are stored
        # in the first column(s)
        pid_nonel = photo_nuclear_tables["inel_mothers"]
        pids_incl = photo_nuclear_tables["mothers_daughters"]

        # the rest of the line denotes the cross section on the egrid in mbarn,
        # which is converted here to cm^2
        nonel_raw = photo_nuclear_tables["inelastic_cross_sctions"]
        incl_raw = photo_nuclear_tables["fragment_yields"]

        info(2, "Data file loading finished")

        self._egrid_tab = egrid
        self.cs_proton_grid = nonel_raw[pid_nonel == 101].flatten()
        self.cs_neutron_grid = nonel_raw[pid_nonel == 100].flatten()

        self.xbins = xbins
        self.redist_proton = {}
        self.redist_neutron = {}
        for (mo, da), csgrid in zip(pids_incl, incl_raw):
            if mo == 101:
                self.redist_proton[da] = csgrid
            elif mo == 100:
                self.redist_neutron[da] = csgrid
            else:
                raise Exception(
                    f'Sophia model should only contain protons and neutrons, but has mother id {mo}'
                )

        # set up inclusive differential channels for protons and neutrons
        # The model can return both, integrated over x and redistributed.
        for da in sorted(self.redist_proton):
            self.incl_diff_idcs.append((101, da))
            #self.incl_idcs.append((101, da))
        for da in sorted(self.redist_neutron):
            self.incl_diff_idcs.append((100, da))
            #self.incl_idcs.append((100, da))

        # For more convenient generation of trivial redistribution matrices when joining
        self.redist_shape = (self.xbins.shape[0], self._egrid_tab.shape[0])
        self.set_range()
Example 19
        def convolve_with_decay_distribution(diff_dist, mother, daughter,
                                             branching_ratio):
            r"""Computes the prompt decay xdist by convolving the x distribution
            of the unstable particle with the decay product distribution.

            :math:`\frac{{\rm d}N^{A\gamma \to \mu}}{{\rm d}x_j} = 
            \sum_{i=0}^{N_x}~\Delta x_i 
            \frac{{\rm d}N^{A\gamma \to \pi}}{{\rm d} x_i}~
            \frac{{\rm d}N^{\pi \to \mu}}{{\rm d} x_j}`
            """
            # dec_dist = int_scale * decs.get_decay_matrix(
            #     mother, daughter, dec_grid)
            dec_dist = decay_cached(mother,daughter)

            info(20, 'convolving with decay dist', mother, daughter)
            # Handle the case where table entry is (energy_grid, matrix)
            if not isinstance(diff_dist, tuple):
                return branching_ratio * dec_dist.dot(diff_dist)
            else:
                return diff_dist[0], branching_ratio * dec_dist.dot(
                    diff_dist[1])
Example 20
    def is_differential(self, mother, daughter):
        """Returns true if the model supports redistributions and requested
        mother/daughter combination should return non-zero redistribution matrices.

        Args:
            mother (int): Neucosma ID of mother particle
            daughter (int): Neucosma ID of daughter particle

        Returns:
            (bool): ``True`` if the model has this particular redistribution function
        """
        # info(10, mother, daughter, " asking for redist")
        # if not self.supports_redistributions:
        #     info(10, mother, daughter, " model doesn't support redist")
        #     return False
        if (daughter <= config.redist_threshold_ID
                or (mother, daughter) in self.incl_diff_idcs):
            info(60, 'Daughter requires redistribution.', mother, daughter)
            return True
        info(60, 'Daughter conserves boost.', mother, daughter)
        return False
Example 21
    def __init__(self, prince_run, energy='grid', *args, **kwargs):
        info(3, 'creating instance')
        #: Reference to species manager
        self.spec_man = prince_run.spec_man

        #: Reference to PhotonField object
        self.photon_field = prince_run.photon_field

        # Initialize grids
        self.e_cosmicray = prince_run.cr_grid
        self.dim_states = prince_run.dim_states
        self.dim_bins = prince_run.dim_bins
        self.e_photon = prince_run.ph_grid

        # xi is dimensionless (natural units) variable
        xi_steps = 400 if 'xi_steps' not in kwargs else kwargs['xi_steps']
        info(2, 'using', xi_steps, 'steps in xi')
        self.xi = np.logspace(np.log10(2 + 1e-8), 16., xi_steps)

        # weights for integration
        self.phi_xi2 = self._phi(self.xi) / (self.xi**2)

        # Scale vector containing the units and factors of Z**2 for nuclei
        self.scale_vec = self._init_scale_vec(energy)

        # Gamma factor of the cosmic ray
        if energy == 'grid':
            gamma = self.e_cosmicray.grid / PRINCE_UNITS.m_proton
        elif energy == 'bins':
            gamma = self.e_cosmicray.bins / PRINCE_UNITS.m_proton
        else:
            raise Exception(
                'Unexpected energy keyword ({:}), use either (grid) or (bins)'.
                format(energy))
        # Grid of photon energies for interpolation
        self.photon_grid = np.outer(1 / gamma,
                                    self.xi) * PRINCE_UNITS.m_electron / 2.
        self.pg_desort = self.photon_grid.reshape(-1).argsort()
        self.pg_sorted = self.photon_grid.reshape(-1)[self.pg_desort]
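
The argsort index pg_desort is kept so that an interpolator can be evaluated on the sorted, flattened photon grid and the results scattered back into the original 2D layout. A small standalone sketch of that pattern, with a square root standing in for the interpolator:

import numpy as np

photon_grid = np.random.rand(5, 7)
pg_desort = photon_grid.reshape(-1).argsort()
pg_sorted = photon_grid.reshape(-1)[pg_desort]

vals_sorted = np.sqrt(pg_sorted)              # interpolator evaluated on sorted values
vals = np.empty_like(vals_sorted)
vals[pg_desort] = vals_sorted                 # undo the sorting
assert np.allclose(vals.reshape(photon_grid.shape), np.sqrt(photon_grid))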
Example 22
    def _load(self, model_prefix):
        from prince_cr.data import db_handler
        info(2, "Load tabulated cross sections")
        # The energy grid is given in MeV, so we convert to GeV
        photo_nuclear_tables = db_handler.photo_nuclear_db(model_prefix)

        egrid = photo_nuclear_tables["energy_grid"]
        info(2, "Egrid loading finished")

        # Integer indices of mothers and inclusive channels are stored
        # in the first column(s)
        pid_nonel = photo_nuclear_tables["inel_mothers"]
        pids_incl = photo_nuclear_tables["mothers_daughters"]

        # the rest of the line denotes the cross section on the egrid in mbarn,
        # which is converted here to cm^2
        nonel_raw = photo_nuclear_tables["inelastic_cross_sctions"]
        incl_raw = photo_nuclear_tables["fragment_yields"]

        info(2, "Data file loading finished")

        # Now write the raw data into a dict structure
        _nonel_tab = {}
        for pid, csgrid in zip(pid_nonel, nonel_raw):
            if get_AZN(pid)[0] > config.max_mass:
                continue
            _nonel_tab[pid] = csgrid

        # If proton and neutron cross sections are not contained
        # in the files, set them to 0. Needed for TALYS and CRPropa2
        for pid in [101, 100]:
            if pid not in _nonel_tab:
                _nonel_tab[pid] = np.zeros_like(egrid)

        # mo = mother, da = daughter
        _incl_tab = {}
        for (mo, da), csgrid in zip(pids_incl, incl_raw):
            if get_AZN(mo)[0] > config.max_mass:
                continue
            _incl_tab[mo, da] = csgrid

        self._egrid_tab = egrid
        self._nonel_tab = _nonel_tab
        self._incl_tab = _incl_tab
        # Set initial range to whole egrid
        self.set_range()
        info(2, "Finished initialization")
Example 23
    def get_full(self, mother, daughter, ygrid, xgrid=None):
        """Return the full response function :math:`f(y) + g(y) + h(x,y)`
        on the grid that is provided. xgrid is ignored if `h(x,y)` is not part of the channel.
        """
        if xgrid is not None and ygrid.shape != xgrid.shape:
            raise Exception('ygrid and xgrid do not have the same shape!!')
        if get_AZN(mother)[0] < get_AZN(daughter)[0]:
            info(
                3,
                'WARNING: channel {:} -> {:} with daughter heavier than mother!'
                .format(mother, daughter))

        res = np.zeros(ygrid.shape)

        if (mother, daughter) in self.incl_intp:
            res += self.incl_intp[(mother, daughter)](ygrid)
        elif (mother, daughter) in self.incl_diff_intp:
            #incl_diff_res = self.incl_diff_intp[(mother, daughter)](
            #    xgrid, ygrid, grid=False)
            #if mother == 101:
            #    incl_diff_res = np.where(xgrid < 0.9, incl_diff_res, 0.)
            #res += incl_diff_res
            #if not(mother == daughter):
            res += self.incl_diff_intp[(mother, daughter)].inteval(xgrid,
                                                                   ygrid,
                                                                   grid=False)

        if mother == daughter and mother in self.nonel_intp:
            # nonel cross section leads to absorption, therefore the minus
            if xgrid is None:
                res -= self.nonel_intp[mother](ygrid)
            else:
                diagonal = xgrid == 1.
                res[diagonal] -= self.nonel_intp[mother](ygrid[diagonal])

        return res
Example 24
        def follow_chain(first_mo, da, csection, reclev):
            """Recursive function to follow decay chains until all
            final state particles are stable.
            
            The result is saved in two dictionaries; one for the boost
            conserving inclusive channels and the other one collects
            channels with meson or lepton decay products, which will
            need special care due to energy redistributions of these
            secondaries.
            """

            info(10, dbg_indent(reclev), 'Entering with', first_mo, da)

            if da not in spec_data:
                info(
                    3, dbg_indent(reclev),
                    'daughter {0} unknown, forcing beta decay. Not Implemented yet!!'
                    .format(da))
                return

            # Daughter is stable. Add it to the new dictionary and terminate
            # recursion
            if spec_data[da]["lifetime"] >= threshold:
                if self.is_differential(None, da):
                    # If the daughter is a meson or lepton, use the dictionary for
                    # differential channels
                    info(
                        20, dbg_indent(reclev),
                        'daughter {0} stable and differential. Adding to ({1}, {2})'
                        .format(da, first_mo, da))
                    new_dec_diff_tab[(first_mo, da)] = csection
                else:
                    info(
                        20, dbg_indent(reclev),
                        'daughter {0} stable. Adding to ({1}, {2})'.format(
                            da, first_mo, da))
                    new_incl_tab[(first_mo, da)] = csection
                return

            # ..otherwise follow decay products of this daughter, tracking the
            # original mother particle (first_mo). The cross section (csection) is
            # reduced by the branching ratio (br) of this particular channel
            for br, daughters in spec_data[da]["branchings"]:
                info(10, dbg_indent(reclev),
                     ("{3} -> {0:4d} -> {2:4.2f}: {1}").format(
                         da, ", ".join(map(str, daughters)), br, first_mo))

                for chained_daughter in daughters:
                    # Follow each secondary and increment the recursion level by one
                    if self.is_differential(None, chained_daughter):
                        info(10, 'daughter', chained_daughter, 'of', da,
                             'is differential')
                        follow_chain(
                            first_mo, chained_daughter,
                            convolve_with_decay_distribution(
                                self._arange_on_xgrid(csection), da,
                                chained_daughter, br), reclev + 1)
                    else:
                        follow_chain(first_mo, chained_daughter, br * csection,
                                     reclev + 1)
Example 25
    def _reduce_channels(self):
        """Follows decay chains until all inclusive reactions point to
        stable final state particles.

        The "tau_dec_threshold" parameter in the config controls the
        definition of stable. Unstable nuclei for which no decay channels
        are known will be forced to beta-decay until they reach a stable
        element.
        """
        from prince_cr.util import AdditiveDictionary
        # TODO: check routine, how to avoid empty channels and
        # mothers with zero nonel cross sections

        # The new dictionary that will replace _incl_tab
        new_incl_tab = AdditiveDictionary()
        new_dec_diff_tab = AdditiveDictionary()

        threshold = config.tau_dec_threshold

        # How to indent debug printout for recursion
        dbg_indent = lambda lev: 4 * lev * "-" + ">" if lev else ""

        info(2, "Integrating out species with lifetime smaller than",
             threshold)
        info(3, (
            "Before optimization, the number of known primaries is {0} with " +
            "in total {1} inclusive channels").format(len(self._nonel_tab),
                                                      len(self._incl_tab)))

        if self.xbins is None:
            info(
                4,
                'Model does not provide a native xbins. Assuming JH special sophia',
                'binning.')
            from .photo_meson import SophiaSuperposition
            self.xbins = SophiaSuperposition().xbins

        bc = self.xcenters
        bw = bin_widths(self.xbins)
        # The x_mu/x_pi grid
        # dec_grid = np.fromfunction(
        #     lambda j, i: 10**(np.log10(bc[1] / bc[0]) * (j - i)), (len(bc),
        #                                                            len(bc)))

        # dec_grid = np.outer(bc, 1 / bc)

        dec_bins = np.outer(self.xbins, 1 / bc)
        dec_bins_lower = dec_bins[:-1]
        dec_bins_upper = dec_bins[1:]

        # dec_grid[dec_grid > 1.] *= 0.
        # The differential element dx_mu/x_pi
        int_scale = np.tile(bw / bc, (len(bc), 1))

        from functools import lru_cache
        @lru_cache(maxsize=512, typed=False)
        def decay_cached(mother,daughter):
            dec_dist = int_scale * decs.get_decay_matrix_bin_average(
                mother, daughter, dec_bins_lower, dec_bins_upper)
            
            return dec_dist

        def convolve_with_decay_distribution(diff_dist, mother, daughter,
                                             branching_ratio):
            r"""Computes the prompt decay xdist by convolving the x distribution
            of the unstable particle with the decay product distribution.

            :math:`\frac{{\rm d}N^{A\gamma \to \mu}}{{\rm d}x_j} = 
            \sum_{i=0}^{N_x}~\Delta x_i 
            \frac{{\rm d}N^{A\gamma \to \pi}}{{\rm d} x_i}~
            \frac{{\rm d}N^{\pi \to \mu}}{{\rm d} x_j}`
            """
            # dec_dist = int_scale * decs.get_decay_matrix(
            #     mother, daughter, dec_grid)
            dec_dist = decay_cached(mother,daughter)

            info(20, 'convolving with decay dist', mother, daughter)
            # Handle the case where table entry is (energy_grid, matrix)
            if not isinstance(diff_dist, tuple):
                return branching_ratio * dec_dist.dot(diff_dist)
            else:
                return diff_dist[0], branching_ratio * dec_dist.dot(
                    diff_dist[1])

        def follow_chain(first_mo, da, csection, reclev):
            """Recursive function to follow decay chains until all
            final state particles are stable.
            
            The result is saved in two dictionaries; one for the boost
            conserving inclusive channels and the other one collects
            channels with meson or lepton decay products, which will
            need special care due to energy redistributions of these
            secondaries.
            """

            info(10, dbg_indent(reclev), 'Entering with', first_mo, da)

            if da not in spec_data:
                info(
                    3, dbg_indent(reclev),
                    'daughter {0} unknown, forcing beta decay. Not Implemented yet!!'
                    .format(da))
                return

            # Daughter is stable. Add it to the new dictionary and terminate
            # recursion
            if spec_data[da]["lifetime"] >= threshold:
                if self.is_differential(None, da):
                    # If the daughter is a meson or lepton, use the dictionary for
                    # differential channels
                    info(
                        20, dbg_indent(reclev),
                        'daughter {0} stable and differential. Adding to ({1}, {2})'
                        .format(da, first_mo, da))
                    new_dec_diff_tab[(first_mo, da)] = csection
                else:
                    info(
                        20, dbg_indent(reclev),
                        'daughter {0} stable. Adding to ({1}, {2})'.format(
                            da, first_mo, da))
                    new_incl_tab[(first_mo, da)] = csection
                return

            # ..otherwise follow decay products of this daughter, tracking the
            # original mother particle (first_mo). The cross section (csection) is
            # reduced by the branching ratio (br) of this particular channel
            for br, daughters in spec_data[da]["branchings"]:
                info(10, dbg_indent(reclev),
                     ("{3} -> {0:4d} -> {2:4.2f}: {1}").format(
                         da, ", ".join(map(str, daughters)), br, first_mo))

                for chained_daughter in daughters:
                    # Follow each secondary and increment the recursion level by one
                    if self.is_differential(None, chained_daughter):
                        info(10, 'daughter', chained_daughter, 'of', da,
                             'is differential')
                        follow_chain(
                            first_mo, chained_daughter,
                            convolve_with_decay_distribution(
                                self._arange_on_xgrid(csection), da,
                                chained_daughter, br), reclev + 1)
                    else:
                        follow_chain(first_mo, chained_daughter, br * csection,
                                     reclev + 1)

        # Remove all unstable particles from the dictionaries
        for mother in sorted(self._nonel_tab.keys()):
            if mother not in spec_data or spec_data[mother][
                    "lifetime"] < threshold:
                info(
                    20,
                    "Primary species {0} does not fulfill stability criteria.".
                    format(mother))
                _ = self._nonel_tab.pop(mother)
        # Only stable (interacting) mother particles are left
        self._update_indices()

        for (mother, daughter) in self.incl_idcs:

            if mother not in self.nonel_idcs:
                info(
                    30, "Removing {0}/{1} from incl, since mother not stable ".
                    format(mother, daughter))
                _ = self._incl_tab.pop((mother, daughter))

            elif self.is_differential(mother, daughter):
                # Move the distributions which are expected to be differential
                # to _incl_diff_tab
                self._incl_diff_tab[(mother,
                                     daughter)] = self._arange_on_xgrid(
                                         self._incl_tab.pop(
                                             (mother, daughter)))

        self._update_indices()

        for (mother, daughter) in self.incl_diff_idcs:

            if mother not in self.nonel_idcs:
                info(
                    30,
                    "Removing {0}/{1} from diff incl, since mother not stable "
                    .format(mother, daughter))
                _ = self._incl_diff_tab.pop((mother, daughter))

        self._update_indices()

        # Launch the reduction for each inclusive channel
        for (mo, da), value in list(self._incl_tab.items()):
            #print mo, da, value
            #print '---'*30
            follow_chain(mo, da, value, 0)

        for (mo, da), value in list(self._incl_diff_tab.items()):
            #print mo, da, value
            #print '---'*30
            follow_chain(mo, da, value, 0)

        # Overwrite the old incl dictionary
        self._incl_tab = dict(new_incl_tab)
        # Overwrite the old incl_diff dictionary
        self._incl_diff_tab = dict(new_dec_diff_tab)
        # Reduce also the incl_diff_tab by removing the unknown mothers. At this stage
        # of the code, the particles with redistributions are
        info(
            3,
            ("After optimization, the number of known primaries is {0} with " +
             "in total {1} inclusive channels").format(
                 len(self._nonel_tab),
                 len(self._incl_tab) + len(self._incl_diff_tab)))
        info(2, f'Cache used for decays, {decay_cached.cache_info()}') # pylint:disable=no-value-for-parameter
Example 26
    def _optimize_and_generate_index(self):
        """Construct a list of mothers and (mother, daughter) indices.

        Args:
            just_reactions (bool): If True then fill just the reactions index.
        """

        # Integrate out short lived processes and leave only stable particles
        # in the databases
        self._reduce_channels()

        # Go through all three cross section categories
        # index contents in the ..known..variable
        self.reactions = {}

        self._update_indices()

        for mo, da in self.incl_idcs:
            if da >= 100 and get_AZN(da)[0] > get_AZN(mo)[0]:
                raise Exception(
                    'Daughter {0} heavier than mother {1}. Physics??'.format(
                        da, mo))

            if mo not in self.reactions:
                self.reactions[mo] = []
                self.known_species.append(mo)

            if (mo, da) not in self.reactions[mo]:
                # Make sure it's a unique list
                self.reactions[mo].append((mo, da))
            if self.is_differential(mo, da):
                # Move the distributions which are expected to be differential
                # to _incl_diff_tab
                self._incl_diff_tab[(mo, da)] = self._arange_on_xgrid(
                    self._incl_tab.pop((mo, da)))
                info(10, "Channel {0} -> {1} forced to be differential.")
            else:
                self.known_bc_channels.append((mo, da))
                self.known_species.append(da)

        for mo, da in list(self._incl_diff_tab.keys()):
            if da >= 100 and get_AZN(da)[0] > get_AZN(mo)[0]:
                raise Exception(
                    'Daughter {0} heavier than mother {1}. Physics??'.format(
                        da, mo))

            if mo not in self.reactions:
                self.reactions[mo] = []
                self.known_species.append(mo)

            if (mo, da) not in self.reactions[mo]:
                # Make sure it's a unique list to avoid unnecessary loops
                self.reactions[mo].append((mo, da))
                self.known_diff_channels.append((mo, da))
                self.known_species.append(da)

        # Remove duplicates
        self.known_species = sorted(list(set(self.known_species)))
        self.known_bc_channels = sorted(list(set(self.known_bc_channels)))
        self.known_diff_channels = sorted(list(set(self.known_diff_channels)))

        for sp in self.known_species:
            if sp >= 100 and (sp, sp) not in self.known_diff_channels:
                self.known_bc_channels.append((sp, sp))
            if sp in self.reactions and (sp, sp) not in self.reactions[sp]:
                self.reactions[sp].append((sp, sp))

        # Make sure the indices are up to date
        self._update_indices()
Example 27
def nu_from_beta_decay(x_grid, mother, daughter, Gamma=200, angle=None):
    """
    Energy distribution of neutrinos from beta decays of mother to daughter.
    The rest frame distribution is boosted to the observer's frame and the angular average is then done numerically.

    Args:
      x_grid (float): energy fraction transferred to the secondary
      mother (int): id of mother
      daughter (int): id of daughter
      Gamma (float): Lorentz factor of the parent particle, default: 200
                     For large Gamma this should not play a role, as the decay is scale invariant
      angle (float): collision angle, if None this will be averaged over 2 pi
    Returns:
      float: probability density on x_grid
    """
    import warnings

    info(10, 'Calculating neutrino energy from beta decay', mother, daughter)

    mass_el = spec_data[20]['mass']
    mass_mo = spec_data[mother]['mass']
    mass_da = spec_data[daughter]['mass']

    Z_mo = spec_data[mother]['charge']
    Z_da = spec_data[daughter]['charge']

    A_mo, _, _ = get_AZN(mother)

    if mother == 100 and daughter == 101:
        # for this channel the masses are already nucleon masses
        qval = mass_mo - mass_da - mass_el
    elif Z_da == Z_mo - 1:  # beta+ decay
        qval = mass_mo - mass_da - 2 * mass_el
    elif Z_da == Z_mo + 1:  # beta- decay
        qval = mass_mo - mass_da
    else:
        raise Exception('Not an allowed beta decay channel: {:} -> {:}'.format(
            mother, daughter))

    # substitute this to the energy grid
    E0 = qval + mass_el
    # NOTE: we subsitute into energy per nucleon here
    Emo = Gamma * mass_mo / A_mo
    E = x_grid * Emo

    # print '------','beta decay','------'
    # print mother
    # print E0
    # print A_mo
    # print Emo

    if angle is None:
        # ctheta = np.linspace(-1, 1, 1000)
        # we use here logspace, as high resolution is mainly needed at small energies
        # otherwise the solution will oscillate at low energy
        ctheta = np.unique(
            np.concatenate((
                np.logspace(-8, 0, 1000) - 1,
                1 - np.logspace(0, -8, 1000),
            )))
    else:
        ctheta = angle

    boost = Gamma * (1 - ctheta)
    Emax = E0 * boost

    E_mesh, boost_mesh = np.meshgrid(E, boost, indexing='ij')
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        res = E_mesh**2 / boost_mesh**5 * (Emax - E_mesh) * np.sqrt(
            (E_mesh - Emax)**2 - boost_mesh**2 * mass_el**2)
    res[E_mesh > Emax] = 0.
    res = np.nan_to_num(res)

    if np.all(res == 0):
        info(10, 'Differential distribution is all zeros for', mother,
             daughter, 'No angle averaging performed!')
    elif angle is None:
        # now average over angle
        res = trapz(res, x=ctheta, axis=1)
        res = res / trapz(res, x=x_grid)
    else:
        res = res[:, 0]
        res = res / trapz(res, x=x_grid)

    return res
Example 28
def get_decay_matrix(mo, da, x_grid):
    """
    Selects the correct redistribution for the given decay channel.
    If the channel is unknown a zero grid is returned instead of raising an error

    Args:
      mo (int): index of the mother
      da (int): index of the daughter
      x_grid (float): grid in x = E_da / E_mo on which to return the result
                      (If x is a 2D matrix only the last column is computed
                      and then repeated over the matrix assuming that the 
                      main diagonal is always x = 1)
    Returns:
      float: redistribution on the grid mo_energy / da_energy
    """

    info(10, 'Generating decay redistribution for', mo, da)

    # --------------------------------
    # pi+ to numu or pi- to numubar
    # --------------------------------
    if mo in [2, 3] and da in [13, 14]:
        return pion_to_numu(x_grid)

    # --------------------------------
    # pi+ to mu+ or pi- to mu-
    # --------------------------------
    elif mo in [2, 3] and da in [5, 6, 7, 8, 9, 10]:
        # (any helicity)
        if da in [7, 10]:
            return pion_to_muon(x_grid)
        # left handed, hel = -1
        elif da in [5, 8]:
            return pion_to_muon(x_grid) * prob_muon_hel(x_grid, -1.)
        # right handed, hel = 1
        elif da in [6, 9]:
            return pion_to_muon(x_grid) * prob_muon_hel(x_grid, 1.)
        else:
            raise Exception(
                'This should never have happened, check if-statements above!')

    # --------------------------------
    # muon to neutrino
    # --------------------------------
    elif mo in [5, 6, 7, 8, 9, 10] and da in [11, 12, 13, 14]:
        # translating muon ids to helicity
        muon_hel = {
            5: 1.,
            6: -1.,
            7: 0.,
            8: 1.,
            9: -1.,
            10: 0.,
        }
        hel = muon_hel[mo]
        # muon+ to electron neutrino
        if mo in [5, 6, 7] and da in [11]:
            return muonplus_to_nue(x_grid, hel)
        # muon+ to muon anti-neutrino
        elif mo in [5, 6, 7] and da in [14]:
            return muonplus_to_numubar(x_grid, hel)
        # muon- to elec anti-neutrino
        elif mo in [8, 9, 10] and da in [12]:
            return muonplus_to_nue(x_grid, -1 * hel)
        # muon- to muon neutrino
        elif mo in [8, 9, 10] and da in [13]:
            return muonplus_to_numubar(x_grid, -1 * hel)

    # --------------------------------
    # neutrinos from beta decays
    # --------------------------------

    # beta+ (daughter nucleus mo - 1, emits nu_e)
    elif mo > 99 and da == 11:
        info(10, 'nu_e from beta+ decay', mo, mo - 1, da)
        return nu_from_beta_decay(x_grid, mo, mo - 1)
    # beta- (daughter nucleus mo + 1, emits nubar_e)
    elif mo > 99 and da == 12:
        info(10, 'nubar_e from beta- decay', mo, mo + 1, da)
        return nu_from_beta_decay(x_grid, mo, mo + 1)
    # neutron
    elif mo > 99 and 99 < da < 200:
        info(10, 'beta decay boost conservation', mo, da)
        return boost_conservation(x_grid)
    else:
        info(
            5,
            'Called with unknown channel {:} to {:}, returning an empty redistribution'
            .format(mo, da))
        # no known channel, return zeros
        return np.zeros(x_grid.shape)
Example 29
def get_decay_matrix_bin_average(mo, da, x_lower, x_upper):
    """
    Selects the correct redistribution for the given decay channel.
    If the channel is unknown a zero grid is returned instead of raising an error

    Args:
      mo (int): index of the mother
      da (int): index of the daughter
      x_grid (float): grid in x = E_da / E_mo on which to return the result

    Returns:
      float: redistribution on the grid mo_energy / da_energy
    """

    # TODO: Some of the distribution are not averaged yet.
    # The error is small for smooth distributions though
    info(10, 'Generating decay redistribution for', mo, da)

    x_grid = (x_upper + x_lower) / 2

    # remember shape, but only calculate for last column, as x repeats in each column
    from scipy.integrate import trapz
    shape = x_grid.shape

    if len(shape) == 2:
        x_grid = x_grid[:, -1]
        x_upper = x_upper[:, -1]
        x_lower = x_lower[:, -1]

    # --------------------------------
    # pi+ to numu or pi- to numubar
    # --------------------------------
    if mo in [2, 3] and da in [13, 14]:
        result = pion_to_numu_avg(x_lower, x_upper)

    # --------------------------------
    # pi+ to mu+ or pi- to mu-
    # --------------------------------
    # TODO: The helicity distributions need to be averaged analytically
    elif mo in [2, 3] and da in [5, 6, 7, 8, 9, 10]:
        # (any helicity)
        if da in [7, 10]:
            result = pion_to_muon_avg(x_lower, x_upper)
        # left handed, hel = -1
        elif da in [5, 8]:
            result = pion_to_muon_avg(x_lower, x_upper) * prob_muon_hel(
                x_grid, -1.)
        # right handed, hel = 1
        elif da in [6, 9]:
            result = pion_to_muon_avg(x_lower, x_upper) * prob_muon_hel(
                x_grid, 1.)
        else:
            raise Exception(
                'This should never have happened, check if-statements above!')

    # --------------------------------
    # muon to neutrino
    # --------------------------------
    # TODO: The following distributions need to be averaged analytically
    elif mo in [5, 6, 7, 8, 9, 10] and da in [11, 12, 13, 14]:
        # translating muon ids to helicity
        muon_hel = {
            5: 1.,
            6: -1.,
            7: 0.,
            8: 1.,
            9: -1.,
            10: 0.,
        }
        hel = muon_hel[mo]
        # muon+ to electron neutrino
        if mo in [5, 6, 7] and da in [11]:
            result = muonplus_to_nue(x_grid, hel)
        # muon+ to muon anti-neutrino
        elif mo in [5, 6, 7] and da in [14]:
            result = muonplus_to_numubar(x_grid, hel)
        # muon- to elec anti-neutrino
        elif mo in [8, 9, 10] and da in [12]:
            result = muonplus_to_nue(x_grid, -1 * hel)
        # muon- to muon neutrino
        elif mo in [8, 9, 10] and da in [13]:
            result = muonplus_to_numubar(x_grid, -1 * hel)

    # --------------------------------
    # neutrinos from beta decays
    # --------------------------------
    # TODO: The following beta decay to neutrino distributions need to be averaged analytically
    # TODO: Also the angular averaging is still done numerically
    # beta+ (daughter nucleus mo - 1, emits nu_e)
    elif mo > 99 and da == 11:
        info(10, 'nu_e from beta+ decay', mo, mo - 1, da)
        result = nu_from_beta_decay(x_grid, mo, mo - 1)
    # beta- (daughter nucleus mo + 1, emits nubar_e)
    elif mo > 99 and da == 12:
        info(10, 'nubar_e from beta- decay', mo, mo + 1, da)
        result = nu_from_beta_decay(x_grid, mo, mo + 1)
    # neutron
    elif mo > 99 and 99 < da < 200:
        info(10, 'beta decay boost conservation', mo, da)
        result = boost_conservation_avg(x_lower, x_upper)
    else:
        info(
            5,
            'Called with unknown channel {:} to {:}, returning an empty redistribution'
            .format(mo, da))
        # no known channel, return zeros
        result = np.zeros(x_grid.shape)

    # now fill this into diagonals of matrix
    if len(shape) == 2:
        # fill the result into the matrix diagonals
        res_mat = np.zeros(shape)
        for idx, val in enumerate(result[::-1]):
            np.fill_diagonal(res_mat[:, idx:], val)
        result = res_mat

    return result
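
A toy check (standalone, not part of the module) of the diagonal-filling trick at the end: on a log-spaced energy grid, x = E_da / E_mo is constant along diagonals, so a result computed for the last column only can be spread over the full matrix with np.fill_diagonal. Here a simple stand-in function replaces the actual redistribution:

import numpy as np

egrid = np.logspace(0, 3, 7)                  # toy log-spaced energy grid
x_grid = np.outer(egrid, 1 / egrid)           # x[i, j] = E_da[i] / E_mo[j]

f = lambda x: x**2                            # stand-in for a redistribution function
result = f(x_grid[:, -1])                     # evaluate on the last column only

res_mat = np.zeros(x_grid.shape)
for idx, val in enumerate(result[::-1]):
    np.fill_diagonal(res_mat[:, idx:], val)   # fill the idx-th superdiagonal

# agrees with direct evaluation wherever x <= 1 (the kinematically allowed region)
mask = x_grid <= 1. + 1e-12
assert np.allclose(res_mat[mask], f(x_grid)[mask])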
Example 30
    def _init_matrices(self):
        """ A new take on filling the matrices"""

        # Define some short-cuts
        known_species = self.spec_man.known_species[::-1]
        sp_id_ref = self.spec_man.ncoid2sref
        resp = self.cross_sections.resp
        m_pr = PRINCE_UNITS.m_proton

        # Energy variables
        dcr = self.e_cosmicray.d
        dph = self.e_photon.d
        ecr = self.e_cosmicray.grid
        bcr = self.e_cosmicray.bins
        eph = self.e_photon.grid
        bph = self.e_photon.bins
        delta_ec = self.e_cosmicray.widths
        delta_ph = self.e_photon.widths

        # Edges of each CR energy bin and photon energy bin
        elims = np.vstack([bcr[:-1], bcr[1:]])
        plims = np.vstack([bph[:-1], bph[1:]])

        # CR and photon grid indices
        emo_idcs = np.arange(dcr)
        eda_idcs = np.arange(dcr)
        p_idcs = np.arange(dph)

        # values for x and y to cut on:
        x_cut = config.x_cut
        y_cut = config.y_cut
        x_cut_proton = config.x_cut_proton

        ibatch = 0
        import itertools
        spec_iter = itertools.product(known_species, known_species)
        for moid, daid in spec_iter:

            if moid < 100:
                continue
            else:
                info(10, f'Filling channel {moid} -> {daid}')

            has_nonel = moid == daid
            if has_nonel:
                intp_nonel = resp.nonel_intp[moid].antiderivative()

            if (((moid, daid) in self.cross_sections.known_bc_channels) or
                (has_nonel and
                 (moid, daid) not in self.cross_sections.known_diff_channels)):

                has_incl = (moid, daid) in resp.incl_intp
                if has_incl:
                    intp_bc = resp.incl_intp[(moid, daid)].antiderivative()
                else:
                    info(1, 'Inclusive interpolator not found for',
                         (moid, daid))

                if not (has_nonel or has_incl):
                    raise Exception('Channel without interactions:',
                                    (moid, daid))

                # The cross sections need to be evaluated
                # on x = E_{CR,da} / E_{CR,mo} and y = E_ph * E_{CR,mo} / m_proton
                # To vectorize the evaluation, we create outer products using numpy
                # broadcasting:

                emo = ecr
                xl = elims[0] / emo
                xu = elims[1] / emo
                delta_x = delta_ec / emo

                yl = plims[0, None, :] * emo[:, None] / m_pr
                yu = plims[1, None, :] * emo[:, None] / m_pr
                delta_y = delta_ph[None, :] * emo[:, None] / m_pr

                int_fac = (delta_ec[:, None] * delta_ph[None, :] /
                           emo[:, None])
                diff_fac = 1. / delta_x[:, None] / delta_y

                # This takes the average by evaluating the integral and dividing by bin
                # width
                if has_incl:
                    self._batch_matrix[ibatch:ibatch + len(emo), :] = (
                        intp_bc(yu) - intp_bc(yl)) * int_fac * diff_fac
                if has_nonel:
                    self._batch_matrix[ibatch:ibatch + len(emo), :] -= (
                        intp_nonel(yu) - intp_nonel(yl)) * int_fac * diff_fac

                # finally map this to the coupling matrix
                ibatch += len(emo)
                self._batch_rows.append(sp_id_ref[daid].lidx() + eda_idcs)
                self._batch_cols.append(sp_id_ref[moid].lidx() + emo_idcs)

            elif (moid, daid) in self.cross_sections.known_diff_channels:

                has_redist = (moid, daid) in resp.incl_diff_intp
                if has_redist:
                    intp_diff = resp.incl_diff_intp[(moid, daid)]
                    intp_diff_integral = resp.incl_diff_intp_integral[(moid,
                                                                       daid)]
                    intp_nonel = resp.nonel_intp[moid]
                    intp_nonel_antid = resp.nonel_intp[moid].antiderivative()

                    ymin = np.min(intp_diff.get_knots()[1])
                else:
                    raise Exception('This should not occur.')

                ibatch_bf = ibatch
                # generate outer products using broadcasting
                emo = ecr[:, None, None]
                eda = ecr[None, :, None]
                epho = eph[None, None, :]
                target_shape = np.ones_like(emo * eda * epho)

                xl = elims[0, None, :, None] / emo * target_shape
                xu = elims[1, None, :, None] / emo * target_shape
                delta_x = delta_ec[None, :, None] / emo

                yl = plims[0, None, None, :] * emo / m_pr * target_shape
                yu = plims[1, None, None, :] * emo / m_pr * target_shape
                delta_y = delta_ph[None, None, :] * emo / m_pr

                int_fac = (delta_ec[:, None, None] * delta_ph[None, None, :] /
                           emo) * target_shape
                diff_fac = 1. / delta_x / delta_y

                # Generate boolean arrays to cut on xvalues
                if daid == 101:
                    cuts = np.logical_and(xl >= x_cut_proton, xl <= 1)
                else:
                    # or (yu < ymin) or (yl > y_cut)
                    cuts = np.logical_and(xl >= x_cut, xl <= 1)
                cuts = cuts[:, :, 0]

                # # NOTE JH: This is an old version, which brute force vectorizes the integral with numpy
                # # I am leaving this in the comments, in case we want to go back for testing-
                # integrator = np.vectorize(intp_diff.integral)
                # res = integrator(xl[cuts], xu[cuts], yl[cuts], yu[cuts]) * diff_fac[cuts] * int_fac[cuts]

                # This takes the average by evaluating the integral and dividing by the bin width
                # intp_diff_integral contains the antiderivative, so to get the integral over (xl,yl,xu,yu)
                # we need the difference INT = F(xu,yu) - F(xl,yu) - F(xu,yl) +
                # F(xl,yl)
                res = intp_diff_integral.ev(xu[cuts], yu[cuts])
                res -= intp_diff_integral.ev(xl[cuts], yu[cuts])
                res -= intp_diff_integral.ev(xu[cuts], yl[cuts])
                res += intp_diff_integral.ev(xl[cuts], yl[cuts])
                res *= diff_fac[cuts] * int_fac[cuts]
                res[res < 0] = 0.

                # Since we made cuts on x, we need to make the same cut on the index
                # mapping
                emoidx, edaidx, _ = np.meshgrid(
                    sp_id_ref[moid].lidx() + emo_idcs,
                    sp_id_ref[daid].lidx() + eda_idcs,
                    p_idcs,
                    indexing='ij')
                emoidx, edaidx = emoidx[cuts], edaidx[cuts]

                # Now add the nonel interactions on the main diagonal
                if has_nonel:
                    res[emoidx == edaidx] -= (
                        intp_nonel_antid(yu[cuts][emoidx == edaidx]) -
                        intp_nonel_antid(yl[cuts][emoidx == edaidx])) * (
                            diff_fac[cuts][emoidx == edaidx] *
                            int_fac[cuts][emoidx == edaidx])

                # Finally write this to the batch matrix
                self._batch_matrix[ibatch:ibatch + len(emoidx), :] = res
                self._batch_rows.append(edaidx[:, 0])
                self._batch_cols.append(emoidx[:, 0])
                ibatch += len(emoidx)

            else:
                info(20, 'Species combination not included in model', moid,
                     daid)

        self._batch_matrix = self._batch_matrix[:ibatch, :]
        self._batch_rows = np.concatenate(self._batch_rows, axis=None)
        self._batch_cols = np.concatenate(self._batch_cols, axis=None)
        self._batch_vec = np.zeros(ibatch)

        info(2, f'Batch matrix shape: {self._batch_matrix.shape}')
        info(2, f'Batch rows shape: {self._batch_rows.shape}')
        info(2, f'Batch cols shape: {self._batch_cols.shape}')
        info(2, f'Batch vector shape: {self._batch_vec.shape}')

        memory = (self._batch_matrix.nbytes + self._batch_rows.nbytes +
                  self._batch_cols.nbytes + self._batch_vec.nbytes) / 1024**2
        info(3, "Memory usage after initialization: {:} MB".format(memory))
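
A minimal sketch (toy numbers, standalone) of the broadcasting pattern used above to build the y = E_ph * E_CR / m_proton bin edges on a (CR energy x photon energy) grid without explicit loops; the proton mass is just a placeholder value here:

import numpy as np

m_pr = 0.938                                  # GeV, placeholder for PRINCE_UNITS.m_proton
emo = np.logspace(9, 11, 3)                   # toy cosmic-ray energies
bph = np.logspace(-10, -8, 3)                 # toy photon bin edges
plims = np.vstack([bph[:-1], bph[1:]])        # lower/upper photon bin edges, shape (2, n_ph)

yl = plims[0, None, :] * emo[:, None] / m_pr  # shape (n_cr, n_ph)
yu = plims[1, None, :] * emo[:, None] / m_pr
print(yl.shape, yu.shape)                     # -> (3, 2) (3, 2)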