def setup_function(self):

    self.data.data_specs = self.calc_specs

    # Check there are muons in the data
    assert self.input_names[0] in self.data.names, (
        "No `%s` events found in the input data, only found %s"
        % (self.input_names[0], self.data.names)
    )

    # Create the primary uncertainties spline that will be used for
    # re-weighting the muon flux
    self.prim_unc_spline = self._make_prim_unc_spline()

    # Get the variable that the flux uncertainties are splined w.r.t.
    rw_variable = self.params['delta_gamma_mu_variable'].value

    #assert rw_variable in self.data[self.input_names[0]], "Cannot find the variable `%s` in the muon container, cannot interpret spline" % rw_variable #TODO Fix in container.py, `in` doesn't work...

    # Evaluate the primary CR systematic spline (using the correct FTYPE)
    self.rw_array = self.prim_unc_spline(
        self.data[self.input_names[0]][rw_variable]
    ).astype(FTYPE)

    # The reweighting term is positive-only by construction, so normalise
    # it by shifting the whole array down by a normalisation factor
    norm = FTYPE(sum(self.rw_array)) / FTYPE(len(self.rw_array))
    self.cr_rw_array = self.rw_array - norm
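# A minimal numerical sketch of the mean-subtraction above (illustration only,
# not part of the original stage; assumes `np` and `FTYPE` are imported as in
# the rest of the module): the spline output is positive-only, so subtracting
# its mean centres the re-weighting term on zero and preserves the overall
# normalisation.
def _sketch_cr_rw_norm():
    rw_array = np.array([0.8, 1.0, 1.2], dtype=FTYPE)   # positive-only spline output
    norm = FTYPE(sum(rw_array)) / FTYPE(len(rw_array))  # mean, here 1.0
    return rw_array - norm                              # approx. [-0.2, 0.0, 0.2]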
def __init__(self, **std_kwargs):
    self._central_gamma = FTYPE(-2.5)
    self._central_norm = FTYPE(0.787e-18)

    self._e_ratio = FTYPE(1.0)
    self._mu_ratio = FTYPE(1.0)
    self._tau_ratio = FTYPE(1.0)

    expected_params = ("astro_delta", "astro_norm")

    super().__init__(
        expected_params=expected_params,
        **std_kwargs,
    )
def imul_and_scale(vals, scale, out):
    """Multiply and scale augmented assignment .. ::

        out[:] *= vals[:] * scale

    """
    imul_and_scale_gufunc(vals, FTYPE(scale), out=out)
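# Hedged usage sketch of the wrapper above (hypothetical `_sketch_imul_and_scale`,
# not in the original module), assuming the gufunc backend implements exactly the
# documented element-wise operation `out[:] *= vals[:] * scale`:
def _sketch_imul_and_scale():
    vals = np.array([1.0, 2.0, 3.0], dtype=FTYPE)
    out = np.array([10.0, 10.0, 10.0], dtype=FTYPE)
    imul_and_scale(vals, 0.5, out)
    return out  # expected: [5.0, 10.0, 15.0]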
def pow(vals, pwr, out):  # pylint: disable=redefined-builtin
    """Raise vals to pwr .. ::

        out[:] = vals[:]**pwr

    """
    pow_gufunc(vals, FTYPE(pwr), out=out)
def compute_function(self):
    scale = self.params.nutau_xsec_scale.value.m_as('dimensionless')
    for container in self.data:
        if container.name in ["nutau_cc", "nutaubar_cc"]:
            calc_scale_vectorized(
                container["nutau_xsec_func"],
                FTYPE(scale),
                out=container["nutau_xsec_scale"],
            )
def scale(vals, scale, out):
    """Multiply .. ::

        out[:] = vals[:] * scale

    """
    scale_gufunc(vals, FTYPE(scale), out=out)
def imul_and_scale(vals, scale, out):
    """Multiply and scale augmented assignment .. ::

        out[:] *= vals[:] * scale

    """
    imul_and_scale_gufunc(vals.get(WHERE), FTYPE(scale), out=out.get(WHERE))
    out.mark_changed(WHERE)
def pow(vals, pwr, out):  # pylint: disable=redefined-builtin
    """Raise vals to pwr .. ::

        out[:] = vals[:]**pwr

    """
    pow_gufunc(vals.get(WHERE), FTYPE(pwr), out=out.get(WHERE))
    out.mark_changed(WHERE)
def scale(vals, scale, out):
    """Multiply .. ::

        out[:] = vals[:] * scale

    """
    scale_gufunc(vals.get(WHERE), FTYPE(scale), out=out.get(WHERE))
    out.mark_changed(WHERE)
def compute_function(self): """ Tilt it, scale it, bop it """ self.data.representation = self.calc_mode delta = self.params.astro_delta.value.m_as("dimensionless") norm = self.params.astro_norm.value for container in self.data: apply_sys_loop( container["true_energy"], container["true_coszen"], FTYPE(delta), FTYPE(norm), container["astro_flux_nominal"], out=container["astro_flux"], ) container.mark_changed("astro_flux")
def apply_function(self):
    dis_csms = self.params.dis_csms.m_as('dimensionless')

    for container in self.data:
        apply_dis_sys(
            container['dis_correction_total'].get(WHERE),
            container['dis_correction_diff'].get(WHERE),
            FTYPE(dis_csms),
            out=container['weights'].get(WHERE),
        )
        container['weights'].mark_changed(WHERE)
def apply_function(self):
    theta = self.params.theta.value.m_as('dimensionless')
    deltam31 = self.params.deltam31.value.m_as('eV**2')

    for container in self.data:
        if 'numu' in container.name:
            apply_probs_vectorized(
                container['nu_flux'],
                FTYPE(theta),
                FTYPE(deltam31),
                container['true_energy'],
                container['true_coszen'],
                ITYPE(1),
                out=container['weights'],
            )
        if 'nutau' in container.name:
            apply_probs_vectorized(
                container['nu_flux'],
                FTYPE(theta),
                FTYPE(deltam31),
                container['true_energy'],
                container['true_coszen'],
                ITYPE(3),
                out=container['weights'],
            )
        if 'nue' in container.name:
            apply_probs_vectorized(
                container['nu_flux'],
                FTYPE(theta),
                FTYPE(deltam31),
                container['true_energy'],
                container['true_coszen'],
                ITYPE(0),
                out=container['weights'],
            )
        container.mark_changed('weights')
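# More compact formulation of the three branches above (a sketch only, using
# the same flavour -> index codes: nue=0, numu=1, nutau=3); the explicit `if`
# blocks in the original are arguably clearer, so this is just a design note.
def _sketch_flav_code(container_name):
    for flav_str, flav_code in (("nue", 0), ("numu", 1), ("nutau", 3)):
        if flav_str in container_name:
            return ITYPE(flav_code)
    return None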
def replace_where_counts_gt(vals, counts, min_count, out):
    """Replace `out[i]` with `vals[i]` where `counts[i]` > `min_count`"""
    replace_where_counts_gt_gufunc(vals, counts, FTYPE(min_count), out=out)
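# Hedged usage sketch (hypothetical `_sketch_replace_where_counts_gt`, not in
# the original module), assuming the gufunc implements exactly the documented
# behaviour:
def _sketch_replace_where_counts_gt():
    vals = np.array([1.0, 2.0, 3.0], dtype=FTYPE)
    counts = np.array([0.0, 5.0, 10.0], dtype=FTYPE)
    out = np.zeros(3, dtype=FTYPE)
    replace_where_counts_gt(vals, counts, 4, out)
    return out  # expected: [0.0, 2.0, 3.0] -- only entries with counts > 4 replaced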
def apply_floor(val, out):
    apply_floor_gufunc(FTYPE(val), out=out.get(WHERE))
    out.mark_changed(WHERE)
FTYPE_SIGFIGS = int(np.abs(np.ceil(np.log10(FTYPE_PREC))))
"""Significant figures possible given PISA's FTYPE"""

EQUALITY_SIGFIGS = min(HASH_SIGFIGS, FTYPE_SIGFIGS)
"""Significant figures for performing equality comparisons"""

EQUALITY_PREC = 10**-EQUALITY_SIGFIGS
"""Precision ("rtol") for performing equality comparisons"""

ALLCLOSE_KW = dict(rtol=EQUALITY_PREC, atol=0, equal_nan=True)
"""Keyword args to pass to all calls to numpy.allclose"""

# Derive the following number via:
# >>> from sympy import log, N
# >>> str(N(log(2, 10), 40))
LOG10_2 = FTYPE('0.3010299956639811952137388947244930267682')

NP_TYPES = (np.ndarray, np.matrix)
SEQ_TYPES = (Sequence, np.ndarray, np.matrix)
MAP_TYPES = (Mapping,)
COMPLEX_TYPES = NP_TYPES + SEQ_TYPES + MAP_TYPES


def isvalidname(x):
    """Name that is valid to use for a Python variable"""
    return re.compile(r'\W|^(?=\d)').match(x) is None


def isscalar(x):
    """Check if input is a scalar object.
def test_find_index():
    """Unit tests for `find_index` function.

    Correctness is defined as producing the same histogram as
    numpy.histogramdd by using the output of `find_index` (ignoring underflow
    and overflow values). Additionally, -1 should be returned if a value is
    below the range (underflow) or is nan, and num_bins should be returned for
    a value above the range (overflow).
    """
    # Negative, positive, integer, non-integer, binary-unrepresentable (0.1) edges
    basic_bin_edges = [-1, -0.5, -0.1, 0, 0.1, 0.5, 1, 2, 3, 4]

    failures = 0
    for basic_bin_edges in [
        # Negative, positive, integer, non-integer, binary-unrepresentable (0.1) edges
        [-1, -0.5, -0.1, 0, 0.1, 0.5, 1, 2, 3, 4],

        # A single infinite bin: [-np.inf, np.inf]
        [],

        # Half-infinite bins (lower or upper edge) & [-inf, .1, +inf]
        [0.1],

        # Single bin with finite edges & +/-inf-edge(s)-added variants
        [-0.1, 0.1],
    ]:
        # Bin edges from above, w/ and w/o +/-inf as left and/or right edges
        for le, re in [
            (None, None),
            (-np.inf, None),
            (None, np.inf),
            (-np.inf, np.inf),
        ]:
            bin_edges = deepcopy(basic_bin_edges)
            if le is not None:
                bin_edges = [le] + bin_edges
            if re is not None:
                bin_edges = bin_edges + [re]
            if len(bin_edges) < 2:
                continue
            logging.debug('bin_edges being tested: %s', bin_edges)
            bin_edges = np.array(bin_edges, dtype=FTYPE)

            num_bins = len(bin_edges) - 1
            underflow_idx = -1
            overflow_idx = num_bins

            #
            # Construct test values to try out
            #

            non_finite_vals = [-np.inf, +np.inf, np.nan]

            # Values within bins (i.e., not on edges)
            inbin_vals = []
            for idx in range(len(bin_edges) - 1):
                lower_be = bin_edges[idx]
                upper_be = bin_edges[idx + 1]
                if np.isfinite(lower_be):
                    if np.isfinite(upper_be):
                        inbin_val = (lower_be + upper_be) / 2
                    else:
                        inbin_val = lower_be + 10.5
                else:
                    if np.isfinite(upper_be):
                        inbin_val = upper_be - 10.5
                    else:
                        inbin_val = 10.5
                inbin_vals.append(inbin_val)

            # Values above/below bin edges by one unit of floating point
            # accuracy
            eps = np.finfo(FTYPE).eps  # pylint: disable=no-member
            below_edges_vals = [FTYPE((1 - eps) * be) for be in bin_edges]
            above_edges_vals = [FTYPE((1 + eps) * be) for be in bin_edges]

            test_vals = np.concatenate([
                non_finite_vals,
                bin_edges,
                inbin_vals,
                below_edges_vals,
                above_edges_vals,
            ])
            logging.trace('test_vals = %s', test_vals)

            #
            # Run tests
            #

            for val in test_vals:
                val = FTYPE(val)

                np_histvals, _ = np.histogramdd([val], np.atleast_2d(bin_edges))
                nonzero_indices = np.nonzero(np_histvals)[0]  # select first & only dim
                if np.isnan(val):
                    assert len(nonzero_indices) == 0, str(len(nonzero_indices))
                    expected_idx = underflow_idx
                elif val < bin_edges[0]:
                    assert len(nonzero_indices) == 0, str(len(nonzero_indices))
                    expected_idx = underflow_idx
                elif val > bin_edges[-1]:
                    assert len(nonzero_indices) == 0, str(len(nonzero_indices))
                    expected_idx = overflow_idx
                else:
                    assert len(nonzero_indices) == 1, str(len(nonzero_indices))
                    expected_idx = nonzero_indices[0]

                found_idx = find_index(val, bin_edges)

                if found_idx != expected_idx:
                    failures += 1
                    msg = 'val={}, edges={}: Expected idx={}, found idx={}'.format(
                        val, bin_edges, expected_idx, found_idx
                    )
                    logging.error(msg)

    assert failures == 0, f"{failures} failures, inspect ERROR messages above for info"

    logging.info('<< PASS : test_find_index >>')
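# Small illustration (not an additional unit test) of the contract exercised
# above, using a hypothetical set of bin edges:
def _sketch_find_index_contract():
    edges = np.array([0.0, 1.0, 2.0], dtype=FTYPE)
    assert find_index(FTYPE(0.5), edges) == 0    # inside the first bin
    assert find_index(FTYPE(-3.0), edges) == -1  # underflow (nan also maps here)
    assert find_index(FTYPE(5.0), edges) == 2    # overflow == num_bins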
def set_constant(val, out):
    set_constant_gufunc(FTYPE(val), out=out.get(WHERE))
    out.mark_changed(WHERE)
""" stage to implement getting the contribution to fluxes from astrophysical neutrino sources """ import numpy as np from pisa.utils.profiler import profile from pisa import FTYPE, TARGET from pisa.core.stage import Stage from pisa.utils.numba_tools import WHERE, myjit PIVOT = FTYPE(100.0e3) class astrophysical(Stage): """ Stage to apply power law astrophysical fluxes Parameters ---------- params Expected params are .. :: astro_delta : quantity (dimensionless) astro_norm : quantity (dimensionless) TODO: flavor ratio as a parameter? Save for later. """ def __init__(self, **std_kwargs): self._central_gamma = FTYPE(-2.5) self._central_norm = FTYPE(0.787e-18)
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''

GAUS_IMPLEMENTATIONS = ('singlethreaded', 'multithreaded')
if NUMBA_CUDA_AVAIL:
    GAUS_IMPLEMENTATIONS += ('cuda',)

PI = FTYPE(np.pi)
TWOPI = FTYPE(2 * np.pi)
SQRTPI = FTYPE(sqrt(np.pi))
SQRT2PI = FTYPE(sqrt(2 * np.pi))
PISQ = FTYPE(np.pi * np.pi)


def gaussians(x, mu, sigma, weights=None, implementation=None, **kwargs):
    """Sum of multiple Gaussian curves, normalized to have area of 1.

    Parameters
    ----------
    x : array
        Points at which to evaluate the sum of Gaussians

    mu : arrays
def set_constant(val, out):
    set_constant_gufunc(FTYPE(val), out=out)
def apply_floor(val, out):
    apply_floor_gufunc(FTYPE(val), out=out)
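# Hedged sketch of the presumed semantics of the two helpers above (an
# assumption based on their names and on the patterns used elsewhere in this
# module; `_sketch_fill_and_floor` itself is hypothetical):
def _sketch_fill_and_floor():
    out = np.array([0.2, -1.0, 3.0], dtype=FTYPE)
    set_constant(1.0, out)  # presumably fills `out` with 1.0 everywhere
    apply_floor(0.5, out)   # presumably raises entries below 0.5 up to 0.5
    return out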
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''

if numba is None:
    class jit(object):
        """Decorator class to mimic Numba's `jit` when Numba is missing"""
        def __init__(self, *args, **kwargs):
            pass

        def __call__(self, *args):
            return args[0]
else:
    jit = numba.jit
    ftype = numba.typeof(FTYPE(1))


@jit(nopython=True, nogil=True, cache=True)
def extCalcLayers(
        cz,
        r_detector,
        prop_height,
        detector_depth,
        max_layers,
        min_detector_depth,
        rhos,
        YeFrac,
        YeOuterRadius,
        default_elec_frac,
        coszen_limit,
        radii):
    """Layer density/distance calculator for each coszen specified.

    Accelerated with Numba if present.

    Parameters
    ----------
    cz
    r_detector
    prop_height
def compute_function(self):

    self.data.data_specs = self.calc_specs

    if self.calc_mode == 'binned':
        # Speed up the calculation by adding links, since the nominal flux
        # doesn't depend on the (outgoing) flavour
        self.data.link_containers('nu', ['nue_cc', 'numu_cc', 'nutau_cc',
                                         'nue_nc', 'numu_nc', 'nutau_nc'])
        self.data.link_containers('nubar', ['nuebar_cc', 'numubar_cc',
                                            'nutaubar_cc', 'nuebar_nc',
                                            'numubar_nc', 'nutaubar_nc'])

    #
    # Get params
    #

    # Spectral index (and required energy pivot)
    delta_index = self.params.delta_index.value.m_as("dimensionless")
    energy_pivot = self.params.energy_pivot.value.m_as("GeV")

    # Grab the pion ratio
    pion_ratio = self.params.pion_ratio.value.m_as("dimensionless")

    # Map the user parameters into the Barr +/- params.
    # The pi- production rates are restricted by the pi ratio, just as in arXiv:0611266.
    # TODO might want dedicated priors for pi- params (but without corresponding free params)
    gradient_params_mapping = collections.OrderedDict()
    gradient_params_mapping["af+"] = self.params.barr_af_Pi.value.m_as("dimensionless")
    gradient_params_mapping["g+"] = self.params.barr_g_Pi.value.m_as("dimensionless")
    gradient_params_mapping["h+"] = self.params.barr_h_Pi.value.m_as("dimensionless")
    gradient_params_mapping["i+"] = self.params.barr_i_Pi.value.m_as("dimensionless")
    for k in list(gradient_params_mapping.keys()):
        gradient_params_mapping[k.replace("+", "-")] = self.antipion_production(
            gradient_params_mapping[k], pion_ratio
        )

    # Kaons: as the kaon ratio is unknown, K- production is not restricted
    gradient_params_mapping["w+"] = self.params.barr_w_K.value.m_as("dimensionless")
    gradient_params_mapping["w-"] = self.params.barr_w_antiK.value.m_as("dimensionless")
    gradient_params_mapping["x+"] = self.params.barr_x_K.value.m_as("dimensionless")
    gradient_params_mapping["x-"] = self.params.barr_x_antiK.value.m_as("dimensionless")
    gradient_params_mapping["y+"] = self.params.barr_y_K.value.m_as("dimensionless")
    gradient_params_mapping["y-"] = self.params.barr_y_antiK.value.m_as("dimensionless")
    gradient_params_mapping["z+"] = self.params.barr_z_K.value.m_as("dimensionless")
    gradient_params_mapping["z-"] = self.params.barr_z_antiK.value.m_as("dimensionless")

    # Populate the Barr param array
    for (
        gradient_param_name,
        gradient_param_idx,
    ) in self.gradient_param_indices.items():
        self.gradient_params[gradient_param_idx] = gradient_params_mapping[
            gradient_param_name
        ]

    #
    # Loop over containers
    #

    for container in self.data:

        #
        # Apply the systematics to the flux
        #

        nubar = container["nubar"]
        if nubar > 0:
            flux_key = "nu_flux_nominal"
        elif nubar < 0:
            flux_key = "nubar_flux_nominal"

        apply_sys_vectorized(
            container["true_energy"].get(WHERE),
            container["true_coszen"].get(WHERE),
            FTYPE(delta_index),
            FTYPE(energy_pivot),
            container[flux_key].get(WHERE),
            container["gradients"].get(WHERE),
            self.gradient_params,
            out=container["nu_flux"].get(WHERE),
        )
        container["nu_flux"].mark_changed(WHERE)

        # Check for negative results from the spline
        # TODO - add more spline error/misusage handling,
        # e.g. throw an ERROR if events have energy outside the spline range
        negative_mask = container["nu_flux"].get("host") < 0
        if np.any(negative_mask):
            container["nu_flux"].get("host")[negative_mask] = 0.0
            container["nu_flux"].mark_changed("host")

    # don't forget to un-link everything again
    self.data.unlink_containers()
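# Small sketch (hypothetical values; `antipion_production` stubbed here, NOT
# the PISA implementation) of how the loop above fills in the "-" partners of
# the pion Barr parameters from their "+" counterparts and the pion ratio:
def _sketch_pion_pm_mapping(pion_ratio=1.1):
    def antipion_production_stub(pi_plus_value, ratio):
        return pi_plus_value * ratio  # placeholder relation only

    mapping = collections.OrderedDict([("af+", 0.1), ("g+", 0.0)])
    for k in list(mapping.keys()):
        mapping[k.replace("+", "-")] = antipion_production_stub(mapping[k], pion_ratio)
    return mapping  # keys now: "af+", "g+", "af-", "g-"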