Example #1
File: roounfold.py Project: thehrh/pisa-1
    def get_inv_eff(self, signal_data=None, gen_data=None):
        this_hash = hash_obj(
            [self.true_binning.hash, self.output_str, 'inv_eff'],
            full_hash=self.full_hash)
        assert len(set([signal_data is None, gen_data is None])) == 1
        if signal_data is None and gen_data is None:
            if self.inv_eff_hash == this_hash:
                logging.trace('Loading inv eff from mem cache')
                return self._inv_eff
            if this_hash in self.disk_cache:
                logging.debug('Loading inv eff histogram from disk cache.')
                inv_eff = self.disk_cache[this_hash]
            else:
                raise ValueError(
                    'inverse efficiency histogram with correct hash not found '
                    'in disk_cache')
        else:
            this_hash = hash_obj([this_hash, self.fit_hash],
                                 full_hash=self.full_hash)
            if self.inv_eff_hash == this_hash:
                logging.trace('Loading inv eff from mem cache')
                return self._inv_eff
            inv_eff = self._get_inv_eff(signal_data, gen_data,
                                        self.true_binning, self.output_str)

            if self.disk_cache is not None:
                if this_hash not in self.disk_cache:
                    logging.debug('Caching inv eff histogram to disk.')
                    self.disk_cache[this_hash] = inv_eff

        self.inv_eff_hash = this_hash
        self._inv_eff = inv_eff
        return inv_eff
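The method above is a two-level cache: a hash of the inputs is checked against a memory cache, then a disk cache, and only then is the quantity recomputed and persisted. Below is a minimal standalone sketch of that pattern; `stable_hash`, `CachedQuantity`, and the use of `shelve` are illustrative assumptions, not PISA's actual `hash_obj`/`disk_cache` machinery.

import hashlib
import pickle
import shelve

def stable_hash(obj):
    # Deterministic digest of any picklable object (stands in for hash_obj)
    return hashlib.sha256(pickle.dumps(obj)).hexdigest()

class CachedQuantity:
    """Sketch of a memory-then-disk cache keyed by an input hash."""
    def __init__(self, compute, disk_path):
        self.compute = compute
        self._mem_hash = None
        self._mem_value = None
        self.disk = shelve.open(disk_path)

    def get(self, inputs):
        key = stable_hash(inputs)
        if self._mem_hash == key:    # memory cache hit
            return self._mem_value
        if key in self.disk:         # disk cache hit
            value = self.disk[key]
        else:                        # miss: compute, then persist to disk
            value = self.compute(inputs)
            self.disk[key] = value
        self._mem_hash, self._mem_value = key, value
        return value

cache = CachedQuantity(lambda xs: sum(xs) / len(xs), '/tmp/inv_eff_cache')
print(cache.get([1.0, 2.0, 3.0]))  # computed and cached
print(cache.get([1.0, 2.0, 3.0]))  # served from the memory cache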
Example #2
    def binned_to_binned(self, key, new_binning):
        '''
        resample a binned key into a different binning

        Parameters
        ----------

        key : str

        new_binning : MultiDimBinning
            the new binning

        '''
        logging.debug('Resampling %s' % (key))
        old_binning, hist = self.binned_data[key]
        sample = [
            self.get_binned_data(name, old_binning)
            for name in old_binning.names
        ]
        new_sample = [
            SmartArray(self.unroll_binning(name, new_binning))
            for name in new_binning.names
        ]
        hist = resample(hist, sample, old_binning, new_sample, new_binning)

        self.add_binned_data(key, (new_binning, hist))
Example #3
File: roounfold.py Project: thehrh/pisa-1
    def load_gen_data(self):
        logging.debug('Loading generator level sample')
        unfold_pipeline_cfg = self.params['unfold_pipeline_cfg'].value
        if isinstance(unfold_pipeline_cfg, str):
            pipeline_cfg = from_file(unfold_pipeline_cfg)
            pipeline_hash = pipeline_cfg
            sa_cfg = from_file(
                pipeline_cfg.get('stage.data', 'param.data_sample_config'))
            template_maker = Pipeline(pipeline_cfg)
        elif isinstance(unfold_pipeline_cfg, Pipeline):
            pipeline_hash = unfold_pipeline_cfg.state_hash
            sa_cfg = from_file(
                unfold_pipeline_cfg.params['data_sample_config'].value)
            template_maker = unfold_pipeline_cfg
        gen_cfg = from_file(sa_cfg.get('neutrinos|gen_lvl', 'gen_cfg_file'))
        this_hash = hash_obj([gen_cfg, pipeline_hash, self.output_str],
                             full_hash=self.full_hash)
        if self.gen_data_hash == this_hash:
            return self._gen_data

        full_gen_data = template_maker.get_outputs()
        if not isinstance(full_gen_data, Data):
            raise AssertionError(
                'Output of pipeline is not a Data object, instead is type '
                '{0}'.format(type(full_gen_data)))
        trans_data = full_gen_data.transform_groups(self.output_str)
        gen_data = trans_data[self.output_str]

        self._gen_data = gen_data
        self.gen_data_hash = this_hash
        return gen_data
Example #4
    def __init__(
        self, ebins, czbins, detector_depth=None, earth_model=None, prop_height=None, osc_precision=5e-4, **kwargs
    ):
        """
        Parameters needed to instantiate a NucraftOscillationService:
        * ebins: Energy bin edges
        * czbins: cos(zenith) bin edges
        * earth_model: Earth density model used for matter oscillations.
        * detector_depth: Detector depth in km.
        * prop_height: Height in the atmosphere to begin in km.
                       Default: 'sample', samples from a parametrization to
                       the atmospheric interaction model presented in
                       "Path length distributions of atmospheric neutrinos",
                       Gaisser and Stanev, PhysRevD.57.1977
        * osc_precision: Numerical precision for oscillation probabilities
        """
        OscillationServiceBase.__init__(self, ebins, czbins)
        logging.info("Initializing %s..." % self.__class__.__name__)

        print(get_params())
        report_params(get_params(), ["km", "", "", "km"])

        self.prop_height = prop_height  # km above spherical Earth surface
        # print "\n\n self.prop_height: ",self.prop_height
        """ height_mode = 0 ensures that interaction takes place at chosen height """
        """ whereas height_mode = 1 samples single altitude from distribution """
        self.height_mode = 3 if self.prop_height == "sample" else 0
        logging.debug("NuCraft height mode: %d" % self.height_mode)
        self.detector_depth = detector_depth  # km below spherical Earth surface
        self.num_prec = osc_precision
        self.get_earth_model(earth_model)
Example #5
File: proc.py Project: olivas/pisa
def report_params(params, units):
    '''
    Print the parameter values with units
    '''
    #Print everything - must be sorted
    for key, unit in zip(sorted(params), units):
        logging.debug("%20s: %.4e %s" % (key, params[key], unit))
Example #6
    def check_kernels(self, kernels):
        """Test whether the reco kernels have the correct shape."""
        # check axes
        logging.debug('Checking binning of reconstruction kernels')
        for kernel_axis, own_axis in [(kernels['ebins'], self.ebins),
                                      (kernels['czbins'], self.czbins)]:
            if not utils.is_equal_binning(kernel_axis, own_axis):
                raise ValueError("Binning of reconstruction kernel doesn't "
                                 "match the event maps!")

        # check shape of kernels
        logging.debug('Checking shape of reconstruction kernels')
        shape = (len(self.ebins) - 1, len(self.czbins) - 1,
                 len(self.ebins) - 1, len(self.czbins) - 1)
        for flavour in kernels:
            if flavour in ['ebins', 'czbins']:
                continue
            for interaction in kernels[flavour]:
                if not np.shape(kernels[flavour][interaction]) == shape:
                    raise IndexError(
                        'Reconstruction kernel for %s/%s has wrong shape: '
                        'expected %s, got %s' %
                        (flavour, interaction, str(shape),
                         str(np.shape(kernels[flavour][interaction]))))

        logging.info('Reconstruction kernels are sane')
        return True
Example #7
def build_fisher_matrix(gradient_hist_flat_d, fiducial_hist, fiducial_params):
    # fix the ordering of parameters
    params = sorted(gradient_hist_flat_d.keys())

    # find non-empty bins in flattened map
    fiducial_hist_flat = fiducial_hist.nominal_values['total'].flatten()
    nonempty = np.nonzero(fiducial_hist_flat)
    logging.debug("Using %u non-empty bins of %u" %
                  (len(nonempty[0]), len(fiducial_hist_flat)))

    # get gradients as calculated above for non-zero bins
    gradients = np.array(
        [gradient_hist_flat_d[par][nonempty] for par in params], dtype=FTYPE)

    # get error estimate from best-fit bin count for non-zero bins
    # TODO: these are not variances
    variances = fiducial_hist['total'].std_devs.flatten()[nonempty]

    # Loop over all parameters per bin (simple transpose) and calculate Fisher
    # matrix by getting the outer product of all gradients in a bin.
    # Result is sum of matrix for all bins.
    fmatrix = np.zeros((len(params), len(params)), dtype=FTYPE)
    for bin_gradients, bin_var in zip(gradients.T, variances):
        fmatrix += np.outer(bin_gradients, bin_gradients) / bin_var

    # construct the fisher matrix object
    fisher = FisherMatrix(
        matrix=fmatrix,
        parameters=params,  # order is important here!
        best_fits=fiducial_params.nominal_values,  # TODO: fix order (in the sense of making it definite?)
        priors=None,  # FIXME: support priors
    )

    return fisher, nonempty
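The accumulation loop builds the Fisher matrix as F = sum_b g_b g_b^T / var_b over non-empty bins. A self-contained check with toy numbers (all values hypothetical), showing the loop is equivalent to a single einsum:

import numpy as np

# 3 parameters x 4 non-empty bins, plus a variance estimate per bin
gradients = np.array([[0.1, 0.2, 0.0, 0.4],
                      [0.3, 0.1, 0.2, 0.0],
                      [0.0, 0.5, 0.1, 0.2]])
variances = np.array([1.0, 2.0, 0.5, 1.5])

# Loop form, as in build_fisher_matrix above
fmatrix = np.zeros((3, 3))
for bin_gradients, bin_var in zip(gradients.T, variances):
    fmatrix += np.outer(bin_gradients, bin_gradients) / bin_var

# Vectorized equivalent: F_ij = sum_b g_ib * g_jb / var_b
fmatrix_vec = np.einsum('ib,jb->ij', gradients / variances, gradients)
assert np.allclose(fmatrix, fmatrix_vec)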
Example #8
def test_parse_pipeline_config():
    """Unit test for function `parse_pipeline_config`"""
    from argparse import ArgumentParser

    parser = ArgumentParser()
    parser.add_argument('-p',
                        '--pipeline',
                        metavar='CONFIGFILE',
                        default='settings/pipeline/example.cfg',
                        help='File containing settings for the pipeline.')
    parser.add_argument(
        '-v',
        action='count',
        default=0,
        help='Set verbosity level. Minimum is forced to level 1 (info)')
    args = parser.parse_args()
    args.v = max(1, args.v)
    set_verbosity(args.v)

    # Load via PISAConfigParser
    config0 = PISAConfigParser()
    config0.read(args.pipeline)
    _ = parse_pipeline_config(config0)

    # Load directly
    config = parse_pipeline_config(args.pipeline)

    logging.debug('Keys and values found in config:')
    for key, vals in config.items():
        logging.debug('%s: %s', key, vals)

    logging.info('<< PASS : test_parse_pipeline_config >>')
Example #9
    def __init__(self, ebins, czbins, detector_depth=None, earth_model=None,
                 prop_height=None, osc_precision=5e-4,
                 **kwargs):
        """
        Parameters needed to instantiate a NucraftOscillationService:
        * ebins: Energy bin edges
        * czbins: cos(zenith) bin edges
        * earth_model: Earth density model used for matter oscillations.
        * detector_depth: Detector depth in km.
        * prop_height: Height in the atmosphere to begin in km.
                       Default: 'sample', samples from a parametrization to
                       the atmospheric interaction model presented in
                       "Path length distributions of atmospheric neutrinos",
                       Gaisser and Stanev, PhysRevD.57.1977
        * osc_precision: Numerical precision for oscillation probabilities
        """
        OscillationServiceBase.__init__(self, ebins, czbins)
        logging.info('Initializing %s...'%self.__class__.__name__)

        print(get_params())
        report_params(get_params(), ['km', '', '', 'km'])

        self.prop_height = prop_height # km above spherical Earth surface
        #print "\n\n self.prop_height: ",self.prop_height
        # height_mode = 0 ensures that interaction takes place at chosen height,
        # whereas height_mode = 1 samples a single altitude from a distribution
        self.height_mode = 3 if self.prop_height == 'sample' else 0
        logging.debug("NuCraft height mode: %d"%self.height_mode)
        self.detector_depth = detector_depth # km below spherical Earth surface
        self.num_prec = osc_precision
        self.get_earth_model(earth_model)
Example #10
def show_frame(df):
    pd.set_option('display.max_columns', len(df))
    pd.set_option('expand_frame_repr', False)
    pd.set_option('max_rows', 20)
    logging.debug("df:\n%s" % df)

    return
Example #11
    def _compute_outputs(self, inputs=None):

        """Apply basic cuts and compute histograms for output channels."""

        logging.debug('Entering sample._compute_outputs')

        self.config = from_file(self.params['data_sample_config'].value)

        name = self.config.get('general', 'name')
        logging.trace('{0} sample sample_hash = '
                      '{1}'.format(name, self.sample_hash))
        self.load_sample_events()

        if self.params['keep_criteria'].value is not None:
            # TODO(shivesh)
            raise NotImplementedError(
                'needs check to make sure this works in a DistributionMaker'
            )
            self._data.applyCut(self.params['keep_criteria'].value)
            self._data.update_hash()

        if self.output_events:
            return self._data

        outputs = []
        if self.neutrinos:
            trans_nu_data = self._data.transform_groups(
                self._output_nu_groups
            )
            for fig in trans_nu_data.keys():
                outputs.append(trans_nu_data.histogram(
                    kinds       = fig,
                    binning     = self.output_binning,
                    weights_col = 'pisa_weight',
                    errors      = True,
                    name        = str(NuFlavIntGroup(fig)),
                ))

        if self.muons:
            outputs.append(self._data.histogram(
                kinds       = 'muons',
                binning     = self.output_binning,
                weights_col = 'pisa_weight',
                errors      = True,
                name        = 'muons',
                tex         = r'\rm{muons}'
            ))

        if self.noise:
            outputs.append(self._data.histogram(
                kinds       = 'noise',
                binning     = self.output_binning,
                weights_col = 'pisa_weight',
                errors      = True,
                name        = 'noise',
                tex         = r'\rm{noise}'
            ))

        name = self.config.get('general', 'name')
        return MapSet(maps=outputs, name=name)
Example #12
File: Scan.py Project: gkrueckl/pisa
def calc_steps(params, settings):
    """
    Get the actual grid values for each key. If settings is a list of
    values, use these directly as steps. If settings has single value,
    generate this many steps within the bounds given for the
    parameter.  Parameters are identified by names, or "*" which is
    the default for all parameters
    """

    # Collect the steps settings for each parameter
    for key in params:

        # If specific steps are given, use these
        if key in settings:
            params[key]['steps'] = settings[key]
        else:
            params[key]['steps'] = settings['*']

    # Now convert number of steps to actual steps
    for key in params:
        # ignore if we already have those
        if isinstance(params[key]['steps'], np.ndarray): continue

        #calculate the steps
        lower, upper = params[key]['range']
        nsteps = params[key]['steps']
        params[key]['steps'] = np.linspace(lower, upper, nsteps)

    # report for all
    for name, steps in [(k, v['steps']) for k, v in params.items()]:
        logging.debug("Using %u steps for %s from %.5f to %.5f" %
                      (len(steps), name, steps[0], steps[-1]))
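A hypothetical usage of calc_steps exercising both branches: one parameter gets explicit steps (used verbatim because they arrive as an ndarray), the other falls back to the "*" step count and is expanded over its range:

import numpy as np

params = {
    'theta23': {'range': (0.5, 1.0)},
    'deltam31': {'range': (2.0e-3, 3.0e-3)},
}
settings = {
    'theta23': np.linspace(0.5, 1.0, 11),  # explicit steps
    '*': 5,                                # default: number of steps
}
calc_steps(params, settings)
assert len(params['theta23']['steps']) == 11   # taken as-is
assert len(params['deltam31']['steps']) == 5   # linspace over the range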
Example #13
    def check_kernels(self, kernels):
        """Test whether the reco kernels have the correct shape."""
        # check axes
        logging.debug('Checking binning of reconstruction kernels')
        for kernel_axis, own_axis in [(kernels['ebins'], self.ebins),
                                      (kernels['czbins'], self.czbins)]:
            if not utils.is_equal_binning(kernel_axis, own_axis):
                raise ValueError("Binning of reconstruction kernel doesn't "
                                 "match the event maps!")

        # check shape of kernels
        logging.debug('Checking shape of reconstruction kernels')
        shape = (len(self.ebins)-1, len(self.czbins)-1,
                 len(self.ebins)-1, len(self.czbins)-1)
        for flavour in kernels:
            if flavour in ['ebins', 'czbins']:
                continue
            for interaction in kernels[flavour]:
                if not np.shape(kernels[flavour][interaction]) == shape:
                    raise IndexError(
                        'Reconstruction kernel for %s/%s has wrong shape: '
                        'expected %s, got %s' % (flavour, interaction, str(shape),
                                                 str(np.shape(kernels[flavour][interaction])))
                    )

        logging.info('Reconstruction kernels are sane')
        return True
Example #14
File: kde.py Project: icecube/pisa
    def setup_function(self):

        assert isinstance(
            self.apply_mode, MultiDimBinning
        ), f"KDE stage needs a binning as `apply_mode`, but is {self.apply_mode}"

        # For dimensions that are logarithmic, we add a linear binning in
        # the logarithm.
        dimensions = []
        for dim in self.apply_mode:
            if dim.is_lin:
                new_dim = deepcopy(dim)
            # We don't compute the log of the variable just yet, this
            # will be done later during `apply_function` using the
            # representation mechanism.
            # We replace the logarithmic binning with a linear binning in log-space
            elif dim.is_irregular:
                new_dim = OneDimBinning(
                    dim.name,
                    bin_edges=np.log(dim.bin_edges.m),
                )
            else:
                new_dim = OneDimBinning(dim.name,
                                        domain=np.log(dim.domain.m),
                                        num_bins=dim.num_bins)
            dimensions.append(new_dim)

        # Build the regularized binning once, after all dimensions are collected
        self.regularized_apply_mode = MultiDimBinning(dimensions)
        logging.debug("Using regularized binning:\n" +
                      repr(self.regularized_apply_mode))
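The replacement binning is linear in log-space: applying np.log to logarithmically spaced edges yields uniformly spaced edges, which is the property the KDE machinery relies on. A quick numpy check of that equivalence, independent of PISA:

import numpy as np

edges = np.logspace(0, 2, 11)   # log-spaced, e.g. 1 to 100 GeV
log_edges = np.log(edges)       # becomes linearly spaced
widths = np.diff(log_edges)
assert np.allclose(widths, widths[0])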
Example #15
    def __init__(self, flux_file=None, smooth=0.05, **params):
        logging.info("Loading atmospheric flux table %s" % flux_file)

        #Load the data table
        table = np.loadtxt(open_resource(flux_file)).T

        #columns in Honda files are in the same order
        cols = ['energy'] + primaries

        flux_dict = dict(zip(cols, table))
        for key in flux_dict.keys():

            #There are 20 lines per zenith range
            flux_dict[key] = np.array(np.split(flux_dict[key], 20))
            if not key == 'energy':
                flux_dict[key] = flux_dict[key].T

        #Set the zenith and energy range
        flux_dict['energy'] = flux_dict['energy'][0]
        flux_dict['coszen'] = np.linspace(0.95, -0.95, 20)

        #Now get a spline representation of the flux table.
        logging.debug('Make spline representation of flux')
        # do this in log of energy and log of flux (more stable)
        logE, C = np.meshgrid(np.log10(flux_dict['energy']),
                              flux_dict['coszen'])

        self.spline_dict = {}
        for nutype in primaries:
            #Get the logarithmic flux
            log_flux = np.log10(flux_dict[nutype]).T
            #Get a spline representation
            spline = bisplrep(logE, C, log_flux, s=smooth)
            #and store
            self.spline_dict[nutype] = spline
Example #16
    def __init__(self, flux_file=None, smooth=0.05, **params):
        logging.info("Loading atmospheric flux table %s" %flux_file)

        #Load the data table
        table = np.loadtxt(open_resource(flux_file)).T

        #columns in Honda files are in the same order
        cols = ['energy']+primaries

        flux_dict = dict(zip(cols, table))
        for key in flux_dict.keys():

            #There are 20 lines per zenith range
            flux_dict[key] = np.array(np.split(flux_dict[key], 20))
            if not key=='energy':
                flux_dict[key] = flux_dict[key].T

        #Set the zenith and energy range
        flux_dict['energy'] = flux_dict['energy'][0]
        flux_dict['coszen'] = np.linspace(0.95, -0.95, 20)

        #Now get a spline representation of the flux table.
        logging.debug('Make spline representation of flux')
        # do this in log of energy and log of flux (more stable)
        logE, C = np.meshgrid(np.log10(flux_dict['energy']), flux_dict['coszen'])

        self.spline_dict = {}
        for nutype in primaries:
            #Get the logarithmic flux
            log_flux = np.log10(flux_dict[nutype]).T
            #Get a spline representation
            spline = bisplrep(logE, C, log_flux, s=smooth)
            #and store
            self.spline_dict[nutype] = spline
Example #17
    def array_to_binned(self, key, binning, averaged=True):
        '''
        histogram data array into binned data

        Parameters
        ----------

        key : str

        binning : MultiDimBinning

        averaged : bool
            if True, the histogram entries are averages of the numbers that
            end up in a given bin. This for example must be used when oscillation
            probabilities are translated.....otherwise we end up with probability*count
            per bin


        right now CPU only

        ToDo: make work for n-dim

        '''
        logging.debug('Transforming %s array to binned data' % (key))
        weights = self.array_data[key]
        sample = [self.array_data[n] for n in binning.names]

        hist = histogram(sample, weights, binning, averaged)

        self.add_binned_data(key, (binning, hist))
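The averaged=True mode matters whenever the binned quantity is intensive, such as an oscillation probability, where summing would yield probability*count. A standalone numpy illustration of the difference (toy values; PISA's histogram helper is not used here):

import numpy as np

x = np.array([0.1, 0.2, 0.6, 0.7, 0.9])      # sample positions
prob = np.array([0.5, 0.7, 0.2, 0.4, 0.9])   # per-event probabilities
edges = np.array([0.0, 0.5, 1.0])

summed, _ = np.histogram(x, bins=edges, weights=prob)  # probability * count
counts, _ = np.histogram(x, bins=edges)
averaged = summed / np.maximum(counts, 1)              # mean probability per bin

print(summed)    # [1.2 1.5]
print(averaged)  # [0.6 0.5]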
Example #18
def show_frame(df):
    pd.set_option("display.max_columns", len(df))
    pd.set_option("expand_frame_repr", False)
    pd.set_option("max_rows", 20)
    logging.debug("df:\n%s" % df)

    return
Example #19
File: Scan.py Project: mamday/pisa
def calc_steps(params, settings):
    '''
    Get the actual grid values for each key. If settings is a list of
    values, use these directly as steps. If settings is a single value,
    generate this many steps within the bounds given for the parameter.
    Parameters are identified by name, or "*", which is the default for
    all parameters.
    '''

    #Collect the steps settings for each parameter
    for key in params:

        #If specific steps are given, use these
        if key in settings:
            params[key]['steps'] = settings[key]
        else:
            params[key]['steps'] = settings['*']

    #Now convert number of steps to actual steps
    for key in params:
        #ignore if we already have those
        if isinstance(params[key]['steps'], np.ndarray): continue

        #calculate the steps
        lower, upper = params[key]['range']
        nsteps = params[key]['steps']
        params[key]['steps'] = np.linspace(lower, upper, nsteps)

    #report for all
    for name, steps in [(k, v['steps']) for k, v in params.items()]:
        logging.debug("Using %u steps for %s from %.5f to %.5f" %
                      (len(steps), name, steps[0], steps[-1]))
Example #20
    def __init__(self, ebins, czbins, aeff_egy_par, aeff_coszen_par, **params):
        '''
        Parameters:
        * aeff_egy_par - effective area vs. energy 1D parameterizations for
          each flavor, in a text file (.dat)
        * aeff_coszen_par - 1D coszen parameterization for each flavor, as a
          JSON string
        '''
        logging.info('Initializing AeffServicePar...')

        self.ebins = ebins
        self.czbins = czbins

        ## Load the info from .dat files into a dict...
        ## Parametric approach treats all NC events the same
        aeff2d_nc = self.get_aeff_flavor('NC', aeff_egy_par, aeff_coszen_par)
        aeff2d_nc_bar = self.get_aeff_flavor('NC_bar', aeff_egy_par, aeff_coszen_par)

        self.aeff_dict = {}
        logging.info("Creating effective area parametric dict...")
        for flavor in ['nue', 'nue_bar', 'numu', 'numu_bar', 'nutau', 'nutau_bar']:
            flavor_dict = {}
            logging.debug("Working on %s effective areas" % flavor)

            aeff2d = self.get_aeff_flavor(flavor, aeff_egy_par, aeff_coszen_par)

            flavor_dict['cc'] = aeff2d
            flavor_dict['nc'] = aeff2d_nc_bar if 'bar' in flavor else aeff2d_nc

            self.aeff_dict[flavor] = flavor_dict

        return
Example #21
File: jsons.py Project: mamday/pisa
def to_json(content, filename, indent=2):
    """Write content to a JSON file using a custom encoder that automatically
    converts numpy arrays to lists."""
    with open(filename, 'w') as outfile:
        json.dump(content, outfile, cls=NumpyEncoder,
                  indent=indent, sort_keys=True)
        logging.debug('Wrote %.2f kBytes to %s' %
                      (outfile.tell()/1024., os.path.basename(filename)))
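NumpyEncoder itself is defined elsewhere in the project; below is a minimal sketch of what such an encoder can look like (an illustrative assumption, not PISA's actual class):

import json
import numpy as np

class NumpyEncoder(json.JSONEncoder):
    """Sketch: fall back to list/scalar conversion for numpy types."""
    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, np.generic):
            return obj.item()
        return super().default(obj)

print(json.dumps({'edges': np.linspace(0, 1, 3)}, cls=NumpyEncoder))
# {"edges": [0.0, 0.5, 1.0]}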
Example #22
def test_hash_file():
    """Unit tests for `hash_file` function"""
    file_hash = hash_file(resource_filename('pisa.utils', 'hash.py'))
    logging.debug(file_hash)
    file_hash = hash_file(resource_filename('pisa.utils', 'hash.py'),
                          full_hash=False)
    logging.debug(file_hash)
    logging.info('<< PASS : test_hash_file >>')
Example #23
    def unlink_containers(self):
        '''
        unlink all containers
        '''
        logging.debug('Unlinking all containers')
        for c in self.linked_containers:
            c.unlink()
        self.linked_containers = []
Example #24
File: fit.py Project: terliuk/pisa
    def _compute_outputs(self, inputs=None):
        """Compute histograms for output channels."""
        logging.debug('Entering fit._compute_outputs')
        if not isinstance(inputs, Data):
            raise AssertionError('inputs is not a Data object, instead is '
                                 'type {0}'.format(type(inputs)))
        self.weight_hash = deepcopy(inputs.metadata['weight_hash'])
        logging.trace('{0} fit weight_hash = '
                      '{1}'.format(inputs.metadata['name'], self.weight_hash))
        logging.trace('{0} fit fit_hash = '
                      '{1}'.format(inputs.metadata['name'], self.fit_hash))
        self._data = inputs
        self.reweight()

        if self.output_events:
            return self._data

        outputs = []
        if self.neutrinos:
            trans_nu_data = self._data.transform_groups(
                self._output_nu_groups
            )
            for fig in trans_nu_data.keys():
                outputs.append(
                    trans_nu_data.histogram(
                        kinds=fig,
                        binning=self.output_binning,
                        weights_col='pisa_weight',
                        errors=True,
                        name=str(NuFlavIntGroup(fig)),
                    )
                )

        if self.muons:
            outputs.append(
                self._data.histogram(
                    kinds='muons',
                    binning=self.output_binning,
                    weights_col='pisa_weight',
                    errors=True,
                    name='muons',
                    tex=text2tex('muons')
                )
            )

        if self.noise:
            outputs.append(
                self._data.histogram(
                    kinds='noise',
                    binning=self.output_binning,
                    weights_col='pisa_weight',
                    errors=True,
                    name='noise',
                    tex=text2tex('noise')
                )
            )

        return MapSet(maps=outputs, name=self._data.metadata['name'])
Example #25
File: proc.py Project: mamday/pisa
def report_params(params, units):
    """
    Print the parameter values with units
    """
    formatter = {int: "%i", float: "%.4e", str: "%s", bool: "%s", type(None): "%s"}
    # Print everything - must be sorted
    forms = [formatter[type(v)] for k, v in sorted(params.items())]
    for key, form, unit in zip(sorted(params), forms, units):
        logging.debug(("%20s: " + form + " %s") % (key, params[key], unit))
Example #26
File: Aeff.py Project: lkijmj/pisa
def get_event_rates(osc_flux_maps,aeff_service,livetime=None,nu_nubar_ratio=None,
                    aeff_scale=None,**kwargs):
    '''
    Main function for this module, which returns the event rate maps
    for each flavor and interaction type, using true energy and zenith
    information. The content of each bin will be the weighted aeff
    multiplied by the oscillated flux, so that the returned dictionary
    will be of the form:
    {'nue': {'cc':map,'nc':map},
     'nue_bar': {'cc':map,'nc':map}, ...
     'nutau_bar': {'cc':map,'nc':map} }
    Params:
      * osc_flux_maps - maps containing oscillated fluxes
      * aeff_service - the effective area service to use
      * livetime - detector livetime for which to calculate event counts
      * nu_nubar_ratio - systematic to be a proxy for the realistic
        counts_nue(cc/nc) / counts_nuebar(cc/nc), ... ratios,
        keeping the total flavour counts constant.
        The adjusted ratios are given by "nu_nubar_ratio * original ratio".
      * aeff_scale - systematic to be a proxy for the realistic effective area
    '''

    #Get parameters used here
    params = get_params()
    report_params(params,units = ['','yrs',''])

    #Initialize return dict
    event_rate_maps = {'params': add_params(params,osc_flux_maps['params'])}

    #Get effective area
    aeff_dict = aeff_service.get_aeff()

    ebins, czbins = get_binning(osc_flux_maps)

    # apply the scaling for nu_xsec_scale and nubar_xsec_scale...
    flavours = ['nue','numu','nutau','nue_bar','numu_bar','nutau_bar']
    for flavour in flavours:
        osc_flux_map = osc_flux_maps[flavour]['map']
        int_type_dict = {}
        for int_type in ['cc','nc']:
            event_rate = osc_flux_map*aeff_dict[flavour][int_type]*aeff_scale

            event_rate *= (livetime*Julian_year)
            int_type_dict[int_type] = {'map':event_rate,
                                       'ebins':ebins,
                                       'czbins':czbins}
            logging.debug("  Event Rate before reco for %s/%s: %.2f"
                          %(flavour,int_type,np.sum(event_rate)))
        event_rate_maps[flavour] = int_type_dict

    # now scale the nu(e/mu/tau) / nu(e/mu/tau)bar event count ratios, keeping the total
    # (nue + nuebar etc.) constant
    if nu_nubar_ratio != 1.:
        return apply_nu_nubar_ratio(event_rate_maps, nu_nubar_ratio)

    # else: no scaling to be applied
    return event_rate_maps
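apply_nu_nubar_ratio is not shown here; per the docstring it rescales the nu/nubar split while keeping the summed counts constant. One way to achieve that (a hedged sketch, not necessarily PISA's implementation) is to scale the neutrino map and then renormalize the pair per bin:

import numpy as np

def scale_nu_nubar_pair(nu_map, nubar_map, ratio):
    """Sketch: multiply the nu/nubar ratio by `ratio`, conserving
    nu_map + nubar_map in every bin."""
    total = nu_map + nubar_map
    scaled_nu = nu_map * ratio
    norm = total / (scaled_nu + nubar_map)  # per-bin renormalization
    return scaled_nu * norm, nubar_map * norm

nu = np.array([10.0, 4.0])
nubar = np.array([5.0, 8.0])
nu2, nubar2 = scale_nu_nubar_pair(nu, nubar, 1.2)
assert np.allclose(nu2 + nubar2, nu + nubar)            # totals conserved
assert np.allclose((nu2 / nubar2) / (nu / nubar), 1.2)  # ratio rescaled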
Example #27
def populate_pid(mc_events,
                 param_source,
                 cut_val=0,
                 random_state=None,
                 dist='discrete',
                 **dist_kwargs):
    """Construct a 'pid' field within the `mc_events` object.

    Parameters
    ----------
    mc_events : pisa.core.Events
    param_source
    cut_val
    random_state
    dist

    """
    random_state = get_random_state(random_state)
    logging.info('  Classifying events as tracks or cascades')

    dist_allowed = ('discrete', 'normal')
    assert dist in dist_allowed

    pid_param = load_pid_energy_param(param_source)

    for flavint in mc_events.flavints:
        pid_funcs = None
        for flavintgroup, funcs in pid_param.items():
            if flavint in flavintgroup:
                pid_funcs = funcs
        if pid_funcs is None:
            raise ValueError('Could not find pid param for %s' % flavint)

        reco_energies = mc_events[flavint]['reco_energy']
        track_pid_probs = pid_funcs['track'](reco_energies)
        cascade_pid_probs = pid_funcs['cascade'](reco_energies)
        assert np.all(np.isclose(track_pid_probs + cascade_pid_probs, 1))
        if dist == 'discrete':
            logging.debug('  Drawing discrete PID values')
            rands = random_state.uniform(size=len(reco_energies))
            pid_vals = np.where(rands <= track_pid_probs, cut_val + 1,
                                cut_val - 1)
        elif dist == 'normal':
            logging.debug('  Drawing normally distributed PID values')
            # cascades fall below `cut_val`, tracks above
            locs_shifted = cut_val - norm.ppf(cascade_pid_probs, **dist_kwargs)
            assert recursiveEquality(
                norm(loc=locs_shifted, **dist_kwargs).cdf(cut_val),
                cascade_pid_probs)
            rv = norm(loc=locs_shifted, **dist_kwargs)
            # size is important in the following, as otherwise all samples are
            # 100% correlated
            pid_vals = rv.rvs(size=len(reco_energies))
        mc_events[flavint]['pid'] = pid_vals.astype(FTYPE)

    return mc_events
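The dist='normal' branch relies on the identity that shifting a unit normal's location to cut_val - ppf(p) puts exactly probability mass p below cut_val, which is what the recursiveEquality assertion above verifies. A compact standalone check of the same identity:

import numpy as np
from scipy.stats import norm

cut_val = 0.0
cascade_probs = np.array([0.1, 0.5, 0.9])

# Shift each unit-width normal so that cdf(cut_val) == cascade probability
locs = cut_val - norm.ppf(cascade_probs)
assert np.allclose(norm(loc=locs).cdf(cut_val), cascade_probs)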
Example #28
File: dfUtils.py Project: tarlen5/pisa
def show_frame(df, nrows=20):
    """
    Shows all columns of data frame, no matter how large it is. Number
    of rows to show is configurable.
    """
    pd.set_option('display.max_columns', len(df))
    pd.set_option('expand_frame_repr', False)
    pd.set_option('max_rows', nrows)
    logging.debug("df:\n%s" % df)

    return
Example #29
    def bivariate_spline(flux_dict, cz_centers, en_centers, smooth=0.02):
        """Spline the flux."""
        logging.debug('Entering mceq.bivariate_spline')
        Cz, logE = np.meshgrid(cz_centers, np.log10(en_centers))

        spline_dict = OrderedDict()
        for nu in flux_dict.keys():
            log_flux = np.log10(flux_dict[nu]).T
            spline = interpolate.bisplrep(Cz, logE, log_flux, s=smooth)
            spline_dict[nu] = spline
        return spline_dict
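For context, splines produced by bisplrep are evaluated later with scipy's bisplev; a round trip on a toy log-flux surface (all numbers hypothetical):

import numpy as np
from scipy import interpolate

cz_centers = np.linspace(-0.9, 0.9, 10)
en_centers = np.logspace(0, 2, 12)
Cz, logE = np.meshgrid(cz_centers, np.log10(en_centers))
log_flux = -2.0 * logE + 0.1 * Cz   # toy surface

spline = interpolate.bisplrep(Cz, logE, log_flux, s=0.02)
# bisplev expects sorted 1-d coordinate arrays and returns a 2-d grid
vals = interpolate.bisplev(np.array([0.0, 0.5]), np.array([0.5, 1.5]), spline)
print(np.asarray(vals).shape)  # (2, 2)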
Example #30
File: Aeff.py Project: gkrueckl/pisa
def get_event_rates(osc_flux_maps,
                    aeff_service,
                    livetime=None,
                    aeff_scale=None,
                    **kwargs):
    '''
    Main function for this module, which returns the event rate maps
    for each flavor and interaction type, using true energy and zenith
    information. The content of each bin will be the weighted aeff
    multiplied by the oscillated flux, so that the returned dictionary
    will be of the form:
    {'nue': {'cc':map,'nc':map},
     'nue_bar': {'cc':map,'nc':map}, ...
     'nutau_bar': {'cc':map,'nc':map} }
    Params:
      * osc_flux_maps - maps containing oscillated fluxes
      * aeff_service - the effective area service to use
      * livetime - detector livetime for which to calculate event counts
      * aeff_scale - systematic to be a proxy for the realistic effective area
    '''

    #Get parameters used here
    params = get_params()
    report_params(params, units=['', 'yrs', ''])

    #Initialize return dict
    event_rate_maps = {'params': add_params(params, osc_flux_maps['params'])}

    #Get effective area
    aeff_dict = aeff_service.get_aeff()

    ebins, czbins = get_binning(osc_flux_maps)

    # apply the scaling for nu_xsec_scale and nubar_xsec_scale...
    flavours = ['nue', 'numu', 'nutau', 'nue_bar', 'numu_bar', 'nutau_bar']
    for flavour in flavours:
        osc_flux_map = osc_flux_maps[flavour]['map']
        int_type_dict = {}
        for int_type in ['cc', 'nc']:
            event_rate = osc_flux_map * aeff_dict[flavour][int_type] * aeff_scale

            event_rate *= (livetime * Julian_year)
            int_type_dict[int_type] = {
                'map': event_rate,
                'ebins': ebins,
                'czbins': czbins
            }
            logging.debug("  Event Rate before reco for %s/%s: %.2f" %
                          (flavour, int_type, np.sum(event_rate)))
        event_rate_maps[flavour] = int_type_dict

    # else: no scaling to be applied
    return event_rate_maps
Example #31
def main():
    """Run `add_fluxes_to_file` function with arguments from command line"""
    args = parse_args()
    set_verbosity(args.v)

    flux_table = load_2d_table(args.flux_file)
    flux_file_bname, ext = splitext(basename(args.flux_file))

    input_paths = []
    for input_path in args.input:
        if isdir(input_path):
            for filename in listdir(input_path):
                filepath = join(input_path, filename)
                input_paths.append(filepath)

        else:
            input_paths += glob.glob(input_path)

    input_paths = nsort(input_paths)

    paths_to_process = []
    basenames = []
    for input_path in input_paths:
        if isdir(input_path):
            logging.debug('Path "%s" is a directory, skipping', input_path)
            continue

        firstpart, ext = splitext(input_path)
        if ext.lstrip('.') not in HDF5_EXTS:
            logging.debug('Path "%s" is not an HDF5 file, skipping',
                          input_path)
            continue

        bname = basename(firstpart)
        if bname in basenames:
            raise ValueError(
                'Found files with duplicate basename "%s" (despite files'
                ' having different paths); resolve the ambiguous names and'
                ' re-run. Offending files are:\n  "%s"\n  "%s"' %
                (bname, paths_to_process[basenames.index(bname)], input_path))

        basenames.append(bname)
        paths_to_process.append(input_path)

    logging.info('Will process %d input file(s)...', len(paths_to_process))

    for filepath in paths_to_process:
        logging.info('Working on input file "%s"', filepath)
        add_fluxes_to_file(data_file_path=filepath,
                           flux_table=flux_table,
                           flux_name='nominal',
                           outdir=args.outdir,
                           label=flux_file_bname)
Example #32
File: container.py Project: thehrh/pisa-1
    def binned_to_array(self, key):
        """Augment binned data to array data"""
        try:
            binning, hist = self.binned_data[key]
        except KeyError:
            if key in self.array_data:
                logging.debug('No transformation for `%s` array data in container `%s`' % (key, self.name))
                return
            else:
                raise ValueError('Key `%s` does not exist in container `%s`' % (key, self.name))
        logging.debug('Transforming %s binned to array data' % (key))
        sample = [self.array_data[n] for n in binning.names]
        self.add_array_data(key, lookup(sample, hist, binning))
Example #33
File: proc.py Project: mamday/pisa
def report_params(params, units):
    '''
    Print the parameter values with units
    '''
    formatter = {int: '%i',
                 float: '%.4e',
                 str: '%s',
                 bool: '%s',
                 type(None): '%s'}
    #Print everything - must be sorted
    forms = [formatter[type(v)] for k, v in sorted(params.items())]
    for key, form, unit in zip(sorted(params), forms, units):
        logging.debug(('%20s: ' + form + ' %s') % (key, params[key], unit))
Example #34
    def __init__(self, ebins, czbins):
        """
        Parameters needed to instantiate any oscillation service:
        * ebins: Energy bin edges
        * czbins: cos(zenith) bin edges
        If further member variables are needed, extend this method.
        """
        logging.debug('Instantiating %s' % self.__class__.__name__)
        self.ebins = np.array(ebins)
        self.czbins = np.array(czbins)
        for ax in [self.ebins, self.czbins]:
            if len(np.shape(ax)) != 1:
                raise IndexError('Axes must be 1d! ' + str(np.shape(ax)))
Example #35
File: proc.py Project: schultel/pisa
def report_params(params,units):
    '''
    Print the parameter values with units
    '''
    formatter = {int: '%i',
                 float: '%.4e',
                 str: '%s',
                 bool: '%s',
                 type(None): '%s'}
    #Print everything - must be sorted
    forms = [formatter[type(v)] for k, v in sorted(params.items())]
    for key, form, unit in zip(sorted(params), forms, units):
        logging.debug(('%20s: ' + form + ' %s') % (key, params[key], unit))
Example #36
    def setup_function(self):
        if self.divide_n:
            self.data.representation = "events"
            for container in self.data:
                self.total_mc[container.name] = container.size
                logging.debug(
                    f"{container.size} mc events in container {container.name}"
                )
        self.data.representation = self.calc_mode
        for container in self.data:
            container["manual_variance"] = np.empty((container.size),
                                                    dtype=FTYPE)
            if "errors" not in container.keys:
                container["errors"] = np.empty((container.size), dtype=FTYPE)
Example #37
def earth_model(YeI, YeO, YeM, PREM_file='osc/nuSQuIDS_PREM.dat'):  # pylint: disable=invalid-name
    """Return a `nuSQUIDSpy.EarthAtm` object with
    user-defined electron fractions. Note that a
    temporary Earth model file is produced
    (overwritten) each time this function is executed.

    Parameters
    ----------
    YeI, YeO, YeM : float
        electron fractions in Earth's inner core,
        outer core, and mantle
        (defined by spherical shells with radii of
         1121.5, 3480.0, and 6371.0 km)
    PREM_file : str
        path to nuSQuIDS PREM Earth Model file whose
        electron fractions will be modified

    Returns
    -------
    earth_atm : nuSQUIDSpy.EarthAtm
        can be passed to `Set_EarthModel` method of
        a nuSQuIDs propagator object
    """
    logging.debug("Regenerating nuSQuIDS Earth Model with electron"
                  " fractions: YeI=%s, YeO=%s, YeM=%s" % (YeI, YeO, YeM))
    earth_radius = 6371.0 # km
    # radii at which main transitions occur according to PREM
    transition_radii = np.array([1121.5, 3480.0, earth_radius]) # km

    fname_tmp = os.path.join(CACHE_DIR, "nuSQuIDS_PREM_TMP.dat")
    PREM_file = from_file(fname=PREM_file, as_array=True)
    for i, (r, _, _) in enumerate(PREM_file):
        # r is fraction of total radius
        current_radius = r*earth_radius
        if current_radius <= transition_radii[0]:
            # inner core region
            Ye_new = YeI
        elif current_radius <= transition_radii[1]:
            # outer core region
            Ye_new = YeO
        elif current_radius <= transition_radii[2]:
            # mantle region
            Ye_new = YeM
        # update electron fraction
        PREM_file[i][2] = Ye_new
    # make temporary file
    np.savetxt(fname=fname_tmp, X=PREM_file)
    # create and return the Earth model from file
    earth_atm = nsq.EarthAtm(fname_tmp)
    return earth_atm
Example #38
    def __init__(self, template_settings, ebins=None, czbins=None,
                 oversample_e=None, oversample_cz=None, **kwargs):
        '''
        TemplateMaker class handles all of the setup and calculation of the
        templates for a given binning.

        Parameters:
        * template_settings - dictionary of all template-making settings
        * ebins - energy bin edges
        * czbins - coszen bin edges
        '''

        self.ebins = ebins
        self.czbins = czbins
        self.oversample_e = oversample_e
        self.oversample_cz = oversample_cz
        logging.debug("Using %u bins in energy from %.2f to %.2f GeV"%
                      (len(self.ebins)-1,self.ebins[0],self.ebins[-1]))
        logging.debug("Using %u bins in cos(zenith) from %.2f to %.2f"%
                      (len(self.czbins)-1,self.czbins[0],self.czbins[-1]))

        #Instantiate a flux model service
        self.flux_service = HondaFluxService(**template_settings)

        # Oscillated Flux:
        if template_settings['osc_code'] == 'prob3':
            self.osc_service = Prob3OscillationService(self.ebins, self.czbins,
                                                       **template_settings)
        else:
            raise NotImplementedError('OscillationService is only implemented '
                                      'for prob3! osc_code = %s'
                                      % template_settings['osc_code'])

        # Aeff/True Event Rate:
        if template_settings['parametric']:
            logging.info(" Using effective area from PARAMETRIZATION...")
            self.aeff_service = AeffServicePar(self.ebins,self.czbins,
                                               **template_settings)
        else:
            logging.info(" Using effective area from MC EVENT DATA...")
            self.aeff_service = AeffServiceMC(self.ebins,self.czbins,
                                              **template_settings)

        # Reco Event Rate:
        self.reco_service = RecoServiceMC(self.ebins,self.czbins,
                                          **template_settings)

        # PID Service:
        self.pid_service = PIDServicePar(self.ebins,self.czbins,
                                         **template_settings)

        return
Example #39
    def __init__(self, ebins, czbins, **kwargs):
        """
        Parameters needed to instantiate any reconstruction service:
        * ebins: Energy bin edges
        * czbins: cos(zenith) bin edges
        If further member variables are needed, override this method.
        """
        logging.debug('Instantiating %s' % self.__class__.__name__)
        self.ebins = ebins
        self.czbins = czbins
        for ax in [self.ebins, self.czbins]:
            if len(np.shape(ax)) != 1:
                raise IndexError('Axes must be 1d! ' + str(np.shape(ax)))

        # Get kernels already now. Can be recalculated later, if needed.
        self.kernels = self.get_reco_kernels(**kwargs)
Example #40
    def __init__(self, ebins, czbins, **kwargs):
        """
        Parameters needed to instantiate any reconstruction service:
        * ebins: Energy bin edges
        * czbins: cos(zenith) bin edges
        If further member variables are needed, override this method.
        """
        logging.debug('Instantiating %s' % self.__class__.__name__)
        self.ebins = ebins
        self.czbins = czbins
        for ax in [self.ebins, self.czbins]:
            if len(np.shape(ax)) != 1:
                raise IndexError('Axes must be 1d! '+str(np.shape(ax)))

        # Get kernels already now. Can be recalculated later, if needed.
        self.kernels = self.get_reco_kernels(**kwargs)
Example #41
    def get_osc_prob_maps(self, **kwargs):
        """
        Returns an oscillation probability map dictionary calculated
        at the values of the input parameters:
          deltam21,deltam31,theta12,theta13,theta23,deltacp
        for flavor_from to flavor_to, with the binning of ebins,czbins.
        The dictionary is formatted as:
          'nue_maps': {'nue':map,'numu':map,'nutau':map},
          'numu_maps': {...}
          'nue_bar_maps': {...}
          'numu_bar_maps': {...}
        NOTES:
          * expects all angles in [rad]
          * this method doesn't calculate the oscillation probabilities
            itself, but calls get_osc_probLT_dict internally, to get a
            high resolution map of the oscillation probs,
        """

        #Get the finely binned maps as implemented in the derived class
        logging.info('Retrieving finely binned maps')
        with Timer(verbose=False) as t:
            fine_maps = self.get_osc_probLT_dict(**kwargs)
        print "       ==> elapsed time to get all fine maps: %s sec" % t.secs

        logging.info("Smoothing fine maps...")
        smoothed_maps = {}
        smoothed_maps['ebins'] = self.ebins
        smoothed_maps['czbins'] = self.czbins

        with Timer(verbose=False) as t:
            for from_nu, tomap_dict in fine_maps.items():
                if 'vals' in from_nu: continue
                new_tomaps = {}
                for to_nu, pvals in tomap_dict.items():
                    logging.debug("Getting smoothed map %s/%s" %
                                  (from_nu, to_nu))

                    new_tomaps[to_nu] = get_smoothed_map(
                        pvals, fine_maps['evals'], fine_maps['czvals'],
                        self.ebins, self.czbins)

                smoothed_maps[from_nu] = new_tomaps

        tprofile.debug("       ==> elapsed time to smooth maps: %s sec" %
                       t.secs)

        return smoothed_maps
Example #42
    def _compute_outputs(self, inputs=None):
        """Apply basic cuts and compute histograms for output channels."""

        logging.debug('Entering events_to_data._compute_outputs')

        #Hashing
        #TODO What should I hash??
        hash_property = [
            self.events_file, self.params['dataset'].value, self.output_names
        ]
        this_hash = hash_obj(hash_property, full_hash=self.full_hash)
        #if this_hash == self.sample_hash: #TODO Fix this and replace...
        #    return

        #TODO Check there are no inputs

        #Fill an events instance from a file
        events = Events(self.events_file)

        #TODO Handle nominal, etc, etc datasets?

        #Extract the neutrino data from the 'Events' instance
        nu_data = []
        flav_fidg = FlavIntDataGroup(flavint_groups=events.flavints)
        for flavint in events.present_flavints:
            flav_fidg[flavint] = {
                var: events[flavint][var]
                for var in events[flavint].keys()
            }
        nu_data.append(flav_fidg)

        #Create the data instance, including the metadata
        #Note that there is no muon or noise data  in the 'Events'
        data = Data(reduce(add, nu_data), metadata=deepcopy(events.metadata))

        #Make cuts
        if self.params['keep_criteria'].value is not None:
            #Apply the cut to the freshly built `data` instance (self._data is
            #never set in this method)  #TODO Shivesh says this needs testing
            data.applyCut(self.params['keep_criteria'].value)
            data.update_hash()

        #Update hashes
        self.sample_hash = this_hash
        data.metadata['sample_hash'] = this_hash
        data.update_hash()

        return data
Example #43
    def get_osc_prob_maps(self, **kwargs):
        """
        Returns an oscillation probability map dictionary calculated
        at the values of the input parameters:
          deltam21,deltam31,theta12,theta13,theta23,deltacp
        for flavor_from to flavor_to, with the binning of ebins,czbins.
        The dictionary is formatted as:
          'nue_maps': {'nue':map,'numu':map,'nutau':map},
          'numu_maps': {...}
          'nue_bar_maps': {...}
          'numu_bar_maps': {...}
        NOTES:
          * expects all angles in [rad]
          * this method doesn't calculate the oscillation probabilities
            itself, but calls get_osc_probLT_dict internally, to get a
            high resolution map of the oscillation probs,
        """

        # Get the finely binned maps as implemented in the derived class
        logging.info("Retrieving finely binned maps")
        with Timer(verbose=False) as t:
            fine_maps = self.get_osc_probLT_dict(**kwargs)
        print "       ==> elapsed time to get all fine maps: %s sec" % t.secs

        logging.info("Smoothing fine maps...")
        smoothed_maps = {}
        smoothed_maps["ebins"] = self.ebins
        smoothed_maps["czbins"] = self.czbins

        with Timer(verbose=False) as t:
            for from_nu, tomap_dict in fine_maps.items():
                if "vals" in from_nu:
                    continue
                new_tomaps = {}
                for to_nu, pvals in tomap_dict.items():
                    logging.debug("Getting smoothed map %s/%s" % (from_nu, to_nu))

                    new_tomaps[to_nu] = get_smoothed_map(
                        pvals, fine_maps["evals"], fine_maps["czvals"], self.ebins, self.czbins
                    )

                smoothed_maps[from_nu] = new_tomaps

        profile.debug("       ==> elapsed time to smooth maps: %s sec" % t.secs)

        return smoothed_maps
Example #44
    def link_containers(self, key, names):
        '''
        Parameters
        ----------

        key : str
            name of linked object

        names : list
            names of containers to be linked under the given key

        When containers are linked, they are treated as a single (virtual)
        container for binned data.
        '''
        containers = [self.__getitem__(name) for name in names]
        logging.debug('Linking containers %s into %s' % (names, key))
        new_container = VirtualContainer(key, containers)
        self.linked_containers.append(new_container)
Example #45
def plot_column(dkey, hkey, subplot, column, template_settings, color, plot_param_info=True, pbins=20):
    """Plot column information"""

    col_name = column.name
    if "llh" not in col_name:
        prior, inj_value, prange, scale = get_col_info(col_name, dkey, hkey, template_settings)
        column = scale * column

    if bool(re.match("^theta", col_name)):
        column = np.rad2deg(column)
        prior = np.rad2deg(prior)
        inj_value = np.rad2deg(inj_value)
        prange = np.rad2deg(prange)

    std = column.std()
    mean = column.mean()

    ax = plt.subplot(2, 2, subplot)
    logging.debug("Processing column: %s" % col_name)

    hist, xbins, patches = plt.hist(column, histtype="step", lw=2, color=color, bins=pbins)
    plt.title(col_name)  # ,fontsize='large')
    plt.grid(True)

    # Plot extra info about priors, injected val, mean, range, etc.
    if plot_param_info:
        ylim = ax.get_ylim()
        ymax = ylim[1]

        # First, plot mean and std dev:
        plot_mean_std(mean, std, ymax, ax)

        # Next: plot injected_val, prior, and bound
        if col_name != "llh":
            plot_injected_val(scale * inj_value, ymax)
            plot_prior(scale * prior, scale * inj_value, ymax, ax)

            # Finally, plot bound:
            plot_bound(scale * prange, ymax, ax)

        ax.set_xlim([mean - 5.0 * std, mean + 5.0 * std])
        ax.set_ylim([ylim[0], ymax * 1.2])

        plt.legend(loc="best", framealpha=0.5)  # ,fontsize='large')

    return
Example #46
    def get_earth_model(self, model):
        """
        Check whether the specified Earth density profile has a correct
        NuCraft preface. If not, create a temporary file that does.
        """
        logging.debug('Trying to construct Earth model from "%s"' % model)
        try:
            resource_path = find_resource(model)
            self.earth_model = EarthModel(resource_path)
            logging.info("Loaded Earth model from %s" % model)
        except SyntaxError:
            # Probably the file is lacking the correct preamble
            logging.info(
                "Failed to construct NuCraft Earth model directly from"
                " %s! Adding default preamble..." % resource_path
            )
            # Generate tempfile with preamble
            with open(resource_path, "r") as infile:
                profile_lines = infile.readlines()
            preamble = [
                "# nuCraft Earth model with PREM density "
                "values for use as template; keep structure "
                "of the first six lines unmodified!\n",
                "(0.4656,0.4656,0.4957)   # tuple of (relative) "
                #'(0.5, 0.5, 0.5)   # tuple of (relative) '
                "electron numbers for mantle, outer core, "
                "and inner core\n",
                "6371.    # radius of the Earth\n",
                "3480.    # radius of the outer core\n",
                "1121.5   # radius of the inner core\n",
                "# two-columned list of radii and corresponding "
                "matter density values in km and kg/dm^3; "
                "add, remove or modify lines as necessary\n",
            ]
            tfile = NamedTemporaryFile()
            tfile.writelines(preamble + profile_lines)
            tfile.flush()
            try:
                self.earth_model = EarthModel(tfile.name)
            except:
                logging.error("Could not construct Earth model from %s: %s" % (model, sys.exc_info()[1]))
                sys.exit(1)
            logging.info("Successfully constructed Earth model")
            tfile.close()
        except IOError:
            logging.info('Using NuCraft built-in Earth model "%s"' % model)
            self.earth_model = EarthModel(model)
Example #47
File: bootstrap.py Project: icecube/pisa
    def setup_function(self):
        logging.debug(f"Setting up bootstrap with seed: {self.seed}")

        from numpy.random import default_rng

        rng = default_rng(self.seed)

        for container in self.data:
            sample_size = container["weights"].size
            # indices of events are randomly chosen from the entire sample until
            # we have a new sample of the same size
            sample_idx = rng.integers(sample_size, size=sample_size)
            # Instead of manipulating all of the data arrays, we count how often each
            # index was chosen and take that as a weight, i.e. an event that was selected
            # twice will have a weight of 2.
            sample_weights = np.bincount(sample_idx, minlength=sample_size)
            container["bootstrap_weights"] = sample_weights
Example #48
File: Aeff.py Project: gkrueckl/pisa
def get_event_rates(osc_flux_maps, aeff_service, livetime=None, aeff_scale=None, **kwargs):
    """
    Main function for this module, which returns the event rate maps
    for each flavor and interaction type, using true energy and zenith
    information. The content of each bin will be the weighted aeff
    multiplied by the oscillated flux, so that the returned dictionary
    will be of the form:
    {'nue': {'cc':map,'nc':map},
     'nue_bar': {'cc':map,'nc':map}, ...
     'nutau_bar': {'cc':map,'nc':map} }
    Params:
      * osc_flux_maps - maps containing oscillated fluxes
      * aeff_service - the effective area service to use
      * livetime - detector livetime for which to calculate event counts
      * aeff_scale - systematic to be a proxy for the realistic effective area
    """

    # Get parameters used here
    params = get_params()
    report_params(params, units=["", "yrs", ""])

    # Initialize return dict
    event_rate_maps = {"params": add_params(params, osc_flux_maps["params"])}

    # Get effective area
    aeff_dict = aeff_service.get_aeff()

    ebins, czbins = get_binning(osc_flux_maps)

    # Scale each oscillated flux by the effective area, the aeff_scale
    # systematic and the livetime (years converted to seconds below)
    flavours = ["nue", "numu", "nutau", "nue_bar", "numu_bar", "nutau_bar"]
    for flavour in flavours:
        osc_flux_map = osc_flux_maps[flavour]["map"]
        int_type_dict = {}
        for int_type in ["cc", "nc"]:
            event_rate = osc_flux_map * aeff_dict[flavour][int_type] * aeff_scale

            event_rate *= livetime * Julian_year
            int_type_dict[int_type] = {"map": event_rate, "ebins": ebins, "czbins": czbins}
            logging.debug("  Event Rate before reco for %s/%s: %.2f" % (flavour, int_type, np.sum(event_rate)))
        event_rate_maps[flavour] = int_type_dict

    return event_rate_maps
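The returned structure nests flavour, then interaction type, then a dict holding the map and its binning. A minimal sketch of consuming it, with a toy stand-in for the real return value (the real dict also carries a 'params' entry, omitted here):

import numpy as np

event_rate_maps = {
    flav: {it: {"map": np.ones((3, 4)), "ebins": np.arange(4.0), "czbins": np.arange(5.0)}
           for it in ("cc", "nc")}
    for flav in ("nue", "numu", "nutau", "nue_bar", "numu_bar", "nutau_bar")
}

total = sum(np.sum(event_rate_maps[flav][it]["map"])
            for flav in event_rate_maps for it in ("cc", "nc"))
print("total expected events:", total)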
예제 #49
0
 def get_earth_model(self, model):
     """
     Check whether the specified Earth density profile has a correct
     NuCraft preface. If not, create a temporary file that does.
     """
     logging.debug('Trying to construct Earth model from "%s"'%model)
     try:
         resource_path = find_resource(model)
         self.earth_model = EarthModel(resource_path)
         logging.info('Loaded Earth model from %s'%model)
     except SyntaxError:
         #Probably the file is lacking the correct preamble
         logging.info('Failed to construct NuCraft Earth model directly from'
                      ' %s! Adding default preamble...'%resource_path)
         #Generate tempfile with preamble
         with open(resource_path, 'r') as infile:
             profile_lines = infile.readlines()
         preamble = ['# nuCraft Earth model with PREM density '
                      'values for use as template; keep structure '
                      'of the first six lines unmodified!\n',
                     '(0.4656,0.4656,0.4957)   # tuple of (relative) '
                      'electron numbers for mantle, outer core, '
                      'and inner core\n',
                     '6371.    # radius of the Earth\n',
                     '3480.    # radius of the outer core\n',
                     '1121.5   # radius of the inner core\n',
                     '# two-columned list of radii and corresponding '
                      'matter density values in km and kg/dm^3; '
                      'add, remove or modify lines as necessary\n']
         # Open in text mode so writelines() accepts str under Python 3
         tfile = NamedTemporaryFile(mode='w')
         tfile.writelines(preamble+profile_lines)
         tfile.flush()
         try:
             self.earth_model = EarthModel(tfile.name)
         except Exception:
             logging.error('Could not construct Earth model from %s: %s'
                           %(model, sys.exc_info()[1]))
             sys.exit(1)
         logging.info('Successfully constructed Earth model')
         tfile.close()
     except IOError:
         logging.info('Using NuCraft built-in Earth model "%s"'%model)
         self.earth_model = EarthModel(model)
예제 #50
0
파일: resources.py 프로젝트: olivas/pisa
def find_resource(filename, fail=True):
    '''
    Try to find the resource given by directory/filename. Will first check if
    filename is an absolute path, then relative to the $PISA
    environment variable if set. Otherwise will look in the resources directory
    of the pisa installation. Returns the resolved file path, or raises an
    IOError if the file is not found (returns None instead when fail=False).
    '''

    #First check for absolute path
    fpath = os.path.expanduser(os.path.expandvars(filename))
    logging.trace("Checking if %s is a file..."%fpath)
    if os.path.isfile(fpath):
        logging.debug('Found %s'%(fpath))
        return fpath
    
    #Next check if $PISA is set in environment
    logging.trace("Checking environment for $PISA...")
    if 'PISA' in os.environ:
        rpath = os.path.expanduser(os.path.expandvars(os.environ['PISA']))
        logging.debug('Searching resource path PISA=%s'%rpath)

        fpath = os.path.join(rpath,filename)
        if os.path.isfile(fpath):
            logging.debug('Found %s at %s'%(filename,fpath))
            return fpath

    #Not in the resource path, so look inside the package
    logging.trace('Searching package resources...')
    fpath = resource_filename(__name__,filename)
    if os.path.isfile(fpath):
        logging.debug('Found %s at %s'%(filename,fpath))
        return fpath

    #Nowhere to be found
    if fail:
        raise IOError('Could not find resource "%s"'%filename)
    else:
        logging.debug('Could not find resource "%s"'%filename)
        return None
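Resolution order is: absolute/expanded path, then under $PISA, then the installed package's resources. A short usage sketch of the function above (paths and resource names are placeholders):

import os

os.environ["PISA"] = "/path/to/my/resources"             # optional search root
path = find_resource("osc/earth_model.dat", fail=False)  # hypothetical resource
if path is None:
    print("resource not found on any search path")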
예제 #51
0
    def get_osc_prob_maps(self, **kwargs):
        """
        Returns an oscillation probability map dictionary calculated
        at the values of the input parameters:
          deltam21,deltam31,theta12,theta13,theta23,deltacp
        for flavor_from to flavor_to, with the binning of ebins,czbins.
        The dictionary is formatted as:
          'nue_maps': {'nue':map,'numu':map,'nutau':map},
          'numu_maps': {...}
          'nue_bar_maps': {...}
          'numu_bar_maps': {...}
        NOTES: * expects all angles in [rad]
               * this method doesn't calculate the oscillation probabilities
                 itself, but calls get_osc_probLT_dict internally
        """
        #Get the finely binned maps as implemented in the derived class
        logging.info('Retrieving finely binned maps')
        fine_maps = self.get_osc_probLT_dict(**kwargs)

        logging.info("Smoothing fine maps...")
        profile.info("start smoothing maps")
        smoothed_maps = {}
        smoothed_maps['ebins'] = self.ebins
        smoothed_maps['czbins'] = self.czbins

        for from_nu, tomap_dict in fine_maps.items():
            if 'bins' in from_nu: continue
            new_tomaps = {}
            for to_nu, tomap in tomap_dict.items():
                logging.debug("Getting smoothed map %s/%s"%(from_nu,to_nu))
                new_tomaps[to_nu] = get_smoothed_map(tomap,
                                         fine_maps['ebins'],
                                         fine_maps['czbins'],
                                         self.ebins, self.czbins)
            smoothed_maps[from_nu] = new_tomaps

        profile.info("stop smoothing maps")

        return smoothed_maps
예제 #52
0
파일: utils.py 프로젝트: olivas/pisa
def get_smoothed_map(prob_map, ebinsLT, czbinsLT, ebinsSM, czbinsSM):
    """
    Downsamples a map by averaging over the look up table bins whose
    bin center is within the new (coarser) binning. DOES NOT assume
    that the new (SM) binning is divisible by the old (LT)
    binning. The algorithm is that a new histogram is created from the
    entirety of the data in the Lookup Table.

    NOTATION: LT - "lookup table" (finely binned)
              SM - "smoothed" binning
    """

    # check whether downsampling can be achieved by integer rebinning
    rebin_info = subbinning([ebinsSM, czbinsSM], [ebinsLT, czbinsLT])
    if rebin_info:
        # Use fast numpy magic
        logging.debug("Coarse map is true submap of fine map, " "using numpy array magic for smoothing.")
        smoothed_map = integer_rebin_map(prob_map, rebin_info)

    else:
        ecenLT = get_bin_centers(ebinsLT)
        czcenLT = get_bin_centers(czbinsLT)

        elist = []
        czlist = []
        weight_list = []
        for ie, egy in enumerate(ecenLT):
            for icz, cz in enumerate(czcenLT):
                czlist.append(cz)
                elist.append(egy)
                weight_list.append(prob_map[ie][icz])

        map_sum_wts = np.histogram2d(elist, czlist, weights=weight_list, bins=[ebinsSM, czbinsSM])[0]
        map_num = np.histogram2d(elist, czlist, bins=[ebinsSM, czbinsSM])[0]

        # Weighted sum / counts = average; coarse bins containing no LT bin
        # centers end up as NaN
        smoothed_map = np.divide(map_sum_wts, map_num)

    return smoothed_map
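The histogram2d-based averaging is easy to verify on a toy grid. A minimal sketch, assuming only NumPy; the binnings are made up:

import numpy as np

# Fine (LT) grid: 4 x 4 bin centers with known values
ecen = np.array([0.5, 1.5, 2.5, 3.5])
czcen = np.array([-0.875, -0.625, -0.375, -0.125])
prob = np.arange(16.0).reshape(4, 4)

e, cz = np.meshgrid(ecen, czcen, indexing="ij")
ebinsSM, czbinsSM = [0, 2, 4], [-1.0, -0.5, 0.0]   # coarse 2x2 binning

wts = np.histogram2d(e.ravel(), cz.ravel(), weights=prob.ravel(),
                     bins=[ebinsSM, czbinsSM])[0]
num = np.histogram2d(e.ravel(), cz.ravel(), bins=[ebinsSM, czbinsSM])[0]
print(wts / num)   # each coarse bin holds the mean of its 2x2 fine-bin block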
예제 #53
0
import h5py
import numpy as np
# 'logging' is assumed to be provided by the surrounding module


def to_hdf5(oscprob_dict, filename, param_dict):
    """Write oscillation-probability maps and a parameter dict to HDF5."""
    fh = h5py.File(filename, 'w')

    edata = fh.create_dataset('ebins', data=oscprob_dict['ebins'], dtype=np.float32)
    czdata = fh.create_dataset('czbins', data=oscprob_dict['czbins'], dtype=np.float32)

    for key in oscprob_dict.keys():
        if 'maps' in key:
            logging.debug("  key %s",key)
            group_base = fh.create_group(key)
            for subkey in oscprob_dict[key].keys():
                logging.debug("    subkey %s",subkey)
                dset = group_base.create_dataset(subkey,data=oscprob_dict[key][subkey],dtype=np.float32)
                dset.attrs['ebins'] = edata.ref
                dset.attrs['czbins'] = czdata.ref
        
    param_group = fh.create_group("params")
    logging.debug("  saving param dict...")
    for key in param_dict:
        param_group.create_dataset(key, data=param_dict[key])

    fh.close()
    return
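The file layout written above can be read back and the stored object references dereferenced. A minimal sketch; the filename is a placeholder:

import h5py

with h5py.File("oscprob.hdf5", "r") as fh:   # hypothetical file from to_hdf5
    ebins = fh["ebins"][:]
    for key in fh:
        if "maps" not in key:
            continue
        for subkey, dset in fh[key].items():
            # dereference the stored HDF5 reference back to the ebins dataset
            assert fh[dset.attrs["ebins"]].shape == ebins.shape
            print(key, subkey, dset.shape)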
예제 #54
0
    def get_osc_prob_maps(self, theta12, theta13, theta23, deltam21, deltam31,
                          deltacp, energy_scale, YeI, YeO, YeM, **kwargs):
        """
        Returns an oscillation probability map dictionary calculated
        at the values of the input parameters:
          deltam21,deltam31,theta12,theta13,theta23,deltacp
        for flavor_from to flavor_to, with the binning defined in the constructor.
        The dictionary is formatted as:
          'nue_maps': {'nue':map,'numu':map,'nutau':map},
          'numu_maps': {...}
          'nue_bar_maps': {...}
          'numu_bar_maps': {...}

        \params:
          * theta12,theta13,theta23 - in [rad]
          * deltam21, deltam31 - in [eV^2]
          * energy_scale - factor to scale energy bin centers
        """

        sin2th12Sq = np.sin(theta12)**2
        sin2th13Sq = np.sin(theta13)**2
        sin2th23Sq = np.sin(theta23)**2

        mAtm = deltam31 if deltam31 < 0.0 else (deltam31 - deltam21)

        # Comment BargerPropagator.cc::SetMNS()
        # "For the inverted Hierarchy, adjust the input
        # by the solar mixing (should be positive)
        # to feed the core libraries the correct value of m32."
        #if mAtm < 0.0: mAtm -= deltam21;

        self.grid_prop.SetMNS(deltam21,mAtm,sin2th12Sq,sin2th13Sq,sin2th23Sq,deltacp)
        self.grid_prop.SetEarthDensityParams(self.prop_height,YeI,YeO,YeM)
        self.prepare_device_arrays()

        dm_mat = np.zeros((3,3),dtype=self.FTYPE)
        self.grid_prop.Get_dm_mat(dm_mat)
        mix_mat = np.zeros((3,3,2),dtype=self.FTYPE)
        self.grid_prop.Get_mix_mat(mix_mat)

        logging.debug("dm_mat: \n %s"%str(dm_mat))
        logging.debug("mix[re]: \n %s"%str(mix_mat[:,:,0]))

        d_dm_mat = cuda.mem_alloc(dm_mat.nbytes)
        d_mix_mat = cuda.mem_alloc(mix_mat.nbytes)
        cuda.memcpy_htod(d_dm_mat,dm_mat)
        cuda.memcpy_htod(d_mix_mat,mix_mat)


        # NEXT: set up smooth maps to give to kernel, and then use
        # PyCUDA to launch kernel...
        logging.info("Initialize smooth maps...")
        smoothed_maps = {}
        smoothed_maps['ebins'] = self.ebins
        smoothed_maps['czbins'] = self.czbins

        nebins_fine = np.uint32(len(self.ecen_fine))
        nczbins_fine = np.uint32(len(self.czcen_fine))
        nebins = np.uint32(len(self.ebins)-1)
        nczbins = np.uint32(len(self.czbins)-1)

        # This goes here, so it can use the energy_scale systematic:
        cuda.memcpy_htod(self.d_ecen_fine,self.ecen_fine*energy_scale)

        smooth_maps = np.zeros((nczbins*nebins*12),dtype=self.FTYPE)
        d_smooth_maps = cuda.mem_alloc(smooth_maps.nbytes)
        cuda.memcpy_htod(d_smooth_maps,smooth_maps)

        block_size = (16,16,1)
        # Floor division so the CUDA grid dimensions stay ints under Python 3
        grid_size = (int(nczbins_fine // block_size[0]) + 1,
                     int(nebins_fine // block_size[1]) + 1, 2)
        self.propGrid(d_smooth_maps,
                      d_dm_mat, d_mix_mat,
                      self.d_ecen_fine, self.d_czcen_fine,
                      nebins_fine, nczbins_fine,
                      nebins, nczbins,
                      np.uint32(self.maxLayers),
                      self.d_numLayers, self.d_densityInLayer,
                      self.d_distanceInLayer,
                      block=block_size, grid=grid_size)
        cuda.memcpy_dtoh(smooth_maps,d_smooth_maps)

        self.free_device_memory()
        d_smooth_maps.free()
        d_dm_mat.free()
        d_mix_mat.free()

        # Now put these into smoothed_maps in the correct format as
        # the other oscillation services, to interface properly with
        # the rest of the code:
        smooth_maps = np.reshape(smooth_maps,(12,nebins,nczbins))
        flavs = ['nue','numu','nutau']
        iMap = 0
        for from_nu in ['nue','numu','nue_bar','numu_bar']:
            from_nu += '_maps'
            smoothed_maps[from_nu] = {}
            for to_nu in flavs:
                if '_bar' in from_nu: to_nu+='_bar'
                smoothed_maps[from_nu][to_nu] = smooth_maps[iMap]
                iMap+=1

        return smoothed_maps
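The flat GPU output buffer is unpacked in a fixed order: from-flavour major, to-flavour minor, with antineutrino rows mapped to barred to-flavours. A minimal sketch of the same unpacking on dummy data, assuming only NumPy:

import numpy as np

nebins, nczbins = 3, 4
smooth_maps = np.arange(12 * nebins * nczbins, dtype=float).reshape(12, nebins, nczbins)

maps, i_map = {}, 0
for from_nu in ['nue', 'numu', 'nue_bar', 'numu_bar']:
    maps[from_nu + '_maps'] = {}
    for to_nu in ['nue', 'numu', 'nutau']:
        key = to_nu + '_bar' if '_bar' in from_nu else to_nu
        maps[from_nu + '_maps'][key] = smooth_maps[i_map]
        i_map += 1

assert i_map == 12 and maps['numu_bar_maps']['nutau_bar'].shape == (3, 4)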
예제 #55
0
    def __init__(self, template_settings, ebins, czbins,
                 oversample_e=None, oversample_cz=None, **kwargs):
        '''
        TemplateMaker class handles all of the setup and calculation of the
        templates for a given binning.

        Parameters:
        * template_settings - dictionary of all template-making settings
        * ebins - energy bin edges
        * czbins - coszen bin edges
        '''

        
        self.ebins = ebins
        self.czbins = czbins
        self.oversample_e = oversample_e
        self.oversample_cz = oversample_cz
        logging.debug("Using %u bins in energy from %.2f to %.2f GeV"%
                      (len(self.ebins)-1, self.ebins[0], self.ebins[-1]))
        logging.debug("Using %u bins in cos(zenith) from %.2f to %.2f"%
                      (len(self.czbins)-1, self.czbins[0], self.czbins[-1]))

        # Instantiate a flux model service
        self.flux_service = HondaFluxService(**template_settings)

        # Oscillated Flux Service:
        osc_code = template_settings['osc_code']
        if osc_code == 'prob3':
            self.osc_service = Prob3OscillationService(
                self.ebins, self.czbins, **template_settings)
        elif osc_code == 'gpu':
            self.osc_service = Prob3GPUOscillationService(
                self.ebins, self.czbins, oversample_e=self.oversample_e,
                oversample_cz=self.oversample_cz, **template_settings
            )
        elif osc_code == 'nucraft':
            self.osc_service = NucraftOscillationService(
                self.ebins, self.czbins, **template_settings
            )
        else:
            error_msg = 'OscillationService NOT implemented for ' + \
                    'osc_code = %s' % osc_code
            raise NotImplementedError(error_msg)

        # Aeff/True Event Rate Service:
        aeff_mode = template_settings['aeff_mode']
        if aeff_mode == 'param':
            logging.info(" Using effective area from PARAMETRIZATION...")
            self.aeff_service = AeffServicePar(self.ebins, self.czbins,
                                               **template_settings)
        elif aeff_mode == 'MC':
            logging.info(" Using effective area from MC EVENT DATA...")
            self.aeff_service = AeffServiceMC(self.ebins, self.czbins,
                                              **template_settings)
        else:
            error_msg = "aeff_mode: '%s' is not implemented! "%aeff_mode
            error_msg += " Please choose among: ['MC', 'param']"
            raise NotImplementedError(error_msg)

        # Reco Event Rate Service:
        reco_mode = template_settings['reco_mode']
        if reco_mode == 'MC':
            self.reco_service = RecoServiceMC(self.ebins, self.czbins,
                                              **template_settings)
        elif reco_mode == 'param':
            self.reco_service = RecoServiceParam(self.ebins, self.czbins,
                                                 **template_settings)
        elif reco_mode == 'stored':
            self.reco_service = RecoServiceKernelFile(self.ebins, self.czbins,
                                                      **template_settings)
        elif reco_mode == 'vbwkde':
            self.reco_service = RecoServiceVBWKDE(self.ebins, self.czbins,
                                                  **template_settings)
        else:
            error_msg = "reco_mode: %s is not implemented! "%reco_mode
            error_msg+=" Please choose among: ['MC', 'param', 'stored']"
            raise NotImplementedError(error_msg)

        # PID Service:
        pid_mode = template_settings['pid_mode']
        if pid_mode == 'param':
            self.pid_service = PIDServiceParam(self.ebins, self.czbins,
                                               **template_settings)
        elif pid_mode == 'stored':
            self.pid_service = PIDServiceKernelFile(self.ebins, self.czbins,
                                                    **template_settings)
        else:
            error_msg = "pid_mode: %s is not implemented! "%pid_mode
            error_msg+=" Please choose among: ['stored', 'param']"
            raise NotImplementedError(error_msg)

        return
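Each of the four if/elif ladders above performs the same mode-to-class dispatch, which can also be expressed as a dict lookup. A hedged sketch of that alternative (the helper is illustrative, not part of PISA; the class names come from the example above):

def make_service(mode, choices, ebins, czbins, **settings):
    """Instantiate choices[mode], or fail listing the valid options."""
    try:
        cls = choices[mode]
    except KeyError:
        raise NotImplementedError(
            "mode %r is not implemented! Choose among: %s" % (mode, sorted(choices)))
    return cls(ebins, czbins, **settings)

# e.g. for the reco step:
# reco_service = make_service(
#     template_settings['reco_mode'],
#     {'MC': RecoServiceMC, 'param': RecoServiceParam,
#      'stored': RecoServiceKernelFile, 'vbwkde': RecoServiceVBWKDE},
#     ebins, czbins, **template_settings)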
예제 #56
0
파일: plotUtils.py 프로젝트: tarlen5/pisa
def plot_column(tkey,hkey, subplot, column, template_settings, color,
                plot_param_info=True,pbins=20,mctrue=False):
    """Plot column information"""


    #
    # NOTE: Fix prior implementation here. If prior['kind'] == 'gaussian',
    # then get prior['sigma']!
    #
    # I don't think I need to check for theta, etc...
    #

    col_name = column.name
    if 'llh' not in col_name:
        prior, inj_value, prange, scale = get_col_info(
            col_name, tkey, hkey, template_settings,mctrue=mctrue)
        column = scale*column

    if bool(re.match('^theta',col_name)):
        column = np.rad2deg(column)
        if prior is not None: prior = np.rad2deg(prior)
        inj_value = np.rad2deg(inj_value)
        prange = np.rad2deg(prange)

    std = column.std()
    mean = column.mean()

    ax = plt.subplot(2,2,subplot)
    logging.debug("Processing column: %s"%col_name)

    hist,xbins,patches = plt.hist(column,histtype='step',lw=2,color=color,
                                  bins=pbins)
    plt.title(col_name)#,fontsize='large')
    plt.grid(True)

    # Plot extra info about priors, injected val, mean, range, etc.
    if plot_param_info:
        ylim = ax.get_ylim()
        ymax = ylim[1]

        # First, plot mean and std dev:
        plot_mean_std(mean,std,ymax,ax)

        # Next: plot injected_val, prior, and bound
        # Match the guard above ('llh' not in col_name), under which scale
        # and inj_value were defined
        if 'llh' not in col_name:
            plot_injected_val(scale*inj_value,ymax)
            if prior is not None:
                plot_prior(scale*prior,scale*inj_value, ymax,ax)

            # Finally, plot bound:
            plot_bound(scale*prange,ymax,ax)

        if bool(re.match('^theta23',col_name)):
            ax.set_xlim([prange[0],prange[1]])
        else:
            ax.set_xlim([mean-5.0*std,mean+5.0*std])
        ax.set_ylim([ylim[0],ymax*1.2])

        plt.legend(loc='best',framealpha=0.5)#,fontsize='large')

    return
예제 #57
0
    def single_kernel_set(self, e_true, cz_true, e_reco, cz_reco,
                          flav, int_type, make_plots=False, out_dir=None):
        """Construct a 4D kernel set from MC events using VBWKDE.

        Given a set of MC events and each of their {energy{true, reco},
        coszen{true, reco}}, generate a 4D NumPy array that maps a 2D true-flux
        histogram onto the corresponding 2D reco-flux histogram.

        The resulting 4D array can be indexed logically using
          kernel4d[e_true_i, cz_true_j][e_reco_k, cz_reco_l]
        where the 4 indices point from a single MC-true histogram bin (i,j) to
        a single reco histogram bin (k,l).

        Binning of both MC-true and reco histograms is the same and is given by
        the values in self.ebins and self.czbins which define the bin *edges*
        (not the bin centers; hence, len(self.ebins) is one greater than the
        number of bins, etc.).

        NOTE: Actual limits in energy used to group events into a single "true"
        bin may be extended beyond the bin edges defined by self.ebins in order
        to gather enough events to successfully apply VBWKDE.

        Parameters
        ----------
        e_true : sequence
            MC-true neutrino energies, one per event
        cz_true : sequence
            MC-true neutrino coszen, one per event
        e_reco : sequence
            Reconstructed neutrino energies, one per event
        cz_reco : sequence
            Reconstructed neutrino coszen, one per event
        flav : str
        int_type : str
        make_plots : bool
        out_dir : str or None
            path to directory into which to save plots. ``None`` (default)
            saves to PWD.

        Returns
        -------
        kernel4d : 4D array of float
            Mapping from the number of events in each bin of the 2D
            MC-true-events histogram to the number of events reconstructed in
            each bin of the 2D reconstructed-events histogram. Dimensions are
              len(self.ebins)-1 x len(self.czbins)-1 x len(self.ebins)-1 x
              len(self.czbins)-1
            since ebins and czbins define the histograms' bin edges.
        """
        OVERFIT_FACTOR = 1.0

        if make_plots:
            import matplotlib as mpl
            import matplotlib.pyplot as plt
            from matplotlib.backends.backend_pdf import PdfPages
            from matplotlib.patches import Rectangle
            plt.close(1)
            plt.close(2)
            plt.close(3)
            def rugplot(a, y0, dy, ax, **kwargs):
                return ax.plot([a,a], [y0, y0+dy], **kwargs)
            plot_fname = '_'.join(['resolutions', 'vbwkde', flav, int_type]) + '.pdf'
            if out_dir is not None:
                plot_fname = os.path.join(out_dir, plot_fname)
            TOP = 0.925
            BOTTOM = 0.05
            RIGHT = 0.97
            LEFT = 0.07
            HSPACE = 0.12
            LABELPAD = 0.058
            AXISBG = (0.5, 0.5, 0.5)
            DARK_RED =  (0.7, 0.0, 0.0)
            HIST_PP = dict(
                facecolor=(1,0.5,0.5), edgecolor=DARK_RED,
                histtype='stepfilled', alpha=0.7, linewidth=2.0,
                label=r'$\mathrm{Histogram}$'
            )
            N_HBINS = 25
            DIFFUS_PP = dict(
                color=(0.0, 0.0, 0.0), linestyle='-', marker=None, alpha=0.6,
                linewidth=2.0, label=r'$\mathrm{VBWKDE}$'
            )
            RUG_PP = dict(color=(1.0, 1.0, 1.0), linewidth=0.4, alpha=0.5)
            RUG_LAB =r'$\mathrm{Rug\,plot}$'
            LEGFNTCOL = (1,1,1)
            LEGFACECOL = (0.2,0.2,0.2)
            GRIDCOL = (0.4, 0.4, 0.4)
            pdfpgs = PdfPages(plot_fname)

        assert np.min(np.diff(self.ebins)) > 0, \
            "Energy bin edges not monotonically increasing."
        assert np.min(np.diff(self.czbins)) > 0, \
            "coszen bin edges not monotonically increasing."

        # NOTE: below defines bin centers on linear scale; other logic
        # in this method assumes this to be the case, so
        # **DO NOT USE** utils.utils.get_bin_centers in this method, which
        # may return logarithmically-defined centers instead.

        ebin_edges = np.array(self.ebins)
        left_ebin_edges = ebin_edges[0:-1]
        right_ebin_edges = ebin_edges[1:]
        ebin_centers = (left_ebin_edges+right_ebin_edges)/2.0
        ebin_range = ebin_edges[-1] - ebin_edges[0]
        n_ebins = len(ebin_centers)

        czbin_edges = np.array(self.czbins)
        left_czbin_edges = czbin_edges[0:-1]
        right_czbin_edges = czbin_edges[1:]
        czbin_centers = (left_czbin_edges+right_czbin_edges)/2.0
        n_czbins = len(czbin_centers)

        n_events = len(e_true)

        if self.MIN_NUM_EVENTS > n_events:
            self.MIN_NUM_EVENTS = n_events
        if self.TGT_NUM_EVENTS > n_events:
            self.TGT_NUM_EVENTS = n_events

        # Object with which to store the 4D kernels: np 4D array
        kernel4d = np.zeros((n_ebins, n_czbins, n_ebins, n_czbins))

        # Object with which to store the 2D "aggregate_map": the total number
        # of events reconstructed into a given (E, CZ) bin, used for sanity
        # checks
        aggregate_map = np.zeros((n_ebins, n_czbins))
        for ebin_n in range(n_ebins):
            ebin_min = left_ebin_edges[ebin_n]
            ebin_max = right_ebin_edges[ebin_n]
            ebin_mid = (ebin_min+ebin_max)/2.0
            ebin_wid = ebin_max-ebin_min

            logging.debug(
                'Processing true-energy bin_n=' + format(ebin_n, 'd') + ' of ' +
                format(n_ebins-1, 'd') + ', E_{nu,true} in ' +
                '[' + format(ebin_min, '0.3f') + ', ' +
                format(ebin_max, '0.3f') + '] ...'
            )

            # Absolute distance from these events' true energies to the center
            # of this energy bin; sort in ascending-distance order
            abs_enu_dist = np.abs(e_true - ebin_mid)
            sorted_abs_enu_dist = np.sort(abs_enu_dist)

            # Grab the distance the number-"TGT_NUM_EVENTS" event is from the
            # bin center
            tgt_thresh_enu_dist = sorted_abs_enu_dist[self.TGT_NUM_EVENTS-1]

            # Grab the distance the number-"MIN_NUM_EVENTS" event is from the
            # bin center
            min_thresh_enu_dist = sorted_abs_enu_dist[self.MIN_NUM_EVENTS-1]

            # TODO: revisit the below algorithm with proper testing

            # Make threshold distance (which is half the total width) no more
            # than 4x the true-energy-bin width in order to capture the
            # "target" number of points (TGT_NUM_EVENTS) but no less than half
            # the bin width (i.e., the bin should be at least be as wide as the
            # pre-defined bin width).
            #
            # HOWEVER, allow the threshold distance (bin half-width) to expand
            # to as much as 4x the original bin full-width in order to capture
            # the "minimum" number of points (MIN_NUM_EVENTS).
            thresh_enu_dist = \
                    max(min(max(tgt_thresh_enu_dist, ebin_wid/2),
                            4*ebin_wid),
                        min_thresh_enu_dist)

            # Grab all events within the threshold distance
            in_ebin_ind = np.where(abs_enu_dist <= thresh_enu_dist)[0]
            #print '** IN EBIN FIRST, LAST ENERGY:', e_reco[in_ebin_ind[0]], e_reco[in_ebin_ind[-1]]
            n_in_bin = len(in_ebin_ind)

            # Record lowest/highest energies that are included in the bin
            actual_left_ebin_edge = min(ebin_min, min(e_true[in_ebin_ind])) #max(min(ebins), ebin_mid-thresh_enu_dist)
            actual_right_ebin_edge = max(ebin_max, max(e_true[in_ebin_ind])) #(max(ebins), ebin_mid+thresh_enu_dist)

            # Extract just the neutrino-energy/coszen error columns' values for
            # succinctness
            enu_err = e_reco[in_ebin_ind] - e_true[in_ebin_ind]
            cz_err = cz_reco[in_ebin_ind] - cz_true[in_ebin_ind]

            #==================================================================
            # Neutrino energy resolutions
            #==================================================================
            dmin = min(enu_err)
            dmax = max(enu_err)
            drange = dmax-dmin

            e_lowerlim = min(self.ENERGY_RANGE[0]-ebin_mid*1.5, dmin-drange*0.5)
            e_upperlim = max((np.max(ebin_edges)-ebin_mid)*1.5, dmax+drange*0.5)
            egy_kde_lims = np.array([e_lowerlim, e_upperlim])

            # Use at least min_num_pts points, and at most the next-highest
            # integer power of two that allows for at least min_pts_smallest_bin
            # (here 5) points in the smallest energy bin
            min_num_pts = 2**12
            min_bin_width = np.min(ebin_edges[1:]-ebin_edges[:-1])
            min_pts_smallest_bin = 5.0
            kde_range = np.diff(egy_kde_lims)
            num_pts0 = kde_range/(min_bin_width/min_pts_smallest_bin)
            kde_num_pts = int(max(min_num_pts, 2**np.ceil(np.log2(num_pts0))))
            logging.debug(
                '  N_evts=' + str(n_in_bin) + ', taken from [' +
                format(actual_left_ebin_edge, '0.3f') + ', ' +
                format(actual_right_ebin_edge, '0.3f') + ']' + ', VBWKDE lims=' +
                str(egy_kde_lims) + ', VBWKDE_N: ' + str(kde_num_pts)
            )

            # Compute variable-bandwidth KDEs
            enu_bw, enu_mesh, enu_pdf = kde.vbw_kde(
                data           = enu_err,
                overfit_factor = OVERFIT_FACTOR,
                MIN            = egy_kde_lims[0],
                MAX            = egy_kde_lims[1],
                N              = kde_num_pts
            )

            if np.min(enu_pdf) < 0:
                # Only issue warning if the most-negative value is negative
                # beyond specified acceptable-numerical-precision threshold
                # (EPSILON)
                if np.min(enu_pdf) <= -self.EPSILON:
                    logging.warn(
                        "np.min(enu_pdf) < 0: Minimum value is " +
                        str(np.min(enu_pdf)) +
                        "; forcing all negative values to 0."
                    )
                # Otherwise, just quietly clip any negative values at 0
                enu_pdf = np.clip(a=enu_pdf, a_min=0, a_max=np.inf)

            assert np.min(enu_pdf) >= 0, str(np.min(enu_pdf))

            # Re-center distribution at the center of the energy bin for which
            # errors were computed
            offset_enu_mesh = enu_mesh+ebin_mid
            offset_enu_pdf = enu_pdf

            # Get reference area under the PDF, for checking after interpolated
            # values are added.
            #
            # NOTE There should be NO normalization because any events lost due
            # to cutting off tails outside the binned region are actually going
            # to be lost, and so should penalize the total area.
            int_val0 = np.trapz(y=offset_enu_pdf,
                                x=offset_enu_mesh)

            # Create linear interpolator for the PDF
            interp = interpolate.interp1d(
                x             = offset_enu_mesh,
                y             = offset_enu_pdf,
                kind          = 'linear',
                copy          = True,
                bounds_error  = True,
                fill_value    = np.nan
            )

            # Insert all bin edges' exact locations into the mesh (For accurate
            # accounting of area in each bin, must include values out to bin
            # edges)
            edge_locs = [be for be in
                         np.concatenate((left_ebin_edges, right_ebin_edges))
                         if be not in offset_enu_mesh]
            edge_locs.sort()
            edge_pdfs = interp(edge_locs)
            insert_ind = np.searchsorted(offset_enu_mesh, edge_locs)
            offset_enu_mesh = np.insert(offset_enu_mesh, insert_ind, edge_locs)
            offset_enu_pdf = np.insert(offset_enu_pdf, insert_ind, edge_pdfs)

            int_val = np.trapz(y=offset_enu_pdf, x=offset_enu_mesh)

            assert np.abs(int_val - int_val0) < self.EPSILON

            # Chop off distribution at extrema of energy bins
            valid_ind = np.where(
                (offset_enu_mesh >= np.min(ebin_edges)) &
                (offset_enu_mesh <= np.max(ebin_edges))
            )[0]
            offset_enu_mesh = offset_enu_mesh[valid_ind]
            offset_enu_pdf = offset_enu_pdf[valid_ind]

            # Check that there are no negative density values (after inserts)
            assert np.min(offset_enu_pdf) > 0-self.EPSILON, \
                str(np.min(offset_enu_pdf))

            # Record the integrated area after removing parts outside binned
            # range
            tot_ebin_area0 = np.trapz(y=offset_enu_pdf,
                                      x=offset_enu_mesh)

            # Check that it integrates to <= 1, sanity check
            assert tot_ebin_area0 < 1+self.EPSILON, str(tot_ebin_area0)

            # Identify indices encapsulating the defined energy bins' ranges,
            # and find the area of each bin
            lbinds = np.searchsorted(offset_enu_mesh, left_ebin_edges)
            rbinds = np.searchsorted(offset_enu_mesh, right_ebin_edges)
            bininds = zip(lbinds, rbinds)
            ebin_areas = [np.trapz(y=offset_enu_pdf[l:r+1],
                                   x=offset_enu_mesh[l:r+1])
                          for (l, r) in bininds]

            # Check that no bins have negative areas
            assert np.min(ebin_areas) >= 0

            # Sum the individual bins' areas
            tot_ebin_area = np.sum(ebin_areas)

            # Check that this total of all the bins is equal to the total area
            # under the curve (i.e., make sure there is no overlap or gaps
            # between bins)
            assert np.abs(tot_ebin_area-tot_ebin_area0) < self.EPSILON, \
                    'tot_ebin_area=' + str(tot_ebin_area) + \
                    ' should equal tot_ebin_area0=' + str(tot_ebin_area0)

            if make_plots:
                fig1 = plt.figure(1, figsize=(8,10), dpi=90)
                fig1.clf()
                ax1 = fig1.add_subplot(211, axisbg=AXISBG)

                # Retrieve region where VBWKDE lives
                ml_ci = confInterval.MLConfInterval(x=enu_mesh, y=enu_pdf)
                #for conf in np.logspace(np.log10(0.999), np.log10(0.95), 50):
                #    try:
                #        lb, ub, yopt, r = ml_ci.findCI_lin(conf=conf)
                #    except:
                #        pass
                #    else:
                #        break
                #xlims = (min(-ebin_mid*1.5, lb),
                #         max(min(ub, 6*ebin_mid),2*ebin_mid))
                lb, ub, yopt, r = ml_ci.findCI_lin(conf=0.98)
                xlims = (lb, #min(-ebin_mid*1.5, lb),
                         max(min(ub, 6*ebin_mid),2*ebin_wid))

                #xlims = (
                #    -ebin_wid*1.5,
                #    ebin_wid*1.5
                #)
                #    min(ebin_mid*2, ebin_edges[-1]+(ebin_edges[-1]-ebin_edges[0])*0.1)
                #)

                # Histogram of events' reco error
                hbins = np.linspace(dmin-0.02*drange, dmax+0.02*drange,
                                    int(N_HBINS*np.round(drange/ebin_centers[ebin_n])))
                hvals, hbins, hpatches = ax1.hist(enu_err,
                                                  bins=hbins,
                                                  normed=True,
                                                  **HIST_PP)

                # Plot the VBWKDE
                ax1.plot(enu_mesh, enu_pdf, **DIFFUS_PP)
                axlims = ax1.axis('tight')
                ax1.set_xlim(xlims)
                ymax = axlims[3]*1.05
                ax1.set_ylim(0, ymax)

                # Grey-out regions outside binned region, so it's clear what
                # part of tail(s) will be thrown away
                width = -ebin_mid+ebin_edges[0]-xlims[0]
                unbinned_region_tex = r'$\mathrm{Unbinned}$'
                if width > 0:
                    ax1.add_patch(Rectangle((xlims[0],0), width, ymax, #zorder=-1,
                                            alpha=0.30, facecolor=(0.0 ,0.0, 0.0), fill=True,
                                            ec='none'))
                    ax1.text(xlims[0]+(xlims[1]-xlims[0])/40., ymax/10.,
                             unbinned_region_tex, fontsize=14, ha='left',
                             va='bottom', rotation=90, color='k')
                
                width = xlims[1] - (ebin_edges[-1]-ebin_mid)
                if width > 0:
                    ax1.add_patch(Rectangle((xlims[1]-width,0), width, ymax,
                                            alpha=0.30, facecolor=(0, 0, 0),
                                            fill=True, ec='none'))
                    ax1.text(xlims[1]-(xlims[1]-xlims[0])/40., ymax/10.,
                             unbinned_region_tex, fontsize=14, ha='right',
                             va='bottom', rotation=90, color='k')

                # Rug plot of events' reco energy errors
                ylim = ax1.get_ylim()
                dy = ylim[1] - ylim[0]
                ruglines = rugplot(enu_err, y0=ylim[1], dy=-dy/40., ax=ax1,
                                   **RUG_PP)
                ruglines[-1].set_label(RUG_LAB)

                # Legend
                leg_title_tex = r'$\mathrm{Normalized}\,E_\nu\mathrm{-err.\,distr.}$'
                x1lab = ax1.set_xlabel(
                    r'$E_{\nu,\mathrm{reco}}-E_{\nu,\mathrm{true}}\;' +
                    r'(\mathrm{GeV})$', labelpad=LABELPAD
                )
                leg = ax1.legend(loc='upper right', title=leg_title_tex,
                                 frameon=True, framealpha=0.8,
                                 fancybox=True, bbox_to_anchor=[1,0.975])

                # Other plot details
                ax1.xaxis.set_label_coords(0.9, -LABELPAD)
                ax1.xaxis.grid(color=GRIDCOL)
                ax1.yaxis.grid(color=GRIDCOL)
                leg.get_title().set_fontsize(16)
                leg.get_title().set_color(LEGFNTCOL)
                [t.set_color(LEGFNTCOL) for t in leg.get_texts()]
                frame = leg.get_frame()
                frame.set_facecolor(LEGFACECOL)
                frame.set_edgecolor(None)

            #==================================================================
            # Neutrino coszen resolution for events in this energy bin
            #==================================================================
            dmin = min(cz_err)
            dmax = max(cz_err)
            drange = dmax-dmin

            # NOTE the limits are 1 less than / 1 greater than the limits that
            # the error will actually take on, so as to allow for any smooth
            # roll-off at edges of data. The calculation of areas below
            # captures all of the area, though, by reflecting bins defined in
            # [-1, 1] about the points -1 and 1, thereby capturing any
            # densities in the range [-3, +3]. This is not necessarily
            # accurate, but it's better than throwing that info out entirely.
            #
            # NOTE also that since reco events as of now are only in range -1
            # to 0, though, that there are "gaps" in the capture range, but
            # this is due to densities being in the upper-hemisphere which we
            # are intentionally ignoring, rather than the code here not taking
            # them into account. Normalization is based upon *all* events,
            # whether or not they fall within a bin specified above.

            # Number of points in the mesh used for VBWKDE; must be large
            # enough to capture fast changes in the data but the larger the
            # number, the longer it takes to compute the densities at all the
            # points. Here, just choosing a fixed number regardless of the data
            # or binning
            N_cz_mesh = 2**10

            # Data range for VBWKDE to consider
            cz_kde_min = -3
            cz_kde_max = +2

            cz_kde_failed = False
            previous_fail = False
            for n in range(3):
                # TODO: only catch specific exception
                try:
                    cz_bw, cz_mesh, cz_pdf = kde.vbw_kde(
                        data           = cz_err,
                        overfit_factor = OVERFIT_FACTOR,
                        MIN            = cz_kde_min,
                        MAX            = cz_kde_max,
                        N              = N_cz_mesh
                    )
                except Exception:
                    cz_kde_failed = True
                    if n == 0:
                        logging.trace('(cz vbwkde ')
                    logging.trace('fail, ')
                    # If failure occurred in vbw_kde, expand the data range it
                    # takes into account; this usually helps
                    cz_kde_min -= 1
                    cz_kde_max += 1
                else:
                    if cz_kde_failed:
                        previous_fail = True
                        logging.trace('success!')
                    cz_kde_failed = False
                finally:
                    if previous_fail:
                        logging.trace(')')
                    previous_fail = False
                    if not cz_kde_failed:
                        break

            if cz_kde_failed:
                logging.warn('Failed to fit VBWKDE!')
                continue

            if np.min(cz_pdf) < 0:
                logging.warn("np.min(cz_pdf) < 0: Minimum value is " +
                             str(np.min(cz_pdf)) +
                             "; forcing all negative values to 0.")
                cz_pdf = np.clip(a=cz_pdf, a_min=0, a_max=np.inf)

            assert np.min(cz_pdf) >= -self.EPSILON, \
                str(np.min(cz_pdf))

            # TODO: test and/or visualize the shifting & re-binning process
            for czbin_n in range(n_czbins):
                czbin_mid = czbin_centers[czbin_n]

                # Re-center distribution at the center of the current cz bin
                offset_cz_mesh = cz_mesh + czbin_mid

                # Create interpolation object, used to fill in bin edge values
                interp = interpolate.interp1d(
                    x             = offset_cz_mesh,
                    y             = cz_pdf,
                    kind          = 'linear',
                    copy          = True,
                    bounds_error  = False,
                    fill_value    = 0
                )

                # Figure out where all bin edges lie in this re-centered
                # distribution (some bins may be repeated since bins in [-1,0]
                # and err in [-2,1]:
                #
                # 1. Find limits of mesh values..
                mmin = offset_cz_mesh[0]
                mmax = offset_cz_mesh[-1]

                # 2. Map all bin edges into the full mesh-value range,
                # reflecting about -1 and +1. If the reflected edge is outside
                # the mesh range, use the exceeded limit of the mesh range as
                # the bin edge instead.
                #
                # This maps every bin edge {i} to 3 new edges, indexed
                # new_edges[i][{0,1,2}]. Bins are formed by adjacent indices
                # and same-subindices, so what started as, e.g., bin 3 now is
                # described by (left, right) edges at
                #   (new_edges[3][0], new_edges[4][0]),
                #   (new_edges[3][1], new_edges[4][1]), and
                #   (new_edges[3][2], new_edges[4][2]).

                # NOTE / TODO: It's tempting to dynamically set the number of
                # reflections to minimize computation time, but I think it
                # breaks the code. Just set to a reasonably large number for
                # now and accept the performance penalty. ALSO: if you change
                # the parity of the number of reflections, the code below that
                # has either (wrap_n % 2 == 0) or (wrap_n+1 % 2 == 0) must be
                # swapped!!!
                n_left_reflections = 4
                n_right_reflections = 4

                new_czbin_edges = []
                for edge in czbin_edges:
                    edges_refl_left = []
                    for n in range(n_left_reflections):
                        edge_refl_left = reflect1d(edge, -1-(2*n))
                        if edge_refl_left < mmin:
                            edge_refl_left = mmin
                        edges_refl_left.append(edge_refl_left)
                    edges_refl_right = []
                    for n in range(n_right_reflections):
                        edge_refl_right = reflect1d(edge, +1+(2*n))
                        if edge_refl_right > mmax:
                            edge_refl_right = mmax
                        edges_refl_right.append(edge_refl_right)
                    # Include all left-reflected versions of this bin edge, in
                    # increasing-x order + this bin edge + right-reflected
                    # versions of this bin edge
                    new_czbin_edges.append(edges_refl_left[::-1] + [edge]
                                           + edges_refl_right)

                # Record all unique bin edges
                edge_locs = set()
                for edges in new_czbin_edges:
                    edge_locs.update(edges)

                # Throw away bin edges that are already in the mesh
                for edge in list(edge_locs):
                    if edge in offset_cz_mesh:
                        edge_locs.remove(edge)

                # Make into sorted list
                edge_locs = sorted(edge_locs)

                # Record the total area under the curve
                int_val0 = np.trapz(y=cz_pdf, x=offset_cz_mesh)

                # Insert the missing bin edge locations & pdf-values into
                # the mesh & pdf, respectively
                edge_pdfs = interp(edge_locs)
                insert_ind = np.searchsorted(offset_cz_mesh, edge_locs)
                offset_cz_mesh = np.insert(offset_cz_mesh, insert_ind,
                                           edge_locs)
                offset_cz_pdf = np.insert(cz_pdf, insert_ind, edge_pdfs)
                assert np.min(offset_cz_pdf) > -self.EPSILON

                # Check that the re-meshed PDF still integrates to
                # (approximately) its full normalization of 1
                int_val = np.trapz(y=offset_cz_pdf, x=offset_cz_mesh)
                assert np.abs(int_val-1) < self.EPSILON

                # Renormalize if it's not exactly 1
                if int_val != 1.0:
                    offset_cz_pdf = offset_cz_pdf / int_val

                # Add up the area in the bin and areas that are "reflected"
                # into this bin
                new_czbin_edges = np.array(new_czbin_edges)
                czbin_areas = np.zeros(np.shape(new_czbin_edges)[0]-1)
                for wrap_n in range(np.shape(new_czbin_edges)[1]):
                    bin_edge_inds = np.searchsorted(offset_cz_mesh,
                                                    new_czbin_edges[:,wrap_n])
                    lbinds = bin_edge_inds[0:-1]
                    rbinds = bin_edge_inds[1:]
                    # Make sure indices that appear first are less than indices
                    # that appear second in a pair of bin indices
                    if (wrap_n+1) % 2 == 0:
                        bininds = zip(rbinds, lbinds)
                    else:
                        bininds = zip(lbinds, rbinds)
                    tmp_areas = []
                    for (binind_left_edge, binind_right_edge) in bininds:
                        if binind_left_edge == binind_right_edge:
                            tmp_areas.append(0)
                            continue
                        this_bin_area = np.array(np.trapz(
                            y=offset_cz_pdf[binind_left_edge:binind_right_edge+1],
                            x=offset_cz_mesh[binind_left_edge:binind_right_edge+1]
                        ))
                        tmp_areas.append(this_bin_area)
                    czbin_areas += np.array(tmp_areas)

                assert np.min(czbin_areas) > -self.EPSILON

                tot_czbin_area = np.sum(czbin_areas)
                assert tot_czbin_area < int_val + self.EPSILON

                kernel4d[ebin_n, czbin_n] = np.outer(ebin_areas, czbin_areas)
                assert (np.sum(kernel4d[ebin_n, czbin_n]) -
                        tot_ebin_area*tot_czbin_area) < self.EPSILON

            if make_plots:
                ax2 = fig1.add_subplot(212, axisbg=AXISBG)
                hbins = np.linspace(dmin-0.02*drange, dmax+0.02*drange, N_HBINS*3)
                hvals, hbins, hpatches = ax2.hist(cz_err, bins=hbins,
                                                  normed=True, **HIST_PP)
                ax2.plot(cz_mesh, cz_pdf, **DIFFUS_PP)
                fci = confInterval.MLConfInterval(x=cz_mesh,
                                                  y=cz_pdf)
                lb, ub, yopt, r = fci.findCI_lin(conf=0.995)
                axlims = ax2.axis('tight')
                ax2.set_xlim(lb, ub)
                ax2.set_ylim(0, axlims[3]*1.05)

                ylim = ax2.get_ylim()
                dy = ylim[1] - ylim[0]
                ruglines = rugplot(cz_err, y0=ylim[1], dy=-dy/40., ax=ax2, **RUG_PP)
                ruglines[-1].set_label(r'$\mathrm{Rug\,plot}$')

                x2lab = ax2.set_xlabel(
                    r'$\cos\vartheta_{\mathrm{track,reco}}-\cos\vartheta_{\nu,\mathrm{true}}$',
                    labelpad=LABELPAD
                )
                ax2.xaxis.set_label_coords(0.9, -LABELPAD)
                ax2.xaxis.grid(color=GRIDCOL)
                ax2.yaxis.grid(color=GRIDCOL)
                leg_title_tex = r'$\mathrm{Normalized}\,\cos\vartheta\mathrm{-err.\,distr.}$'
                leg = ax2.legend(loc='upper right', title=leg_title_tex,
                                 frameon=True, framealpha=0.8, fancybox=True,
                                 bbox_to_anchor=[1,0.975])
                leg.get_title().set_fontsize(16)
                leg.get_title().set_color(LEGFNTCOL)
                [t.set_color(LEGFNTCOL) for t in leg.get_texts()]
                frame = leg.get_frame()
                frame.set_facecolor(LEGFACECOL)
                frame.set_edgecolor(None)

                actual_bin_tex = ''
                if (actual_left_ebin_edge != ebin_min) or (actual_right_ebin_edge != ebin_max):
                    actual_bin_tex = r'E_{\nu,\mathrm{true}}\in [' + \
                            format(actual_left_ebin_edge, '0.2f') + r',\,' + \
                            format(actual_right_ebin_edge, '0.2f') + r'] \mapsto '
                stt = r'$\mathrm{Resolutions,\,' + flav_tex(flav) + r'\,' + \
                        int_tex(int_type) + r'}$' + '\n' + \
                        r'$' + actual_bin_tex + r'\mathrm{Bin}_{' + format(ebin_n, 'd') + r'}\equiv E_{\nu,\mathrm{true}}\in [' + format(ebin_min, '0.2f') + \
                        r',\,' + format(ebin_max, '0.2f') + r']\,\mathrm{GeV}' + \
                        r',\,N_\mathrm{events}=' + format(n_in_bin, 'd') + r'$'
                
                fig1.subplots_adjust(top=TOP, bottom=BOTTOM, left=LEFT, right=RIGHT, hspace=HSPACE)
                suptitle = fig1.suptitle(stt)
                suptitle.set_fontsize(16)
                suptitle.set_position((0.5,0.98))
                fig1.savefig(pdfpgs, format='pdf')

        check_areas = kernel4d.sum(axis=(2,3))

        assert np.max(check_areas) < 1 + self.EPSILON, str(np.max(check_areas))
        assert np.min(check_areas) > 0 - self.EPSILON, str(np.min(check_areas))

        if make_plots:
            fig2 = plt.figure(2, figsize=(8,10), dpi=90)
            fig2.clf()
            ax = fig2.add_subplot(111)
            X, Y = np.meshgrid(range(n_czbins), range(n_ebins))
            cm = mpl.cm.Paired_r
            cm.set_over((1,1,1), 1)
            cm.set_under((0,0,0), 1)
            plt.pcolor(X, Y, check_areas, vmin=0+self.EPSILON, vmax=1.0,
                       shading='faceted', cmap=cm)
            plt.colorbar(ticks=np.arange(0, 1.05, 0.05))
            ax.grid(0)
            ax.axis('tight')
            ax.set_xlabel(r'$\cos\vartheta_\mathrm{true}\mathrm{\,bin\,num.}$')
            ax.set_ylabel(r'$E_{\nu,\mathrm{true}}\mathrm{\,bin\,num.}$')
            ax.set_title(r'$\mathrm{Fract\,of\,evts\,starting\,in\,each}\,(E_{\nu,\mathrm{true}},\,\cos\vartheta_\mathrm{true})\,\mathrm{bin\,that\,reco\,in\,bounds}$'+
                 '\n'+r'$\mathrm{None\,should\,be\,>1\,(shown\,white);\,no-event\,bins\,are\,black;\,avg.}=' + format(np.mean(check_areas),'0.3f') + r'$')
            fig2.tight_layout()
            fig2.savefig(pdfpgs, format='pdf')

            check_areas2 = kernel4d.sum(axis=(0,1))
            fig3 = plt.figure(3, figsize=(8,10), dpi=90)
            fig3.clf()
            ax = fig3.add_subplot(111)
            X, Y = np.meshgrid(range(n_czbins), range(n_ebins))
            cm = mpl.cm.Paired_r
            cm.set_over((1,1,1), 1)
            cm.set_under((0,0,0), 1)
            plt.pcolor(X, Y, check_areas2, vmin=0+self.EPSILON,# vmax=1.0,
                       shading='faceted', cmap=cm)
            plt.colorbar(ticks=np.arange(0, 0.1+np.ceil(10.*np.max(check_areas2))/10., 0.05))
            ax.grid(0)
            ax.axis('tight')
            ax.set_xlabel(r'$\cos\vartheta_\mathrm{reco}\mathrm{\,bin\,num.}$')
            ax.set_ylabel(r'$E_{\nu,\mathrm{reco}}\mathrm{\,bin\,num.}$')
            ax.set_title(r'$\mathrm{Normed\,num\,events\,reconstructing\,into\,each}\,(E_{\nu,\mathrm{reco}},\,\cos\vartheta_\mathrm{reco})\,\mathrm{bin}$'+
                 '\n'+r'$\mathrm{No-event\,bins\,are\,black;\,avg.}=' + format(np.mean(check_areas2),'0.3f') + r'$')
            fig3.tight_layout()
            fig3.savefig(pdfpgs, format='pdf')

            pdfpgs.close()

        return kernel4d
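Applying the returned kernel to a true-space histogram is a contraction over the first two axes; with NumPy that is a single tensordot. A minimal sketch on random stand-in data:

import numpy as np

n_e, n_cz = 5, 8
rng = np.random.default_rng(0)
kernel4d = rng.random((n_e, n_cz, n_e, n_cz))
true_hist = rng.random((n_e, n_cz))

# reco[k, l] = sum_{i, j} true[i, j] * kernel4d[i, j, k, l]
reco_hist = np.tensordot(true_hist, kernel4d, axes=([0, 1], [0, 1]))
assert reco_hist.shape == (n_e, n_cz)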
예제 #58
0
    def fill_osc_prob(self, osc_prob_dict, ecen, czcen,
                      theta12=None, theta13=None, theta23=None,
                      deltam21=None, deltam31=None, deltacp=None,
                      energy_scale=None,**kwargs):
        """
        Loops over ecen,czcen and fills the osc_prob_dict maps, with
        probabilities calculated according to NuCraft
        """

        #Setup NuCraft for the given oscillation parameters
        #TODO: compatible with new NuCraft version?
        mass_splitting = (1., deltam21, deltam31)
        mixing_angles = [(1,2,np.rad2deg(theta12)),
                         (1,3,np.rad2deg(theta13),np.rad2deg(deltacp)),
                         (2,3,np.rad2deg(theta23))]
        engine = NuCraft(mass_splitting, mixing_angles,
                         earthModel = self.earth_model)
        engine.detectorDepth = self.detector_depth

        if self.prop_height is not None:
            # Fix neutrino production height and detector depth for
            # simulating reactor experiments.
            # In this case, there should be only one zenith angle corresponding
            # to the baseline B. By the law of cosines it satisfies:
            #   cos(zen) = ( (r_E - detectorDepth)**2 + B**2
            #                - (r_E + atmHeight)**2 )
            #              / ( 2 * (r_E - detectorDepth) * B )
            # with r_E = 6371. km
            engine.atmHeight = self.prop_height

        #Make input arrays in correct format
        es, zs = np.meshgrid(ecen, czcen)
        shape = es.shape
        es, zs = es.flatten(), zs.flatten()

        # Apply energy scaling factor, if one was given:
        if energy_scale is not None:
            es *= energy_scale

        for prim in osc_prob_dict:

            if 'bins' in prim: continue

            #Convert the particle into a list of IceCube particle IDs
            ps = np.ones_like(es)*get_PDG_ID(prim.rsplit('_', 1)[0])

            # run it
            logging.debug("Calculating oscillation probabilites for %s at %u points..."
                            %(prim.rsplit('_', 1)[0], len(ps)))
            probs = engine.CalcWeights((ps, es, np.arccos(zs)),
                                       atmMode=self.height_mode,
                                       numPrec=self.num_prec)
            logging.debug("...done")

            #Bring into correct shape
            probs = np.array([ x.reshape(shape).T for x in np.array(probs).T ])

            #Fill probabilities into dict
            for i, sec in enumerate(['nue', 'numu', 'nutau']):
                sec_key = sec+'_bar' if 'bar' in prim else sec
                osc_prob_dict[prim][sec_key] = probs[i]

        return
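The meshgrid / flatten / reshape-transpose round-trip above determines how the engine's flat output is laid back onto the (energy, coszen) grid. A minimal check on toy data, assuming only NumPy:

import numpy as np

ecen = np.array([1.0, 2.0, 3.0])
czcen = np.array([-0.9, -0.5])

es, zs = np.meshgrid(ecen, czcen)        # default 'xy' indexing: shape (2, 3)
shape = es.shape
es, zs = es.flatten(), zs.flatten()

fake_prob = es + zs                      # one value per flattened point
recovered = fake_prob.reshape(shape).T   # back to [energy_bin, coszen_bin]
assert recovered[2, 0] == ecen[2] + czcen[0]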
예제 #59
0
파일: Flux.py 프로젝트: sonia3994/pisa
                        help='''Factor to scale nu_nubar_flux by''',default=1.0)
    parser.add_argument('--delta_index',metavar='FLOAT',type=float,
                        default=0.0,help='''Shift in spectral index of numu''')
    parser.add_argument('--energy_scale',metavar='FLOAT',type=float,
                        help='''Factor to scale TRUE energy by''',default=1.0)
    parser.add_argument('-o', '--outfile', dest='outfile', metavar='FILE',
                        type=str, action='store', default='flux.json',
                        help='file to store the output')
    parser.add_argument('-v', '--verbose', action='count', default=None,
                        help='set verbosity level')
    args = parser.parse_args()

    #Set verbosity level
    set_verbosity(args.verbose)

    logging.debug("Using %u bins in energy from %.2f to %.2f GeV"%
                                (len(args.ebins)-1,args.ebins[0],args.ebins[-1]))
    logging.debug("Using %u bins in cos(zenith) from %.2f to %.2f"%
                                (len(args.czbins)-1,args.czbins[0],args.czbins[-1]))

    #Instantiate a flux model
    flux_model = HondaFluxService(args.flux_file)

    #get the flux
    flux_maps = get_flux_maps(
        flux_model, args.ebins, args.czbins, args.nue_numu_ratio, args.nu_nubar_ratio,
        args.energy_scale, args.delta_index)

    #write out to a file
    logging.info("Saving output to: %s"%args.outfile)
    to_json(flux_maps, args.outfile)