Example #1
def get_gradients(data_tag, param, template_maker, fiducial_params,
                  grid_settings, store_dir):
  """
  Use the template maker to create all the templates needed to obtain the gradients.
  """
  logging.info("Working on parameter %s."%param)

  steps = get_steps(param, grid_settings, fiducial_params)

  pmaps = {}

  # Generate one template for each value of the parameter in question and store in pmaps
  for param_value in steps:

      # Make the template corresponding to the current value of the parameter
      with Timer() as t:
          maps = template_maker.get_template(
              get_values(dict(fiducial_params,**{param:dict(fiducial_params[param],
                                                            **{'value': param_value})})))
      tprofile.info("==> elapsed time for template: %s sec"%t.secs)

      pmaps[param_value] = maps

  # Store the maps used to calculate partial derivatives
  if store_dir != tempfile.gettempdir():
  	logging.info("Writing maps for parameter %s to %s"%(param,store_dir))

  to_json(pmaps, os.path.join(store_dir,param+"_"+data_tag+".json"))

  gradient_map = get_derivative_map(pmaps,fiducial_params[param],degree=2)

  return gradient_map
Example #2
def get_hierarchy_gradients(data_tag, fiducial_maps, fiducial_params,
                            grid_settings, store_dir):
  """
  Use the hierarchy interpolation between the two fiducial maps to obtain the
  gradients.
  """
  logging.info("Working on parameter hierarchy.")

  steps = get_steps('hierarchy', grid_settings, fiducial_params)

  hmap = {step:{'trck':{},'cscd':{}} for step in steps}

  for h in steps:
    for channel in ['trck','cscd']:
      # Superpose bin counts
      hmap[h][channel]['map'] = fiducial_maps['NMH'][channel]['map']*h + fiducial_maps['IMH'][channel]['map']*(1.-h)
      # Obtain binning from one of the maps, since identical by construction (cf. FisherAnalysis)
      hmap[h][channel]['ebins'] = fiducial_maps['NMH'][channel]['ebins']
      hmap[h][channel]['czbins'] = fiducial_maps['NMH'][channel]['czbins']

  # TODO: give hmap the same structure as pmaps?
  # Get_derivative_map works even if 'params' and 'ebins','czbins' not in 'data'

  # Store the maps used to calculate partial derivatives
  if store_dir != tempfile.gettempdir():
  	logging.info("Writing maps for parameter 'hierarchy' to %s"%store_dir)
  to_json(hmap,os.path.join(store_dir,"hierarchy_"+data_tag+".json"))

  gradient_map = get_derivative_map(hmap, fiducial_params['hierarchy'],degree=2)

  return gradient_map
Example #3
def assemble_interpolated_fits(fit_directory, output_file):
    """After all of the fits on the cluster are done, assemble the results to one JSON.
    
    The JSON produced by this function is what `load_interpolated_hypersurfaces`
    expects.
    """
    assert os.path.isdir(fit_directory), "fit directory does not exist"
    metadata = from_json(os.path.join(fit_directory, "metadata.json"))
    
    combined_data = collections.OrderedDict()
    combined_data["interpolation_param_spec"] = metadata["interpolation_param_spec"]
    hs_fits = []
    grid_shape = tuple(metadata["grid_shape"])
    for job_idx, grid_idx in enumerate(np.ndindex(grid_shape)):
        gridpoint_json = os.path.join(fit_directory, f"gridpoint_{job_idx:06d}.json.bz2")
        logging.info(f"Reading {gridpoint_json}")
        gridpoint_data = from_json(gridpoint_json)
        assert job_idx == gridpoint_data["job_idx"]
        assert np.all(grid_idx == gridpoint_data["grid_idx"])
        # TODO: Offer to run incomplete fits locally
        assert gridpoint_data["fit_successful"], f"job no. {job_idx} not finished"
        hs_fits.append(collections.OrderedDict(
            param_values=gridpoint_data["param_values"],
            hs_fit=gridpoint_data["hs_fit"]
        ))
    combined_data["hs_fits"] = hs_fits
    to_json(combined_data, output_file)
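A minimal driver sketch for the assembly step above; the fit directory and output path are placeholder values, and `load_interpolated_hypersurfaces` (mentioned in the docstring) is only referenced, not called, since its signature is not shown here.

# Hypothetical paths -- point these at the actual cluster fit directory.
fit_directory = "/data/hypersurface_fits/run01"
output_file = "/data/hypersurface_fits/run01_combined.json.bz2"

# Collect every gridpoint_XXXXXX.json.bz2 written by the cluster jobs into the
# single JSON that load_interpolated_hypersurfaces expects (see docstring above).
assemble_interpolated_fits(fit_directory, output_file)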
Example #4
 def saveFile(self, filename):
     """
     Write Fisher matrix to json file
     """
     
     dict_to_write = {}
     dict_to_write['matrix'] = self.matrix
     dict_to_write['parameters'] = self.parameters
     dict_to_write['best_fits'] = self.best_fits
     dict_to_write['labels'] = self.labels
     dict_to_write['priors'] = self.priors
     
     to_json(dict_to_write, filename)
Example #5
def to_file(obj, fname, fmt=None, overwrite=True, warn=True, **kwargs):
    """Dispatch correct file writer based on fmt (if specified) or guess
    based on file name's extension"""
    if fmt is None:
        rootname, ext = os.path.splitext(fname)
        ext = ext.replace('.', '').lower()
    else:
        rootname = fname
        ext = fmt.lower()

    if ext in ZIP_EXTS or ext in XOR_EXTS:
        rootname, inner_ext = os.path.splitext(rootname)
        inner_ext = inner_ext.replace('.', '').lower()
        ext = inner_ext

    if ext in jsons.JSON_EXTS:
        return jsons.to_json(obj,
                             fname,
                             overwrite=overwrite,
                             warn=warn,
                             **kwargs)
    elif ext in hdf.HDF5_EXTS:
        return hdf.to_hdf(obj, fname, overwrite=overwrite, warn=warn, **kwargs)
    elif ext in PKL_EXTS:
        return to_pickle(obj, fname, overwrite=overwrite, warn=warn, **kwargs)
    elif ext in TXT_EXTS:
        if kwargs:
            raise ValueError('Following additional keyword arguments not'
                             ' accepted when writing to text file: %s' %
                             kwargs.keys())
        return to_txt(obj, fname)
    else:
        errmsg = 'Unrecognized file type/extension: ' + ext
        log.logging.error(errmsg)
        raise TypeError(errmsg)
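A short usage sketch for the dispatcher above. Illustrative only: it assumes the `to_file` defined here is in scope and that 'bz2' is among the recognised ZIP_EXTS.

# The writer is chosen from the file extension, or from an explicit fmt override.
data = {"ebins": [1.0, 2.0, 4.0], "czbins": [-1.0, 0.0, 1.0]}

to_file(data, "maps.json")             # -> jsons.to_json
to_file(data, "maps.hdf5")             # -> hdf.to_hdf
to_file(data, "maps.pkl")              # -> to_pickle
to_file(data, "maps.json.bz2")         # zipped extension stripped; inner 'json' decides
to_file(data, "maps.dat", fmt="json")  # explicit fmt overrides the extension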
Example #6
File: transform.py Project: terliuk/pisa
    def to_json(self, filename, **kwargs):
        """Serialize the state to a JSON file that can be instantiated as a new
        object later.

        Parameters
        ----------
        filename : str
            Filename; must be either a relative or absolute path (*not
            interpreted as a PISA resource specification*)
        **kwargs
            Further keyword args are sent to `pisa.utils.jsons.to_json()`

        See Also
        --------
        from_json : Instantiate new object from the file written by this method
        pisa.utils.jsons.to_json

        """
        jsons.to_json(self.serializable_state, filename=filename, **kwargs)
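A simple round-trip with plain dicts and strings, using the `pisa.utils.jsons` helpers that the examples on this page rely on:

from pisa.utils.jsons import from_json, to_json

# Serialize a JSON-friendly state dict and read it back.
state = {"name": "reco", "xform_array": [[0.0, 1.0], [1.0, 0.0]]}
to_json(state, "transform_state.json")
restored = from_json("transform_state.json")
assert restored["name"] == state["name"]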
Example #7
def get_gradients(data_tag, param, template_maker, fiducial_params,
                  grid_settings, store_dir):
    """
  Use the template maker to create all the templates needed to obtain the gradients.
  """
    logging.info("Working on parameter %s." % param)

    steps = get_steps(param, grid_settings, fiducial_params)

    pmaps = {}

    # Generate one template for each value of the parameter in question and store in pmaps
    for param_value in steps:

        # Make the template corresponding to the current value of the parameter
        with Timer() as t:
            maps = template_maker.get_template(
                get_values(
                    dict(
                        fiducial_params, **{
                            param:
                            dict(fiducial_params[param],
                                 **{'value': param_value})
                        })))
        tprofile.info("==> elapsed time for template: %s sec" % t.secs)

        pmaps[param_value] = maps

    # Store the maps used to calculate partial derivatives
    if store_dir != tempfile.gettempdir():
        logging.info("Writing maps for parameter %s to %s" %
                     (param, store_dir))

    to_json(pmaps, os.path.join(store_dir, param + "_" + data_tag + ".json"))

    gradient_map = get_derivative_map(pmaps, fiducial_params[param], degree=2)

    return gradient_map
Example #8
def get_hierarchy_gradients(data_tag, fiducial_maps, fiducial_params,
                            grid_settings, store_dir):
    """
  Use the hierarchy interpolation between the two fiducial maps to obtain the
  gradients.
  """
    logging.info("Working on parameter hierarchy.")

    steps = get_steps('hierarchy', grid_settings, fiducial_params)

    hmap = {step: {'trck': {}, 'cscd': {}} for step in steps}

    for h in steps:
        for channel in ['trck', 'cscd']:
            # Superpose bin counts
            hmap[h][channel]['map'] = fiducial_maps['NMH'][channel][
                'map'] * h + fiducial_maps['IMH'][channel]['map'] * (1. - h)
            # Obtain binning from one of the maps, since identical by construction (cf. FisherAnalysis)
            hmap[h][channel]['ebins'] = fiducial_maps['NMH'][channel]['ebins']
            hmap[h][channel]['czbins'] = fiducial_maps['NMH'][channel][
                'czbins']

    # TODO: give hmap the same structure as pmaps?
    # Get_derivative_map works even if 'params' and 'ebins','czbins' not in 'data'

    # Store the maps used to calculate partial derivatives
    if store_dir != tempfile.gettempdir():
        logging.info("Writing maps for parameter 'hierarchy' to %s" %
                     store_dir)
    to_json(hmap, os.path.join(store_dir, "hierarchy_" + data_tag + ".json"))

    gradient_map = get_derivative_map(hmap,
                                      fiducial_params['hierarchy'],
                                      degree=2)

    return gradient_map
Example #9
def to_file(obj, fname, fmt=None):
    """Dispatch correct file writer based on fmt (if specified) or guess
    based on file name's extension"""
    if fmt is None:
        base, ext = os.path.splitext(fname)
        ext = ext.replace('.', '').lower()
    else:
        ext = fmt.lower()
    if ext in JSON_EXTS:
        return jsons.to_json(obj, fname)
    elif ext in HDF5_EXTS:
        return hdf.to_hdf(obj, fname)
    elif ext in PKL_EXTS:
        return cPickle.dump(obj, file(fname, 'wb'))
    else:
        errmsg = 'Unrecognized file type/extension: ' + ext
        logging.error(errmsg)
        raise TypeError(errmsg)
Example #10
File: utils.py Project: lkijmj/pisa
def to_file(obj, fname, fmt=None):
    """Dispatch correct file writer based on fmt (if specified) or guess
    based on file name's extension"""
    if fmt is None:
        base, ext = os.path.splitext(fname)
        ext = ext.replace('.', '').lower()
    else:
        ext = fmt.lower()
    if ext in JSON_EXTS:
        return jsons.to_json(obj, fname)
    elif ext in HDF5_EXTS:
        return hdf.to_hdf(obj, fname)
    elif ext in PKL_EXTS:
        return cPickle.dump(obj, file(fname, 'wb'))
    else:
        errmsg = 'Unrecognized file type/extension: ' + ext
        logging.error(errmsg)
        raise TypeError(errmsg)
Example #11
 def store_pid_kernels(self, filename):
     """
     Store PID maps in JSON format
     """
     to_json(self.pid_kernels, filename)
Example #12
        if iniargs['prop_height'] is None: iniargs['prop_height'] = 'sample'
        osc_service = NucraftOscillationService(args.ebins, args.czbins,
                                                **iniargs)
    elif args.code == 'table':
        osc_service = TableOscillationService(args.ebins, args.czbins,
                                              **iniargs)
    else:
        raise NotImplementedError('Unknown oscillation service: %s' %
                                  args.code)

    #Do calculation
    logging.info("Calculating oscillation probabilities")
    settings.pop('ebins')
    settings.pop('czbins')
    osc_prob_maps = osc_service.get_osc_prob_maps(**settings)

    #Remove irrelevant parameters from settings
    for par in ['verbose', 'outfile']:  #, 'ebins', 'czbins']:
        settings.pop(par)

    #Write out
    logging.info("Saving output to: %s", args.outfile)
    if fmt == 'JSON':
        osc_prob_maps['params'] = settings
        to_json(osc_prob_maps, args.outfile)
    elif fmt == 'HDF5':
        to_hdf5(osc_prob_maps, args.outfile, settings)
        logging.debug('Wrote %.2f MBytes to %s' %
                      (os.path.getsize(args.outfile) /
                       (1024.**2), os.path.basename(args.outfile)))
Example #13
File: PID.py Project: mdunkman/pisa
      {"nue_cc": {'czbins':[...], 'ebins':[...], 'map':[...]}, 
       "numu_cc": {...},
       "nutau_cc": {...},
       "nuall_nc": {...} }''')

    parser.add_argument('--settings',metavar='SETTINGS',type=from_json,
                        default=from_json(find_resource('pid/V15_pid.json')),
                        help='''json file containing parameterizations of the particle ID for each event type.''')

    parser.add_argument('-o', '--outfile', dest='outfile', metavar='FILE', type=str,
                        action='store',default="pid.json",
                        help='''file to store the output''')
    parser.add_argument('-v', '--verbose', action='count', default=0,
                        help='''set verbosity level''')
    args = parser.parse_args()

    #Set verbosity level
    set_verbosity(args.verbose)

    #Check binning
    ebins, czbins = check_binning(args.reco_event_maps)

    #Initialize the PID service
    pid_service = PIDServicePar(args.settings,ebins,czbins)

    #Calculate event rates after PID
    event_rate_pid = get_pid_maps(args.reco_event_maps,pid_service)
    
    logging.info("Saving output to: %s"%args.outfile)
    to_json(event_rate_pid,args.outfile)
Example #14
    parser.add_argument('-o', '--outfile', dest='outfile', metavar='FILE', type=str,
                        action='store',default="template.json",
                        help='file to store the output')
    args = parser.parse_args()

    set_verbosity(args.verbose)

    profile.info("start initializing")

    #Load all the settings
    model_settings = from_json(args.template_settings)

    #Select a hierarchy
    logging.info('Selected %s hierarchy'%
            ('normal' if args.normal else 'inverted'))
    params =  select_hierarchy(model_settings['params'],normal_hierarchy=args.normal)

    #Intialize template maker
    template_maker = TemplateMaker(get_values(params),**model_settings['binning'])

    profile.info("stop initializing")

    #Now get the actual template
    profile.info("start template calculation")
    template = template_maker.get_template(get_values(params))
    profile.info("stop template calculation")

    #Write out
    logging.info("Saving output to: %s",args.outfile)
    to_json(template, args.outfile)
Example #15
xNO, yNO = np.loadtxt(args.normal,unpack=True)
xIO, yIO = np.loadtxt(args.inverted,unpack=True)

y = []
for NO,IO in zip(yNO,yIO):
    y.append(min(NO,IO))

y = np.array(y)

f = scipy.interpolate.splrep(xNO,-y,s=0)

wholedict = {}
wholedict["params"] = {}
wholedict["params"]["param"] = {}

prior = {}
prior["coeffs"] = f[1]
prior["deg"] = f[2]
prior["knots"] = f[0]
prior["kind"] = "spline"

wholedict["params"]["param"]["prior"] = prior

to_json(wholedict, args.normal.split("NO.txt")[0]+"MarginalisedSplinePrior.json")
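For reference, a hedged sketch of how a prior stored in this format could be evaluated again: the (knots, coeffs, deg) entries are simply the scipy `tck` triple, so `splev` reconstructs the splined curve. The file name below is a placeholder following the naming scheme above.

import numpy as np
import scipy.interpolate
from pisa.utils.jsons import from_json

# Placeholder file name; rebuild the spline from the stored (knots, coeffs, deg).
prior = from_json("exampleMarginalisedSplinePrior.json")["params"]["param"]["prior"]
tck = (np.asarray(prior["knots"]), np.asarray(prior["coeffs"]), int(prior["deg"]))
x = np.linspace(tck[0][0], tck[0][-1], 200)
penalty = scipy.interpolate.splev(x, tck)  # note: the spline was fit to -y above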






Example #16
                        type=str,
                        action='store',
                        default='flux.json',
                        help='file to store the output')
    parser.add_argument('-v',
                        '--verbose',
                        action='count',
                        default=None,
                        help='set verbosity level')
    args = parser.parse_args()

    #Set verbosity level
    set_verbosity(args.verbose)

    logging.debug("Using %u bins in energy from %.2f to %.2f GeV" %
                  (len(args.ebins) - 1, args.ebins[0], args.ebins[-1]))
    logging.debug("Using %u bins in cos(zenith) from %.2f to %.2f" %
                  (len(args.czbins) - 1, args.czbins[0], args.czbins[-1]))

    #Instantiate a flux model
    flux_model = HondaFluxService(args.flux_file)

    #get the flux
    flux_maps = get_flux_maps(flux_model, args.ebins, args.czbins,
                              args.nue_numu_ratio, args.nu_nubar_ratio,
                              args.energy_scale, args.delta_index)

    #write out to a file
    logging.info("Saving output to: %s" % args.outfile)
    to_json(flux_maps, args.outfile)
Example #17
def run_interpolated_fit(fit_directory, job_idx, skip_successful=False):
    """Run the hypersurface fit for a grid point.

    If `skip_successful` is true, do not run if the `fit_successful` flag is already
    True.
    """

    #TODO a lot of this is copied from fit_hypersurfaces in hypersurface.py, would be safer to make more OAOO
    #TODO Copy the param value storage stuff from fit_hypersurfaces across in the meantime

    assert os.path.isdir(fit_directory), "fit directory does not exist"

    gridpoint_json = os.path.join(fit_directory,
                                  f"gridpoint_{job_idx:06d}.json.bz2")
    gridpoint_data = from_json(gridpoint_json)

    if skip_successful and gridpoint_data["fit_successful"]:
        logging.info(
            f"Fit at job index {job_idx} already successful, skipping...")
        return

    metadata = from_json(os.path.join(fit_directory, "metadata.json"))

    interpolation_param_spec = metadata["interpolation_param_spec"]

    # this is a pipeline configuration in the form of an OrderedDict
    nominal_dataset = metadata["nominal_dataset"]
    # Why can we still not load PISA objects from JSON that are inside a dict?! Grrr...
    nominal_dataset["pipeline_cfg"] = pipeline_cfg_from_states(
        nominal_dataset["pipeline_cfg"])
    # this is a list of pipeline configurations
    sys_datasets = metadata["sys_datasets"]
    for sys_dataset in sys_datasets:
        sys_dataset["pipeline_cfg"] = pipeline_cfg_from_states(
            sys_dataset["pipeline_cfg"])
    # this is a dict of param_name : value pairs
    param_values = gridpoint_data["param_values"]
    # we do a redundant check to make sure the parameter values at this grid point are
    # correct
    interpolation_param_names = metadata["interpolation_param_names"]
    grid_shape = tuple(metadata["grid_shape"])
    # the grid point index of this job
    grid_idx = list(np.ndindex(grid_shape))[job_idx]
    for i, n in enumerate(interpolation_param_names):
        ms = "Inconsistent parameter values at grid point!"
        assert interpolation_param_spec[n]["values"][
            grid_idx[i]] == param_values[n], ms

    # now we need to adjust the values of the parameter in all pipelines for this point
    logging.info(f"updating pipelines with parameter values: {param_values}")
    for dataset in [nominal_dataset] + sys_datasets:
        for stage_cfg in dataset["pipeline_cfg"].values():
            if "params" not in stage_cfg.keys(): continue
            for param in interpolation_param_names:
                if param in stage_cfg["params"].names:
                    stage_cfg["params"][param].value = param_values[param]

    # these are the parameters of the hypersurface, NOT the ones we interpolate them
    # over!
    hypersurface_params = []
    for param_state in metadata["hypersurface_params"]:
        hypersurface_params.append(HypersurfaceParam.from_state(param_state))

    def find_hist_stage(pipeline):
        """Locate the index of the hist stage in a pipeline."""
        hist_idx_found = False
        for i, s in enumerate(pipeline.stages):
            if s.__class__.__name__ == "hist":
                hist_idx = i
                hist_idx_found = True
                break
        if not hist_idx_found:
            raise RuntimeError(
                "Could not find histogram stage in pipeline, aborting.")
        return hist_idx

    # We create Pipeline objects, get their outputs and then forget about the Pipeline
    # object on purpose! The memory requirement to hold all systematic sets at the same
    # time is just too large, especially on the cluster. The way we do it below we
    # only need enough memory for one dataset at a time.

    for dataset in [nominal_dataset] + sys_datasets:
        pipeline = Pipeline(dataset["pipeline_cfg"])
        dataset["mapset"] = pipeline.get_outputs()
        # get the un-weighted event counts as well so that we can exclude bins
        # with too little statistics
        # First, find out which stage is the hist stage
        hist_idx = find_hist_stage(pipeline)
        pipeline.stages[hist_idx].unweighted = True
        dataset["mapset_unweighted"] = pipeline.get_outputs()
    del pipeline

    # Merge maps according to the combine regex, if one was provided
    combine_regex = metadata["combine_regex"]
    if combine_regex is not None:
        for dataset in [nominal_dataset] + sys_datasets:
            dataset["mapset"] = dataset["mapset"].combine_re(combine_regex)
            dataset["mapset_unweighted"] = dataset[
                "mapset_unweighted"].combine_re(combine_regex)

    minimum_mc = metadata["minimum_mc"]
    # Remove bins (i.e. set their count to zero) that have too few MC events
    for dataset in sys_datasets + [nominal_dataset]:
        for map_name in dataset["mapset"].names:
            insuff_mc = dataset["mapset_unweighted"][
                map_name].nominal_values < minimum_mc
            # Setting the hist to zero sets both nominal value and std_dev to zero
            dataset["mapset"][map_name].hist[insuff_mc] = 0.

    hypersurface_fit_kw = metadata["hypersurface_fit_kw"]
    hypersurfaces = collections.OrderedDict()
    log = metadata[
        "log"]  # flag determining whether hs fit is run in log-space or not
    for map_name in nominal_dataset["mapset"].names:
        nominal_map = nominal_dataset["mapset"][map_name]
        nominal_param_values = nominal_dataset["sys_params"]

        sys_maps = [
            sys_dataset["mapset"][map_name] for sys_dataset in sys_datasets
        ]
        sys_param_values = [
            sys_dataset["sys_params"] for sys_dataset in sys_datasets
        ]

        hypersurface = Hypersurface(
            # Yes, this MUST be a deepcopy! Otherwise weird memory overwrites happen
            # and all the numbers get jumbled across the hypersurfaces of different maps
            params=copy.deepcopy(hypersurface_params),
            initial_intercept=0. if log else 1.,  # Initial value for intercept
            log=log)

        hypersurface.fit(
            nominal_map=nominal_map,
            nominal_param_values=nominal_param_values,
            sys_maps=sys_maps,
            sys_param_values=sys_param_values,
            norm=True,
            # Is the space or loading time really a problem?
            # keep_maps=False,  # it would take a lot more space otherwise
            **hypersurface_fit_kw)

        logging.debug("\nFitted hypersurface report:\n%s" % hypersurface)
        hypersurfaces[map_name] = hypersurface

    gridpoint_data["hs_fit"] = hypersurfaces
    gridpoint_data["fit_successful"] = True

    to_json(gridpoint_data, gridpoint_json)
Example #18
                    templates[str(run_num)]['trck'] = template['trck']['map']
                    templates[str(run_num)]['cscd'] = template['cscd']['map']
                    templates[str(run_num)]['dom_eff'] = run_dict[str(run_num)]['dom_eff']
                    templates[str(run_num)]['hole_ice'] = run_dict[str(run_num)]['hole_ice']

                    # Get MC events map 
                    MCMap = GetMCError(get_values(DH_template_settings['params']), DH_template_settings['binning']['ebins'], DH_template_settings['binning']['czbins'], reco_mc_file)
                    tmap_MC = MCMap.get_mc_events_map(True, get_values(DH_template_settings['params']), reco_mc_file)
                    MCmaps[str(run_num)]['trck'] = tmap_MC['trck']['map']
                    MCmaps[str(run_num)]['cscd'] = tmap_MC['cscd']['map']
    
                #Assemble output dict
                output_template = {'templates' : templates,
                                   'MCmaps': MCmaps,
                                   'template_settings' : template_settings}
                to_json(output_template, outdir+'%s_%s_DomEff_HoleIce_templates_%i_by_%i.json'%(OutputFilePrefix, MC_name,len(ebin_edges)-1,len(czbin_edges)-1))

            # if templates already saved
            else:
                output_template = from_json(outdir+ '%s_DomEff_HoleIce_templates_10_by_16.json'%args.sim) 
                templates = output_template['templates']
                MCmaps = output_template['MCmaps']

            # Do fits (linear and quadratic) for each bin
            for flav in ['trck','cscd']:
                templ_list = []
                templ_err_list = []
                dom_eff_list = []
                hole_ice_list = []
                k_DE_linear = np.empty(np.shape(templates[run_nominal][flav])) 
                k_DE_quad = np.empty(np.shape(templates[run_nominal][flav])) 
Example #19
def run_interpolated_fit(fit_directory, job_idx, skip_successful=False):
    """Run the hypersurface fit for a grid point.
    
    If `skip_successful` is true, do not run if the `fit_successful` flag is already
    True.
    """
    
    assert os.path.isdir(fit_directory), "fit directory does not exist"
    
    gridpoint_json = os.path.join(fit_directory, f"gridpoint_{job_idx:06d}.json.bz2")
    gridpoint_data = from_json(gridpoint_json)

    if skip_successful and gridpoint_data["fit_successful"]:
        logging.info(f"Fit at job index {job_idx} already successful, skipping...")
        return

    metadata = from_json(os.path.join(fit_directory, "metadata.json"))
    
    interpolation_param_spec = metadata["interpolation_param_spec"]
    
    # this is a pipeline configuration in the form of an OrderedDict
    nominal_dataset = metadata["nominal_dataset"]
    # Why can we still not load PISA objects from JSON that are inside a dict?! Grrr...
    nominal_dataset["pipeline_cfg"] = pipeline_cfg_from_states(
        nominal_dataset["pipeline_cfg"]
    )
    # this is a list of pipeline configurations
    sys_datasets = metadata["sys_datasets"]
    for sys_dataset in sys_datasets:
        sys_dataset["pipeline_cfg"] = pipeline_cfg_from_states(
            sys_dataset["pipeline_cfg"]
        )
    # this is a dict of param_name : value pairs
    param_values = gridpoint_data["param_values"]
    # we do a redundant check to make sure the parameter values at this grid point are
    # correct
    interpolation_param_names = metadata["interpolation_param_names"]
    grid_shape = tuple(metadata["grid_shape"])
    # the grid point index of this job
    grid_idx = list(np.ndindex(grid_shape))[job_idx]
    for i, n in enumerate(interpolation_param_names):
        ms = "Inconsistent parameter values at grid point!"
        assert interpolation_param_spec[n]["values"][grid_idx[i]] == param_values[n], ms
    
    # now we need to adjust the values of the parameter in all pipelines for this point
    logging.info(f"updating pipelines with parameter values: {param_values}")
    for dataset in [nominal_dataset] + sys_datasets:
        for stage_cfg in dataset["pipeline_cfg"].values():
            if "params" not in stage_cfg.keys(): continue
            for param in interpolation_param_names:
                if param in stage_cfg["params"].names:
                    stage_cfg["params"][param].value = param_values[param]
    
    # these are the parameters of the hypersurface, NOT the ones we interpolate them
    # over!
    hypersurface_params = []
    for param_state in metadata["hypersurface_params"]:
        hypersurface_params.append(HypersurfaceParam.from_state(param_state))
    
    # We create Pipeline objects, get their outputs and then forget about the Pipeline
    # object on purpose! The memory requirement to hold all systematic sets at the same
    # time is just too large, especially on the cluster. The way we do it below we
    # only need enough memory for one dataset at a time.
    nominal_dataset["mapset"] = Pipeline(nominal_dataset["pipeline_cfg"]).get_outputs()
    for sys_dataset in sys_datasets:
        sys_dataset["mapset"] = Pipeline(sys_dataset["pipeline_cfg"]).get_outputs()
    
    # Merge maps according to the combine regex, if one was provided
    combine_regex = metadata["combine_regex"]
    if combine_regex is not None:
        nominal_dataset["mapset"] = nominal_dataset["mapset"].combine_re(combine_regex)
        for sys_dataset in sys_datasets:
            sys_dataset["mapset"] = sys_dataset["mapset"].combine_re(combine_regex)

    hypersurface_fit_kw = metadata["hypersurface_fit_kw"]
    hypersurfaces = collections.OrderedDict()
    log = metadata["log"]  # flag determining whether hs fit is run in log-space or not
    for map_name in nominal_dataset["mapset"].names:
        nominal_map = nominal_dataset["mapset"][map_name]
        nominal_param_values = nominal_dataset["sys_params"]

        sys_maps = [sys_dataset["mapset"][map_name] for sys_dataset in sys_datasets]
        sys_param_values = [sys_dataset["sys_params"] for sys_dataset in sys_datasets]

        hypersurface = Hypersurface(
            # Yes, this MUST be a deepcopy! Otherwise weird memory overwrites happen
            # and all the numbers get jumbled across the hypersurfaces of different maps
            params=copy.deepcopy(hypersurface_params),
            initial_intercept=0. if log else 1.,  # Initial value for intercept
            log=log
        )

        hypersurface.fit(
            nominal_map=nominal_map,
            nominal_param_values=nominal_param_values,
            sys_maps=sys_maps,
            sys_param_values=sys_param_values,
            norm=True,
            # Is the space or loading time really a problem?
            # keep_maps=False,  # it would take a lot more space otherwise
            **hypersurface_fit_kw
        )

        logging.debug("\nFitted hypersurface report:\n%s" % hypersurface)
        hypersurfaces[map_name] = hypersurface

    gridpoint_data["hs_fit"] = hypersurfaces
    gridpoint_data["fit_successful"] = True
    
    to_json(gridpoint_data, gridpoint_json)
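When no cluster is available, the prepared grid points can in principle be looped over locally. A hedged sketch: it assumes `run_interpolated_fit` and `assemble_interpolated_fits` from the examples above are in scope, and the fit directory is a placeholder.

import os
import numpy as np
from pisa.utils.jsons import from_json

fit_directory = "/data/hypersurface_fits/run01"  # placeholder path
grid_shape = tuple(from_json(os.path.join(fit_directory, "metadata.json"))["grid_shape"])

# One job per grid point; skip_successful makes re-runs after a partial failure cheap.
for job_idx in range(int(np.prod(grid_shape))):
    run_interpolated_fit(fit_directory, job_idx, skip_successful=True)

assemble_interpolated_fits(fit_directory, "combined_fits.json.bz2")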
Example #20
 def store_pid_kernels(self, filename):
     """
     Store PID maps in JSON format
     """
     to_json(self.pid_kernels, filename)
Example #21
papa_NC = parameterizations['NC']
egy_res = papa_NC['e']
papa_NC.pop('e')
papa_NC['energy'] = egy_res

pisa_reco_settings = {}
for flav in pisa_flavs:
    papa_res = parameterizations[flav]
    egy_res = papa_res['e']
    papa_res.pop('e')
    papa_res['energy'] = egy_res

    for ID in mID:
        pisa_reco_settings[flav + ID] = {'cc': {}, 'nc': {}}
        pisa_reco_settings[flav + ID]['cc'] = copy(papa_res)
        pisa_reco_settings[flav + ID]['nc'] = copy(papa_NC)

to_json(pisa_reco_settings, args.outfile)

# Now, read in the file as if it were a text file and replace n. with np.
fh = open(args.outfile, 'r')
output_lines = []
for line in fh.readlines():
    output_lines.append(line.replace("n.", "np."))
fh.close()

fh = open(args.outfile, 'w')
for line in output_lines:
    fh.write(line)
fh.close()
Example #22
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter

from pisa.utils.jsons import from_json, to_json
import numpy as np

parser = ArgumentParser(description='''Determines the false_h_best fiducial distribution, under the Gaussian assumption.''',
                        formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('-p','--pisa_file', type=str, required=True,
                    help="File containing PISA V2 event by event data")
args = parser.parse_args()

pisa_dict = from_json(args.pisa_file)['pid']['reduced']

to_json(pisa_dict,'better.json')
    
Example #23
    if args.code=='prob3':
      if iniargs['prop_height'] is None: iniargs['prop_height'] = 20.0
      osc_service = Prob3OscillationService(ebins, czbins, **iniargs)
    elif args.code=='nucraft':
      if iniargs['prop_height'] is None: iniargs['prop_height'] = 'sample'
      osc_service = NucraftOscillationService(ebins, czbins, **iniargs)
    elif args.code=='gpu':
        settings = vars(args)
        osc_service = Prob3GPUOscillationService(ebins, czbins, **settings)
    else:
      osc_service = TableOscillationService(ebins, czbins, **iniargs)

    logging.info("Getting osc prob maps")
    with Timer(verbose=False) as t:
        osc_flux_maps = get_osc_flux(args.flux_maps, osc_service,
                                     deltam21 = args.deltam21,
                                     deltam31 = args.deltam31,
                                     deltacp = args.deltacp,
                                     theta12 = args.theta12,
                                     theta13 = args.theta13,
                                     theta23 = args.theta23,
                                     oversample_e = args.oversample_e,
                                     oversample_cz = args.oversample_cz,
                                     energy_scale = args.energy_scale,
                                     YeI=args.YeI, YeO=args.YeO, YeM=args.YeM)
    print "       ==> elapsed time to get osc flux maps: %s sec"%t.secs

    #Write out
    logging.info("Saving output to: %s",args.outfile)
    to_json(osc_flux_maps, args.outfile)
Example #24
        # Second for anti-neutrinos:
        kNuBar = -1
        barger_prop.SetMNS(sin2th12Sq, sin2th13Sq, sin2th23Sq, args.deltam21,
                           mAtm, args.deltacp, energy, kSquared, kNuBar)
        barger_prop.DefinePath(coszen, args.prod_height)
        barger_prop.propagate(kNuBar)

        for nu in ['nue_bar', 'numu_bar']:
            nu_i = nu_barger[nu]
            for to_nu in anti_neutrinos:
                nu_f = nu_barger[to_nu]
                osc_prob_dict[nu +
                              '_maps'][to_nu][ie][icz] = barger_prop.GetProb(
                                  nu_i, nu_f)

logging.info("Saving to file: %s" % args.outfile)
to_json(osc_prob_dict, args.outfile)

logging.info("Finished in %s seconds!" % (datetime.now() - start_time))

if args.plot:
    from matplotlib import pyplot as plt
    from pisa.utils.plot import show_map
    pmap = {
        'map': osc_prob_dict['numu_maps']['numu'],
        'ebins': osc_prob_dict['ebins'],
        'czbins': osc_prob_dict['czbins']
    }
    show_map(pmap)
    plt.show()
Example #25
def get_fisher_matrices(template_settings,
                        grid_settings,
                        IMH=True,
                        NMH=False,
                        dump_all_stages=False,
                        save_templates=False,
                        outdir=None):
    '''
  Main function that runs the Fisher analysis for the chosen hierarchy(ies) (inverted by default).

  Returns a dictionary of Fisher matrices, in the format:
  {'IMH': {'cscd': [...],
          'trck': [...],
          'comb': [...],
          },
  'NMH': {'cscd': [...],
          'trck': [...],
          'comb': [...],
         }
  }

  If save_templates=True and no hierarchy is given, only fiducial templates will be written out;
  if one is given, then the templates used to obtain the gradients will be written out in
  addition.
  '''
    if outdir is None and (save_templates or dump_all_stages):
        logging.info(
            "No output directory specified. Will save templates to current working directory."
        )
        outdir = os.getcwd()

    tprofile.info("start initializing")

    # Get the parameters
    params = template_settings['params']
    bins = template_settings['binning']

    # Artifically add the hierarchy parameter to the list of parameters
    # The method get_hierarchy_gradients below will know how to deal with it
    params['hierarchy_nh'] = {
        "value": 1.,
        "range": [0., 1.],
        "fixed": False,
        "prior": None
    }
    params['hierarchy_ih'] = {
        "value": 0.,
        "range": [0., 1.],
        "fixed": False,
        "prior": None
    }

    chosen_data = []
    if IMH:
        chosen_data.append(('IMH', False))
        logging.info("Fisher matrix will be built for IMH.")
    if NMH:
        chosen_data.append(('NMH', True))
        logging.info("Fisher matrix will be built for NMH.")
    if chosen_data == []:
        # In this case, only the fiducial maps (for both hierarchies) will be written
        logging.info("No Fisher matrices will be built.")

    # There is no sense in performing any of the following steps if no Fisher matrices are to be built
    # and no templates are to be saved.
    if chosen_data != [] or dump_all_stages or save_templates:

        # Initialise return dict to hold Fisher matrices
        fisher = {
            data_tag: {
                'cscd': [],
                'trck': [],
                'comb': []
            }
            for data_tag, data_normal in chosen_data
        }

        # Get a template maker with the settings used to initialize
        template_maker = TemplateMaker(get_values(params), **bins)

        tprofile.info("stop initializing\n")

        # Generate fiducial templates for both hierarchies (needed for partial derivatives
        # w.r.t. hierarchy parameter)
        fiducial_maps = {}
        for hierarchy in ['NMH', 'IMH']:

            logging.info("Generating fiducial templates for %s." % hierarchy)

            # Get the fiducial parameter values corresponding to this hierarchy
            fiducial_params = select_hierarchy(
                params, normal_hierarchy=(hierarchy == 'NMH'))

            # Generate fiducial maps, either all of them or only the ultimate one
            tprofile.info("start template calculation")
            with Timer() as t:
                fid_maps = template_maker.get_template(
                    get_values(fiducial_params), return_stages=dump_all_stages)
            tprofile.info("==> elapsed time for template: %s sec" % t.secs)

            fiducial_maps[
                hierarchy] = fid_maps[4] if dump_all_stages else fid_maps

            # save fiducial map(s)
            # all stages
            if dump_all_stages:
                stage_names = ("0_unoscillated_flux", "1_oscillated_flux",
                               "2_oscillated_counts", "3_reco", "4_pid")
                stage_maps = {}
                for stage in xrange(0, len(fid_maps)):
                    stage_maps[stage_names[stage]] = fid_maps[stage]
                logging.info(
                    "Writing fiducial maps (all stages) for %s to %s." %
                    (hierarchy, outdir))
                to_json(stage_maps,
                        os.path.join(outdir, "fid_map_" + hierarchy + ".json"))
            # only the final stage
            elif save_templates:
                logging.info(
                    "Writing fiducial map (final stage) for %s to %s." %
                    (hierarchy, outdir))
                to_json(fiducial_maps[hierarchy],
                        os.path.join(outdir, "fid_map_" + hierarchy + ".json"))

        # Get_gradients and get_hierarchy_gradients will both (temporarily)
        # store the templates used to generate the gradient maps
        store_dir = outdir if save_templates else tempfile.gettempdir()

        # Calculate Fisher matrices for the user-defined cases (NHM true and/or IMH true)
        for data_tag, data_normal in chosen_data:

            logging.info("Running Fisher analysis for %s." % (data_tag))

            # The fiducial params are selected from the hierarchy case that does NOT match
            # the data, as we are varying from this model to find the 'best fit'
            fiducial_params = select_hierarchy(params, not data_normal)

            # Get the free parameters (i.e. those for which the gradients should be calculated)
            free_params = select_hierarchy(get_free_params(params),
                                           not data_normal)
            gradient_maps = {}
            for param in free_params.keys():
                # Special treatment for the hierarchy parameter
                if param == 'hierarchy':
                    gradient_maps[param] = get_hierarchy_gradients(
                        data_tag=data_tag,
                        fiducial_maps=fiducial_maps,
                        fiducial_params=fiducial_params,
                        grid_settings=grid_settings,
                        store_dir=store_dir)
                else:
                    gradient_maps[param] = get_gradients(
                        data_tag=data_tag,
                        param=param,
                        template_maker=template_maker,
                        fiducial_params=fiducial_params,
                        grid_settings=grid_settings,
                        store_dir=store_dir)

            logging.info("Building Fisher matrix for %s." % (data_tag))

            # Build Fisher matrices for the given hierarchy
            fisher[data_tag] = build_fisher_matrix(
                gradient_maps=gradient_maps,
                fiducial_map=fiducial_maps['IMH']
                if data_normal else fiducial_maps['NMH'],
                template_settings=fiducial_params)

            # If Fisher matrices exist for both channels, add the matrices to obtain
            # the combined one.
            if len(fisher[data_tag].keys()) > 1:
                fisher[data_tag]['comb'] = FisherMatrix(
                    matrix=np.array([
                        f.matrix for f in fisher[data_tag].itervalues()
                    ]).sum(axis=0),
                    parameters=gradient_maps.keys(),  #order is important here!
                    best_fits=[
                        fiducial_params[par]['value']
                        for par in gradient_maps.keys()
                    ],
                    priors=[
                        Prior.from_param(fiducial_params[par])
                        for par in gradient_maps.keys()
                    ],
                )
        return fisher

    else:
        logging.info("Nothing to be done.")
        return {}
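A hedged usage sketch for the function above; the settings file names are placeholders, and it assumes `from_json` and the `FisherMatrix.saveFile` method from Example #4 are available.

from pisa.utils.jsons import from_json

# Placeholder settings files with the structure this module expects.
template_settings = from_json("template_settings.json")
grid_settings = from_json("grid_settings.json")

fisher = get_fisher_matrices(template_settings, grid_settings,
                             IMH=True, NMH=False,
                             save_templates=True, outdir="fisher_out")

# Per the docstring, results are nested as fisher[hierarchy][channel];
# the combined matrix can be written out with saveFile (Example #4).
fisher['IMH']['comb'].saveFile("fisher_IMH_comb.json")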
Example #26
                
        # Second for anti-neutrinos:
        kNuBar = -1
        barger_prop.SetMNS(sin2th12Sq,sin2th13Sq,sin2th23Sq,args.deltam21,mAtm,
                           args.deltacp,energy,kSquared,kNuBar)
        barger_prop.DefinePath(coszen, args.prod_height)
        barger_prop.propagate(kNuBar)
        
        for nu in ['nue_bar','numu_bar']:
            nu_i = nu_barger[nu]
            for to_nu in anti_neutrinos:
                nu_f = nu_barger[to_nu]
                osc_prob_dict[nu+'_maps'][to_nu][ie][icz]=barger_prop.GetProb(nu_i, nu_f)


logging.info("Saving to file: %s"%args.outfile)
to_json(osc_prob_dict,args.outfile)

logging.info("Finished in %s seconds!"%(datetime.now() - start_time))

if args.plot:
    from matplotlib import pyplot as plt
    from pisa.utils.plot import show_map
    pmap = {'map':osc_prob_dict['numu_maps']['numu'],
            'ebins':osc_prob_dict['ebins'],
            'czbins': osc_prob_dict['czbins']}
    show_map(pmap)
    plt.show()
    
    
Example #27
def prepare_interpolated_fit(nominal_dataset,
                             sys_datasets,
                             params,
                             fit_directory,
                             interpolation_param_spec,
                             combine_regex=None,
                             log=False,
                             minimum_mc=0,
                             **hypersurface_fit_kw):
    '''
    Writes steering files for fitting hypersurfaces on a grid of arbitrary parameters.
    The fits can then be run on a cluster with `run_interpolated_fit`.

    Parameters
    ----------
    nominal_dataset : dict
        Definition of the nominal dataset. Specifies the pipeline with which the maps
        can be created, and the values of all systematic parameters used to produce the
        dataset.
        Format must be:
            nominal_dataset = {
                "pipeline_cfg" = <pipeline cfg file (either cfg file path or dict)>),
                "sys_params" = { param_0_name : param_0_value_in_dataset, ..., param_N_name : param_N_value_in_dataset }
            }
        Sys params must correspond to the provided HypersurfaceParam instances provided
        in the `params` arg.

    sys_datasets : list of dicts
        List of dicts, where each dict defines one of the systematics datasets to be
        fitted. The format of each dict is the same as explained for `nominal_dataset`

    params : list of HypersurfaceParams
        List of HypersurfaceParams instances that define the hypersurface. Note that
        this defines ALL hypersurfaces fitted in this function, i.e. only a single
        parameterisation is supported for all maps (this is almost always what you want).

    fit_directory : str
        Directory in which the fits will be run. Steering files for the fits to be run
        will be stored here.

    combine_regex : list of str, or None
        List of string regex expressions that will be used for merging maps. Used to
        combine similar species. Must be something that can be passed to the
        `MapSet.combine_re` function (see that function's docs for more details). Choose
        `None` if you do not want to perform this merging.

    interpolation_param_spec : collections.OrderedDict
        Specification of parameter grid that hypersurfaces should be interpolated over.
        The dict should have the following form::
            interpolation_param_spec = {
                'param1': {"values": [val1_1, val1_2, ...], "scales_log": True/False}
                'param2': {"values": [val2_1, val2_2, ...], "scales_log": True/False}
                ...
                'paramN': {"values": [valN_1, valN_2, ...], "scales_log": True/False}
            }
        The hypersurfaces will be fit on an N-dimensional rectilinear grid over
        parameters 1 to N. The flag `scales_log` indicates that the interpolation over
        that parameter should happen in log-space.

    minimum_mc : int, optional
        Minimum number of un-weighted MC events required in each bin.

    hypersurface_fit_kw : kwargs
        kwargs will be passed on to the calls to `Hypersurface.fit`
    '''

    # Take (deep) copies of lists/dicts to avoid modifying the originals
    # Useful for cases where this function is called in a loop (e.g. leave-one-out tests)
    nominal_dataset = copy.deepcopy(nominal_dataset)
    sys_datasets = copy.deepcopy(sys_datasets)
    params = copy.deepcopy(params)

    # Check types
    assert isinstance(sys_datasets, collections.Sequence)
    assert isinstance(params, collections.Sequence)
    assert isinstance(fit_directory, str)
    # there must not be any ambiguity between fitting the hypersurfaces and
    # interpolating them later
    msg = "interpolation params must be specified as a dict with ordered keys"
    assert isinstance(interpolation_param_spec, collections.OrderedDict), msg
    for k, v in interpolation_param_spec.items():
        assert set(v.keys()) == {"values", "scales_log"}
        assert isinstance(v["values"], collections.Sequence)
        # We need to extract the magnitudes from the Quantities to avoid a
        # UnitStrippedWarning. For some reason, doing `np.min(v["values"])` messes up
        # the data structure inside the values in a way that can cause a crash when we
        # try to serialize the values later. Lesson: Stripping units inadvertently can
        # have strange, unforeseen consequences.
        mags = [x.m for x in v["values"]]
        if v["scales_log"] and np.min(mags) <= 0:
            raise ValueError(
                "A log-scaling parameter cannot be equal to or less "
                "than zero!")

    # Check output format and path
    assert os.path.isdir(fit_directory), "fit directory does not exist"

    # Check formatting of datasets is as expected
    all_datasets = [nominal_dataset] + sys_datasets
    for dataset in all_datasets:
        assert isinstance(dataset, collections.Mapping)
        assert "pipeline_cfg" in dataset
        assert isinstance(dataset["pipeline_cfg"], (str, collections.Mapping))
        assert "sys_params" in dataset
        assert isinstance(dataset["sys_params"], collections.Mapping)

        dataset["pipeline_cfg"] = serialize_pipeline_cfg(
            dataset["pipeline_cfg"])

    # Check params
    assert len(params) >= 1
    for p in params:
        assert isinstance(p, HypersurfaceParam)

    # Report inputs
    msg = "Hypersurface fit details :\n"
    msg += f"  Num params            : {len(params)}\n"
    msg += f"  Num fit coefficients  : {sum([p.num_fit_coeffts for p in params])}\n"
    msg += f"  Num datasets          : 1 nominal + {len(sys_datasets)} systematics\n"
    msg += f"  Nominal values        : {nominal_dataset['sys_params']}\n"
    msg += "Hypersurface fits are prepared on the following grid:\n"
    msg += str(interpolation_param_spec)
    logging.info(msg)

    # because we require this to be an OrderedDict, there is no ambiguity in the
    # construction of the mesh here
    param_names = list(interpolation_param_spec.keys())
    grid_shape = tuple(
        len(v["values"]) for v in interpolation_param_spec.values())

    # We store all information needed to run a fit in metadata
    metadata = collections.OrderedDict(
        interpolation_param_spec=interpolation_param_spec,
        interpolation_param_names=param_names,  # convenience
        grid_shape=grid_shape,  # convenience
        nominal_dataset=nominal_dataset,
        sys_datasets=sys_datasets,
        hypersurface_params=params,
        combine_regex=combine_regex,
        log=log,
        minimum_mc=minimum_mc,
        hypersurface_fit_kw=hypersurface_fit_kw)

    to_json(metadata, os.path.join(fit_directory, "metadata.json"))

    # we write one JSON file for each grid point
    for job_idx, grid_idx in enumerate(np.ndindex(grid_shape)):
        # Although this is technically redundant, we store the parameter values
        # explicitly for each grid point.
        param_values = {}
        for i, n in enumerate(param_names):
            param_values[n] = interpolation_param_spec[n]["values"][
                grid_idx[i]]

        gridpoint_data = {
            "param_values": param_values,
            "hs_fit": None,
            "job_idx": job_idx,
            "grid_idx": grid_idx,
            "fit_successful": False
        }
        to_json(
            gridpoint_data,
            os.path.join(fit_directory, f"gridpoint_{job_idx:06d}.json.bz2"))

    logging.info(
        f"Grid fit preparation complete! Total number of jobs: {job_idx+1}")
    return job_idx + 1  # zero-indexing
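A hedged sketch of the input format described in the docstring above. Every path, parameter name and value is a placeholder, `hypersurface_params` stands in for a list of HypersurfaceParam instances built elsewhere, and the grid values are given as Pint quantities because the fit code strips their magnitudes with `.m` (the `ureg` import is assumed).

import collections
from pisa import ureg  # assumed unit-registry import

# Placeholder datasets: one nominal pipeline plus two systematic variations.
nominal_dataset = {
    "pipeline_cfg": "settings/pipeline/nominal.cfg",
    "sys_params": {"dom_eff": 1.0, "hole_ice": 25.0},
}
sys_datasets = [
    {"pipeline_cfg": "settings/pipeline/domeff_0.9.cfg",
     "sys_params": {"dom_eff": 0.9, "hole_ice": 25.0}},
    {"pipeline_cfg": "settings/pipeline/holeice_30.cfg",
     "sys_params": {"dom_eff": 1.0, "hole_ice": 30.0}},
]

# Hypersurfaces are re-fit at every point of this (here 1-D) grid.
interpolation_param_spec = collections.OrderedDict(
    deltam31={"values": [2.3e-3 * ureg.eV**2, 2.5e-3 * ureg.eV**2, 2.7e-3 * ureg.eV**2],
              "scales_log": False},
)

n_jobs = prepare_interpolated_fit(
    nominal_dataset, sys_datasets,
    params=hypersurface_params,                     # list of HypersurfaceParam, defined elsewhere
    fit_directory="/data/hypersurface_fits/run01",  # must already exist
    interpolation_param_spec=interpolation_param_spec,
    combine_regex=["nue.*_cc", "numu.*_cc"],
)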
Example #28
File: PID.py Project: mamday/pisa
                        help='JSON file containing parameterizations '
                        'of the particle ID \nfor each event type.')
    parser.add_argument('--kernel_file', metavar='JSON', type=str, default=None,
                        help='JSON file containing pre-calculated PID kernels')
    parser.add_argument('-o', '--outfile', dest='outfile', metavar='FILE', type=str,
                        action='store',default="pid.json",
                        help='''file to store the output''')
    parser.add_argument('-v', '--verbose', action='count', default=None,
                        help='''set verbosity level''')
    args = parser.parse_args()

    #Set verbosity level
    set_verbosity(args.verbose)

    #Check binning
    ebins, czbins = check_binning(args.reco_event_maps)

    #Initialize the PID service
    if args.mode=='param':
        pid_service = PIDServiceParam(ebins, czbins, 
                            pid_paramfile=args.param_file, **vars(args))
    elif args.mode=='stored':
        pid_service = PIDServiceKernelFile(ebins, czbins, 
                            pid_kernelfile=args.kernel_file, **vars(args))

    #Calculate event rates after PID
    event_rate_pid = get_pid_maps(args.reco_event_maps, pid_service=pid_service)

    logging.info("Saving output to: %s"%args.outfile)
    to_json(event_rate_pid,args.outfile)
Example #29
                    llh_data = find_max_llh_bfgs(fmap,
                                                 template_maker,
                                                 template_settings['params'],
                                                 minimizer_settings,
                                                 args.save_steps,
                                                 normal_hierarchy=hypo_normal)
                profile.info("==> elapsed time for optimizer: %s sec" % t.secs)

                # Store the LLH data
                results[data_tag][hypo_tag] = llh_data

        # Store this trial
        trials += [results]
        profile.info("stop trial %d" % itrial)

except:
    logging.warn("ERROR IN TRIAL %i, so outputting what we have now!!" %
                 itrial)

#Assemble output dict
output = {
    'trials': trials,
    'template_settings': template_settings,
    'minimizer_settings': minimizer_settings
}
if args.pseudo_data_settings is not None:
    output['pseudo_data_settings'] = pseudo_data_settings

#And write to file
to_json(output, args.outfile)
Example #30
                title=flavInt.tex(grp, d=1) + leg_sfx,
            )
            leg.get_title().set_fontsize(16)

    # Derive output filename
    outfname = ('pid_energy_dependence__%s_%s__runs_%s__proc_%s__pid_%s.json' %
                (
                    events.metadata['detector'],
                    events.metadata['geom'],
                    utils.list2hrlist(events.metadata['runs']),
                    events.metadata['proc_ver'],
                    args.pid_ver,
                ))
    outfpath = os.path.join(outdir, outfname)
    logging.info('Saving PID energy dependence info to file "%s"' % outfpath)
    jsons.to_json(esmooth_store, outfpath)

    if make_plots:
        fig.tight_layout(rect=(0, 0, 1, 0.96))
        basefname = ('pid_energy_dependence__%s_%s__runs_%s__proc_%s__pid_%s' %
                     (events.metadata['detector'], events.metadata['geom'],
                      utils.list2hrlist(events.metadata['runs']),
                      events.metadata['proc_ver'], args.pid_ver))
        fig.savefig(os.path.join(outdir, basefname + '.pdf'))
        fig.savefig(os.path.join(outdir, basefname + '.png'))

    #===============================================================================
    # COSZEN
    #===============================================================================
    single_ebin = [1, 80]
    pidmc = PIDServiceMC(ebins=single_ebin,
Example #31
    parser.add_argument('-o', '--outfile', dest='outfile', metavar='FILE',
                        type=str, action='store',default="template.json",
                        help='file to store the output')
    args = parser.parse_args()

    set_verbosity(args.verbose)

    with Timer() as t:
        #Load all the settings
        model_settings = from_json(args.template_settings)

        #Select a hierarchy
        logging.info('Selected %s hierarchy'%
                     ('normal' if args.normal else 'inverted'))
        params =  select_hierarchy(model_settings['params'],
                                   normal_hierarchy=args.normal)

        #Intialize template maker
        template_maker = TemplateMaker(get_values(params),
                                       **model_settings['binning'])
    profile.info("  ==> elapsed time to initialize templates: %s sec"%t.secs)

    #Now get the actual template
    with Timer(verbose=False) as t:
        template_maps = template_maker.get_template(get_values(params),
                                                    return_stages=args.save_all)
    profile.info("==> elapsed time to get template: %s sec"%t.secs)

    logging.info("Saving file to %s"%args.outfile)
    to_json(template_maps, args.outfile)
Example #32
            steps['llh'] = []

            # Iterate over the cartesian product, and set fixed parameter to value
            for pos in product(*steplist):
                pos_dict = dict(list(pos))
                print "Running at params-pos dict: ",pos_dict
                for k,v in pos_dict.items():
                    hypo_params[k]['value'] = v
                    steps[k].append(v)

                with Timer() as t:
                    llh_data = find_max_llh_bfgs(asimov_data_set,template_maker,hypo_params,
                                                 minimizer_settings,args.save_steps,
                                                 normal_hierarchy=hypo_normal)
                profile.info("==> elapsed time for optimizer: %s sec"%t.secs)

                steps['llh'].append(llh_data['llh'][-1])

                # Then save the minimized free params later??
                #print "\n\nsteps: ",steps

            #Store the LLH data
            results[data_tag][hypo_tag] = steps

except:
    print "error message: ",sys.exc_info()
    logging.warn("ERROR: outputting what we have now...")

logging.warn("FINISHED. Saving to file: %s"%args.outfile)
to_json(results,args.outfile)
Example #33
        "fixed": True
    }
    for key, val in vars(args).items()
    if key not in template_settings['binning'].keys()
}

# Add aeff part:
stem = 'a_eff_'
aeff_dict = {
    'NC': stem + 'nuall_nc.dat',
    'NC_bar': stem + 'nuallbar_nc.dat',
    'nue': stem + 'nue.dat',
    'nue_bar': stem + 'nuebar.dat',
    'numu': stem + 'numu.dat',
    'numu_bar': stem + 'numubar.dat',
    'nutau': stem + 'nutau.dat',
    'nutau_bar': stem + 'nutaubar.dat'
}
aeff_dir = template_settings['params']['aeff_egy_par_dir']['value']
template_settings['params'].pop('aeff_egy_par_dir')
template_settings['params']['aeff_egy_par'] = {'value': {}, 'fixed': True}

for key, val in aeff_dict.items():
    template_settings['params']['aeff_egy_par']['value'][key] = aeff_dir + val
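# After this loop, each flavor key points at its parametrization file, e.g.
#   template_settings['params']['aeff_egy_par']['value']['nue'] == aeff_dir + 'a_eff_nue.dat'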

for key, val in dbparams.items():
    template_settings['params'][key] = val

print "Writing all settings to: ", outfile
to_json(template_settings, outfile)
Example #34
                        default="template.json",
                        help='file to store the output')
    args = parser.parse_args()

    set_verbosity(args.verbose)

    with Timer() as t:
        #Load all the settings
        model_settings = from_json(args.template_settings)

        #Select a hierarchy
        logging.info('Selected %s hierarchy' %
                     ('normal' if args.normal else 'inverted'))
        params = select_hierarchy(model_settings['params'],
                                  normal_hierarchy=args.normal)

        #Initialize template maker
        template_maker = TemplateMaker(get_values(params),
                                       **model_settings['binning'])
    tprofile.info("  ==> elapsed time to initialize templates: %s sec" %
                  t.secs)

    #Now get the actual template
    with Timer(verbose=False) as t:
        template_maps = template_maker.get_template(
            get_values(params), return_stages=args.save_all)
    tprofile.info("==> elapsed time to get template: %s sec" % t.secs)

    logging.info("Saving file to %s" % args.outfile)
    to_json(template_maps, args.outfile)
Example #35
File: Aeff.py  Project: gkrueckl/pisa
        dest="outfile",
        metavar="FILE",
        type=str,
        action="store",
        default="event_rate.json",
        help="""file to store the output""",
    )
    parser.add_argument("-v", "--verbose", action="count", default=None, help="""set verbosity level""")
    args = parser.parse_args()

    # Set verbosity level
    set_verbosity(args.verbose)

    # Check binning
    ebins, czbins = check_binning(args.osc_flux_maps)

    logging.info("Defining aeff_service...")

    if args.mc_mode:
        logging.info("  Using effective area from EVENT DATA...")
        aeff_service = AeffServiceMC(ebins, czbins, aeff_weight_file=args.weighted_aeff_file)
    else:
        logging.info("  Using effective area from PARAMETRIZATION...")
        aeff_settings = from_json(find_resource(args.settings_file))
        aeff_service = AeffServicePar(ebins, czbins, **aeff_settings)

    event_rate_maps = get_event_rates(args.osc_flux_maps, aeff_service, args.livetime, args.aeff_scale)

    logging.info("Saving output to: %s" % args.outfile)
    to_json(event_rate_maps, args.outfile)
Example #36
    }

    if args.code == 'prob3':
        if iniargs['prop_height'] is None: iniargs['prop_height'] = 20.0
        osc_service = Prob3OscillationService(ebins, czbins, **iniargs)
    elif args.code == 'nucraft':
        if iniargs['prop_height'] is None: iniargs['prop_height'] = 'sample'
        osc_service = NucraftOscillationService(ebins, czbins, **iniargs)
    elif args.code == 'gpu':
        settings = vars(args)
        osc_service = Prob3GPUOscillationService(ebins, czbins, **settings)
    else:
        osc_service = TableOscillationService(ebins, czbins, **iniargs)

    logging.info("Getting osc prob maps")
    osc_flux_maps = get_osc_flux(args.flux_maps,
                                 osc_service,
                                 deltam21=args.deltam21,
                                 deltam31=args.deltam31,
                                 deltacp=args.deltacp,
                                 theta12=args.theta12,
                                 theta13=args.theta13,
                                 theta23=args.theta23,
                                 oversample_e=args.oversample_e,
                                 oversample_cz=args.oversample_cz,
                                 energy_scale=args.energy_scale)

    #Write out
    logging.info("Saving output to: %s", args.outfile)
    to_json(osc_flux_maps, args.outfile)
Example #37
arg_dict = vars(args)
for key in (template_settings['binning'].keys()+['free_params','dbfile','outfile']):
    arg_dict.pop(key)

# Now params:
template_settings['params'] = {key: {"value": val, "fixed": True}
                               for key,val in vars(args).items()
                               if key not in template_settings['binning'].keys()}

# Add aeff part:
stem = 'a_eff_'
aeff_dict = {'NC': stem+'nuall_nc.dat',
             'NC_bar': stem+'nuallbar_nc.dat',
             'nue': stem+'nue.dat',
             'nue_bar': stem+'nuebar.dat',
             'numu': stem+'numu.dat',
             'numu_bar': stem+'numubar.dat',
             'nutau': stem+'nutau.dat',
             'nutau_bar': stem+'nutaubar.dat'}
aeff_dir = template_settings['params']['aeff_egy_par_dir']['value']
template_settings['params'].pop('aeff_egy_par_dir')
template_settings['params']['aeff_egy_par'] = {'value':{},'fixed':True}

for key,val in aeff_dict.items():
    template_settings['params']['aeff_egy_par']['value'][key] = aeff_dir+val

for key,val in dbparams.items(): template_settings['params'][key] = val

print "Writing all settings to: ",outfile
to_json(template_settings,outfile)
Example #38
def get_fisher_matrices(template_settings, grid_settings, IMH=True, NMH=False,
                        dump_all_stages=False, save_templates=False, outdir=None):
  '''
  Main function that runs the Fisher analysis for the chosen hierarchy(ies) (inverted by default).

  Returns a dictionary of Fisher matrices, in the format:
  {'IMH': {'cscd': [...],
          'trck': [...],
          'comb': [...],
          },
  'NMH': {'cscd': [...],
          'trck': [...],
          'comb': [...],
         }
  }

  If save_templates=True and no hierarchy is given, only fiducial templates will be written out;
  if one is given, then the templates used to obtain the gradients will be written out in
  addition.
  '''
  if outdir is None and (save_templates or dump_all_stages):
    logging.info("No output directory specified. Will save templates to current working directory.")
    outdir = os.getcwd()

  profile.info("start initializing")

  # Get the parameters
  params = template_settings['params']
  bins = template_settings['binning']

  # Artificially add the hierarchy parameter to the list of parameters
  # The method get_hierarchy_gradients below will know how to deal with it
  params['hierarchy_nh'] = { "value": 1., "range": [0.,1.],
                           "fixed": False, "prior": None}
  params['hierarchy_ih'] = { "value": 0., "range": [0.,1.],
                           "fixed": False, "prior": None}

  chosen_data = []
  if IMH:
    chosen_data.append(('IMH',False))
    logging.info("Fisher matrix will be built for IMH.")
  if NMH:
    chosen_data.append(('NMH',True))
    logging.info("Fisher matrix will be built for NMH.")
  if chosen_data == []:
    # In this case, only the fiducial maps (for both hierarchies) will be written
    logging.info("No Fisher matrices will be built.")

  # There is no sense in performing any of the following steps if no Fisher matrices are to be built
  # and no templates are to be saved.
  if chosen_data!=[] or dump_all_stages or save_templates:

    # Initialise return dict to hold Fisher matrices
    fisher = { data_tag:{'cscd':[],'trck':[],'comb':[]} for data_tag, data_normal in chosen_data }

    # Get a template maker with the settings used to initialize
    template_maker = TemplateMaker(get_values(params),**bins)

    profile.info("stop initializing\n")

    # Generate fiducial templates for both hierarchies (needed for partial derivatives
    # w.r.t. hierarchy parameter)
    fiducial_maps = {}
    for hierarchy in ['NMH','IMH']:

      logging.info("Generating fiducial templates for %s."%hierarchy)

      # Get the fiducial parameter values corresponding to this hierarchy
      fiducial_params = select_hierarchy(params,normal_hierarchy=(hierarchy=='NMH'))

      # Generate fiducial maps, either all of them or only the ultimate one
      profile.info("start template calculation")
      with Timer() as t:
        fid_maps = template_maker.get_template(get_values(fiducial_params),
                                               return_stages=dump_all_stages)
      profile.info("==> elapsed time for template: %s sec"%t.secs)

      fiducial_maps[hierarchy] = fid_maps[4] if dump_all_stages else fid_maps

      # save fiducial map(s)
      # all stages
      if dump_all_stages:
        stage_names = ("0_unoscillated_flux","1_oscillated_flux","2_oscillated_counts","3_reco","4_pid")
        stage_maps = {}
        for stage in xrange(0,len(fid_maps)):
          stage_maps[stage_names[stage]] = fid_maps[stage]
        logging.info("Writing fiducial maps (all stages) for %s to %s."%(hierarchy,outdir))
        to_json(stage_maps,os.path.join(outdir,"fid_map_"+hierarchy+".json"))
      # only the final stage
      elif save_templates:
        logging.info("Writing fiducial map (final stage) for %s to %s."%(hierarchy,outdir))
        to_json(fiducial_maps[hierarchy],os.path.join(outdir,"fid_map_"+hierarchy+".json"))

    # Get_gradients and get_hierarchy_gradients will both (temporarily)
    # store the templates used to generate the gradient maps
    store_dir = outdir if save_templates else tempfile.gettempdir()

    # Calculate Fisher matrices for the user-defined cases (NMH true and/or IMH true)
    for data_tag, data_normal in chosen_data:

      logging.info("Running Fisher analysis for %s."%(data_tag))

      # The fiducial params are selected from the hierarchy case that does NOT match
      # the data, as we are varying from this model to find the 'best fit'
      fiducial_params = select_hierarchy(params,not data_normal)

      # Get the free parameters (i.e. those for which the gradients should be calculated)
      free_params = select_hierarchy(get_free_params(params),not data_normal)
      gradient_maps = {}
      for param in free_params.keys():
        # Special treatment for the hierarchy parameter
        if param=='hierarchy':
          gradient_maps[param] = get_hierarchy_gradients(
              data_tag,
              fiducial_maps,
              fiducial_params,
              grid_settings,
              store_dir,
          )
        else:
          gradient_maps[param] = get_gradients(
              data_tag,
              param,
              template_maker,
              fiducial_params,
              grid_settings,
              store_dir,
          )

      logging.info("Building Fisher matrix for %s."%(data_tag))

      # Build Fisher matrices for the given hierarchy
      fisher[data_tag] = build_fisher_matrix(gradient_maps,fiducial_maps['IMH'] if data_normal else fiducial_maps['NMH'],fiducial_params)

      # If Fisher matrices exist for both channels, add the matrices to obtain the combined one.
      if len(fisher[data_tag].keys()) > 1:
        fisher[data_tag]['comb'] = FisherMatrix(matrix=np.array([f.matrix for f in fisher[data_tag].itervalues()]).sum(axis=0),
                                              parameters=gradient_maps.keys(),  #order is important here!
                                              best_fits=[fiducial_params[par]['value'] for par in gradient_maps.keys()],
                                              priors=[fiducial_params[par]['prior'] for par in gradient_maps.keys()],
                                              )
    return fisher

  else:
    logging.info("Nothing to be done.")
    return {}
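A minimal usage sketch (hypothetical settings file names; assumes from_json and
the usual PISA settings layout, where the top-level JSON provides 'params' and
'binning'):

    template_settings = from_json('template_settings.json')
    grid_settings = from_json('grid_settings.json')

    fisher = get_fisher_matrices(template_settings, grid_settings,
                                 IMH=True, NMH=True, save_templates=True,
                                 outdir='fisher_templates')

    # Per the docstring above, fisher['IMH']['comb'] (and likewise for 'NMH')
    # would then hold the combined cscd+trck FisherMatrix.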
Example #39
egy_res = papa_NC['e']
papa_NC.pop('e')
papa_NC['energy'] = egy_res

pisa_reco_settings = {}
for flav in pisa_flavs:
    papa_res = parameterizations[flav]
    egy_res = papa_res['e']
    papa_res.pop('e')
    papa_res['energy'] = egy_res

    for ID in mID:
        pisa_reco_settings[flav+ID] = {'cc':{},'nc':{}}
        pisa_reco_settings[flav+ID]['cc'] = copy(papa_res)
        pisa_reco_settings[flav+ID]['nc'] = copy(papa_NC)

to_json(pisa_reco_settings,args.outfile)


# Now, read in the file as if it were a text file and replace n. with np.
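# (presumably the parameterization strings were written against numpy imported
#  as "n"; note that this blanket string replace also hits any other
#  occurrence of "n." in the file)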
fh = open(args.outfile,'r')
output_lines = []
for line in fh.readlines():
    output_lines.append(line.replace("n.","np."))
fh.close()

fh = open(args.outfile,'w')
for line in output_lines:
    fh.write(line)
fh.close()
                    help="File containing CAKE (PISA V3) data")
args = parser.parse_args()

cake_array = from_json(args.cake_file)['maps']

output_array = []

total_trck_dict = {}
total_cscd_dict = {}

for cake_dict in cake_array:
    if 'trck' in cake_dict['name']:
        if len(total_trck_dict.keys()) == 0:
            total_trck_dict = cake_dict
            total_trck_dict['name'] = 'trck'
        else:
            total_trck_dict['hist'] += cake_dict['hist']
    elif 'cscd' in cake_dict['name']:
        if len(total_cscd_dict.keys()) == 0:
            total_cscd_dict = cake_dict
            total_cscd_dict['name'] = 'cscd'
        else:
            total_cscd_dict['hist'] += cake_dict['hist']

output_array.append(total_trck_dict)
output_array.append(total_cscd_dict)

to_json(np.array(output_array), 'merged.json')
    
Example #41
        # 1) get a pseudo data fmap from fiducial model (best fit vals of params).
        fmap = get_pseudo_data_fmap(template_maker,
                                    get_values(select_hierarchy(params,
                                                                normal_hierarchy=data_normal)),
                                    seed=results[data_tag]['seed'])

        # 2) find max llh (and best fit free params) from matching pseudo data
        #    to templates.
        for hypo_tag, hypo_normal in [('hypo_NMH',True),('hypo_IMH',False)]:

            physics.info("Finding best fit for %s under %s assumption"%(data_tag,hypo_tag))
            profile.info("start optimizer")
            llh_data = find_max_llh_bfgs(fmap,template_maker,params,
                                        minimizer_settings,args.save_steps,normal_hierarchy=hypo_normal)
            profile.info("stop optimizer")

            #Store the LLH data
            results[data_tag][hypo_tag] = llh_data


    #Store this trial
    trials += [results]
    profile.info("stop trial %d"%itrial)

#Assemble output dict
output = {'trials' : trials,
          'template_settings' : template_settings,
          'minimizer_settings' : minimizer_settings}
#And write to file
to_json(output,args.outfile)
Example #42
        asimov_data_set = get_asimov_fmap(template_maker,
                                          asimov_params,
                                          channel=asimov_params['channel'])

        # Store injected true values in result:
        for key in free_params.keys():
            if 'theta23' in key: continue
            result['true_' + key].append(asimov_params[key])
        result['true_theta23'].append(step)

        result['asimov_data'].append(asimov_data_set)

        # now get fitted values of opposite hierarchy:
        hypo_normal = False if true_normal else True
        hypo_tag = 'hypo_IMH' if true_normal else 'hypo_NMH'
        llh_data = find_alt_hierarchy_fit(asimov_data_set,
                                          template_maker,
                                          params,
                                          hypo_normal,
                                          minimizer_settings,
                                          only_atm_params=False,
                                          check_octant=args.check_octant)

        for key in free_params.keys():
            result['fit_' + key].append(llh_data[key][-1])

    results[true_tag] = result

logging.warn("FINISHED. Saving to file: %s" % args.outfile)
to_json(results, args.outfile)
Example #43
File: Aeff.py  Project: gkrueckl/pisa
    parser.add_argument('-v',
                        '--verbose',
                        action='count',
                        default=None,
                        help='''set verbosity level''')
    args = parser.parse_args()

    #Set verbosity level
    set_verbosity(args.verbose)

    #Check binning
    ebins, czbins = check_binning(args.osc_flux_maps)

    logging.info("Defining aeff_service...")

    if args.mc_mode:
        logging.info("  Using effective area from EVENT DATA...")
        aeff_service = AeffServiceMC(ebins,
                                     czbins,
                                     aeff_weight_file=args.weighted_aeff_file)
    else:
        logging.info("  Using effective area from PARAMETRIZATION...")
        aeff_settings = from_json(find_resource(args.settings_file))
        aeff_service = AeffServicePar(ebins, czbins, **aeff_settings)

    event_rate_maps = get_event_rates(args.osc_flux_maps, aeff_service,
                                      args.livetime, args.aeff_scale)

    logging.info("Saving output to: %s" % args.outfile)
    to_json(event_rate_maps, args.outfile)
Example #44
File: Flux.py  Project: sonia3994/pisa
    parser.add_argument('--delta_index',metavar='FLOAT',type=float,
                        default=0.0,help='''Shift in spectral index of numu''')
    parser.add_argument('--energy_scale',metavar='FLOAT',type=float,
                        help='''Factor to scale TRUE energy by''',default=1.0)
    parser.add_argument('-o', '--outfile', dest='outfile', metavar='FILE',
                        type=str, action='store', default='flux.json',
                        help='file to store the output')
    parser.add_argument('-v', '--verbose', action='count', default=None,
                        help='set verbosity level')
    args = parser.parse_args()

    #Set verbosity level
    set_verbosity(args.verbose)

    logging.debug("Using %u bins in energy from %.2f to %.2f GeV"%
                                (len(args.ebins)-1,args.ebins[0],args.ebins[-1]))
    logging.debug("Using %u bins in cos(zenith) from %.2f to %.2f"%
                                (len(args.czbins)-1,args.czbins[0],args.czbins[-1]))

    #Instantiate a flux model
    flux_model = HondaFluxService(args.flux_file)

    #get the flux
    flux_maps = get_flux_maps(
        flux_model, args.ebins, args.czbins, args.nue_numu_ratio, args.nu_nubar_ratio,
        args.energy_scale, args.delta_index)

    #write out to a file
    logging.info("Saving output to: %s"%args.outfile)
    to_json(flux_maps, args.outfile)
Example #45
def summarize_clsim_table(table_fpath,
                          table=None,
                          save_summary=True,
                          outdir=None):
    """
    Parameters
    ----------
    table_fpath : string
        Path to table (or just the table's filename if `outdir` is specified)

    table : mapping, optional
        If the table has already been loaded, it can be passed here to avoid
        re-loading the table.

    save_summary : bool
        Whether to save the table summary to disk.

    outdir : string, optional
        If `save_summary` is True, write the summary to this directory. If
        `outdir` is not specified and `save_summary` is True, the summary will
        be written to the same directory that contains `table_fpath`.

    Returns
    -------
    table
        See `load_clsim_table` for details of the data structure

    summary : OrderedDict

    """
    t_start = time()
    if save_summary:
        from pisa.utils.jsons import from_json, to_json

    table_fpath = expand(table_fpath)
    srcdir, clsim_fname = dirname(table_fpath), basename(table_fpath)
    invalid_fname = False
    try:
        fname_info = interpret_clsim_table_fname(clsim_fname)
    except ValueError:
        invalid_fname = True
        fname_info = {}

    if outdir is None:
        outdir = srcdir
    outdir = expand(outdir)
    mkdir(outdir)

    if invalid_fname:
        metapath = None
    else:
        metaname = (CLSIM_TABLE_METANAME_PROTO[-1].format(
            hash_val=fname_info['hash_val']))
        metapath = join(outdir, metaname)
    if metapath and isfile(metapath):
        meta = from_json(metapath)
    else:
        meta = dict()

    if table is None:
        table = load_clsim_table(table_fpath)

    summary = OrderedDict()
    for key in table.keys():
        if key == 'table':
            continue
        summary[key] = table[key]
    if fname_info:
        for key in ('hash_val', 'string', 'depth_idx', 'seed'):
            summary[key] = fname_info[key]
    # TODO: Add hole ice info when added to tray_kw_to_hash
    if meta:
        summary['n_events'] = meta['tray_kw_to_hash']['NEvents']
        summary['ice_model'] = meta['tray_kw_to_hash']['IceModel']
        summary['tilt'] = not meta['tray_kw_to_hash']['DisableTilt']
        for key, val in meta.items():
            if key.endswith('_binning_kw'):
                summary[key] = val
    elif 'fname_version' in fname_info and fname_info['fname_version'] == 1:
        summary['n_events'] = fname_info['n_events']
        summary['ice_model'] = 'spice_mie'
        summary['tilt'] = False
        summary['r_binning_kw'] = dict(min=0.0, max=400.0, n_bins=200, power=2)
        summary['costheta_binning_kw'] = dict(min=-1, max=1, n_bins=40)
        summary['t_binning_kw'] = dict(min=0.0, max=3000.0, n_bins=300)
        summary['costhetadir_binning_kw'] = dict(min=-1, max=1, n_bins=20)
        summary['deltaphidir_binning_kw'] = dict(min=0.0, max=np.pi, n_bins=20)

    # Save marginal distributions and info to file
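    # norm is 1 / (n_photons * distance light travels per time bin, i.e.
    # c / n_phase * <dt>), times the number of cos(theta) bins; it is applied
    # below to the marginal distributions and the under/overflow counts.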
    norm = (
        1 / table['n_photons'] /
        (SPEED_OF_LIGHT_M_PER_NS / table['phase_refractive_index'] *
         np.mean(np.diff(table['t_bin_edges'])))
        #* table['angular_acceptance_fract']
        * (len(table['costheta_bin_edges']) - 1))
    summary['norm'] = norm

    dim_names = ('r', 'costheta', 't', 'costhetadir', 'deltaphidir')
    n_dims = len(table['table_shape'])
    assert n_dims == len(dim_names)

    # Apply norm to underflow and overflow so magnitudes can be compared
    # relative to plotted marginal distributions
    for flow, idx in product(('underflow', 'overflow'), iter(range(n_dims))):
        summary[flow][idx] = summary[flow][idx] * norm

    wstderr('Finding marginal distributions...\n')
    wstderr('    masking off zeros in table...')
    t0 = time()
    nonzero_table = np.ma.masked_equal(table['table'], 0)
    wstderr(' ({} ms)\n'.format(np.round((time() - t0) * 1e3, 3)))

    t0_marg = time()
    summary['dimensions'] = OrderedDict()
    for keep_axis, ax_name in zip(tuple(range(n_dims)), dim_names):
        remove_axes = list(range(n_dims))
        remove_axes.pop(keep_axis)
        remove_axes = tuple(remove_axes)
        axis = OrderedDict()

        wstderr('    mean across non-{} axes...'.format(ax_name))
        t0 = time()
        axis['mean'] = norm * np.asarray(
            np.mean(table['table'], axis=remove_axes))
        wstderr(' ({} s)\n'.format(np.round(time() - t0, 3)))

        wstderr('    median across non-{} axes...'.format(ax_name))
        t0 = time()
        axis['median'] = norm * np.asarray(
            np.ma.median(nonzero_table, axis=remove_axes))
        wstderr(' ({} s)\n'.format(np.round(time() - t0, 3)))

        wstderr('    max across non-{} axes...'.format(ax_name))
        t0 = time()
        axis['max'] = norm * np.asarray(
            np.max(table['table'], axis=remove_axes))
        wstderr(' ({} s)\n'.format(np.round(time() - t0, 3)))
        summary['dimensions'][ax_name] = axis
    wstderr('  Total time to find marginal distributions: {} s\n'.format(
        np.round(time() - t0_marg, 3)))

    if save_summary:
        ext = None
        base_fname = clsim_fname
        while ext not in ('', '.fits'):
            base_fname, ext = splitext(base_fname)
            ext = ext.lower()
        outfpath = join(outdir, base_fname + '_summary.json.bz2')
        to_json(summary, outfpath)
        print('saved summary to "{}"'.format(outfpath))

    wstderr('Time to summarize table: {} s\n'.format(
        np.round(time() - t_start, 3)))

    return table, summary
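A minimal usage sketch (the table path is hypothetical; assumes load_clsim_table
can read the file and that writing the summary alongside it is acceptable):

    table, summary = summarize_clsim_table('/path/to/clsim_table.fits',
                                           save_summary=True)
    # summary['dimensions'] holds mean/median/max marginal distributions for
    # each of the r, costheta, t, costhetadir and deltaphidir axes
    print(list(summary['dimensions'].keys()))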
Example #46
    
    #Initialize an oscillation service
    logging.info('Using %s oscillation code'%args.code)
    if args.code=='prob3':
      osc_service = Prob3OscillationService(**settings)
    elif args.code=='table':
      osc_service = TableOscillationService(**settings)
    elif args.code=='nuCraft':
      osc_service = NucraftOscillationService(**settings)
    else:
      raise NotImplementedError('Unknown oscillation service: %s'%args.code)
    
    #Do calculation
    logging.info("Calculating oscillation probabilities")
    osc_prob_maps = osc_service.get_osc_prob_maps(**vars(args))
    
    #Remove irrelevant parameters from settings
    for par in ['verbose', 'outfile', 'ebins', 'czbins']:
        settings.pop(par)
    
    #Write out
    logging.info("Saving output to: %s",args.outfile)
    if fmt=='JSON':
        osc_prob_maps['params'] = settings
        to_json(osc_prob_maps, args.outfile)
    elif fmt=='HDF5':
        to_hdf5(osc_prob_maps, args.outfile, settings)
        logging.debug('Wrote %.2f MBytes to %s'%
                     (os.path.getsize(args.outfile)/(1024.**2),
                      os.path.basename(args.outfile)))