def resolve_targetfile(self, args, require_sim_name=False):
    """Get the name of the target file based on the job arguments"""
    ttype = args.get('ttype')
    if is_null(ttype):
        sys.stderr.write('Target type must be specified')
        return (None, None)
    sim = args.get('sim')
    if is_null(sim):
        if require_sim_name:
            sys.stderr.write('Simulation scenario must be specified')
            return (None, None)
        else:
            sim = None

    name_keys = dict(target_type=ttype,
                     targetlist='target_list.yaml',
                     sim_name=sim,
                     fullpath=True)
    if sim is None:
        targetfile = self.targetfile(**name_keys)
    else:
        targetfile = self.sim_targetfile(**name_keys)

    targets_override = args.get('targetfile')
    if is_not_null(targets_override):
        targetfile = targets_override
    return (targetfile, sim)
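
# Hypothetical usage sketch for resolve_targetfile.  NAME_FACTORY is used as
# an instance of this class in build_job_configs below; the argument values
# here ('dSphs', etc.) are illustrative only.  With no simulation scenario
# requested, the plain (non-simulation) target file is returned:
#
#     args = dict(ttype='dSphs', sim=None, targetfile=None)
#     targetfile, sim = NAME_FACTORY.resolve_targetfile(args)
#     # sim is None and targetfile comes from self.targetfile(...)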
def run_analysis(self, argv):
    """Run this analysis"""
    args = self._parser.parse_args(argv)

    if not args.rosters:
        raise RuntimeError("You must specify at least one target roster")

    if is_null(args.ttype):
        raise RuntimeError("You must specify a target type")

    if is_null(args.sims):
        sims = []
    else:
        sims = args.sims

    if is_null(args.alias_dict):
        aliases = None
    else:
        aliases = load_yaml(args.alias_dict)

    name_keys = dict(target_type=args.ttype, fullpath=True)
    config_file = NAME_FACTORY.ttypeconfig(**name_keys)

    if is_not_null(args.config):
        config_file = args.config

    roster_dict = {}
    for roster in args.rosters:
        a_roster = load_yaml(roster)
        roster_dict.update(a_roster)

    base_config = load_yaml(config_file)
    self._write_target_dirs(args.ttype, roster_dict, base_config,
                            sims, args.spatial_models, aliases)
def set_wts_get_npred_wt(gta, maskname):
    """Set a weights file and get the weighted npred for all the sources

    Parameters
    ----------

    gta : `fermipy.GTAnalysis`
        The analysis object

    maskname : str
        The path to the file with the mask

    Returns
    -------

    odict : dict
        Dictionary mapping from source name to weighted npred
    """
    if is_null(maskname):
        maskname = None

    gta.set_weights_map(maskname)
    for name in gta.like.sourceNames():
        gta._init_source(name)
    gta._update_roi()
    return build_srcdict(gta, 'npred_wt')
def purge_dict(idict):
    """Remove null items from a dictionary"""
    odict = {}
    for key, val in idict.items():
        if is_null(val):
            continue
        odict[key] = val
    return odict
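
# Hypothetical usage sketch for purge_dict, assuming only that is_null(None)
# is True (as it must be for the checks elsewhere in this module); the keys
# and values are illustrative:
#
#     opts = dict(ttype='dSphs', sim=None, seed=42)
#     purge_dict(opts)
#     # -> {'ttype': 'dSphs', 'seed': 42}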
def run_analysis(self, argv):
    """Run this analysis"""
    args = self._parser.parse_args(argv)

    if is_null(args.config):
        raise ValueError("Config yaml file must be specified")
    if is_null(args.rand_config):
        raise ValueError(
            "Random direction config yaml file must be specified")

    config = load_yaml(args.config)
    rand_config = load_yaml(args.rand_config)

    wcsgeom = self._make_wcsgeom_from_config(config)
    dir_dict = self._build_skydir_dict(wcsgeom, rand_config)

    if is_not_null(args.outfile):
        write_yaml(dir_dict, args.outfile)
def build_job_configs(self, args):
    """Hook to build job configurations"""
    job_configs = {}

    ttype = args['ttype']
    (targets_yaml, sim) = NAME_FACTORY.resolve_targetfile(args,
                                                          require_sim_name=True)
    if targets_yaml is None:
        return job_configs

    specconfig = NAME_FACTORY.resolve_specconfig(args)

    astro_priors = args['astro_priors']
    write_full = args.get('write_full', False)

    targets = load_yaml(targets_yaml)
    base_config = dict(nsims=args['nsims'],
                       seed=args['seed'],
                       specconfig=specconfig)

    for target_name, profile_list in list(targets.items()):
        for profile in profile_list:
            for astro_prior in astro_priors:
                if is_null(astro_prior):
                    astro_prior = 'none'
                full_key = "%s:%s:%s:%s" % (target_name, profile,
                                            sim, astro_prior)
                name_keys = dict(target_type=ttype,
                                 target_name=target_name,
                                 sim_name=sim,
                                 profile=profile,
                                 astro_prior=astro_prior,
                                 fullpath=True)
                limitfile = NAME_FACTORY.sim_dmlimitsfile(**name_keys)
                first = args['seed']
                last = first + args['nsims'] - 1
                outfile = limitfile.replace(
                    '_SEED.fits', '_collected_%06i_%06i.fits' % (first, last))
                logfile = make_nfs_path(outfile.replace('.fits', '.log'))
                if not write_full:
                    outfile = None
                summaryfile = limitfile.replace(
                    '_SEED.fits', '_summary_%06i_%06i.fits' % (first, last))
                job_config = base_config.copy()
                job_config.update(dict(limitfile=limitfile,
                                       astro_prior=astro_prior,
                                       outfile=outfile,
                                       summaryfile=summaryfile,
                                       logfile=logfile))
                job_configs[full_key] = job_config

    return job_configs
def convert_option_dict_to_dict(option_dict):
    """Convert a dictionary of options tuples to a simple key-value dictionary"""
    ret_dict = {}
    for key, value in option_dict.items():
        if is_null(value):
            ret_dict[key] = None
        elif isinstance(value, tuple):
            ret_dict[key] = value[0]
        else:
            ret_dict[key] = value
    return ret_dict
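
# Hypothetical usage sketch for convert_option_dict_to_dict.  Option tuples
# in this package carry the default at index 0 and the type at index 2 (see
# command_template below); treating index 1 as a help string is an assumption.
# Only the default value survives the conversion:
#
#     options = dict(ttype=('dSphs', 'Target type', str),
#                    seed=(42, 'Random seed', int),
#                    dry_run=None)
#     convert_option_dict_to_dict(options)
#     # -> {'ttype': 'dSphs', 'seed': 42, 'dry_run': None}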
def run_analysis(self, argv):
    """Run this analysis"""
    args = self._parser.parse_args(argv)

    if DMSKY_ROSTER_LIB:
        roster_lib = RosterLibrary()
        roster_dict = {}
    else:
        raise RuntimeError(
            "Can't load roster library, probably b/c old version of yaml is not compatible with dmsky")

    if not args.rosters:
        raise RuntimeError("You must specify at least one target roster")

    if is_null(args.ttype):
        raise RuntimeError("You must specify a target type")

    if is_null(args.sims):
        sims = []
    else:
        sims = args.sims

    if is_null(args.alias_dict):
        aliases = None
    else:
        aliases = load_yaml(args.alias_dict)

    name_keys = dict(target_type=args.ttype, fullpath=True)
    config_file = NAME_FACTORY.ttypeconfig(**name_keys)

    if is_not_null(args.config):
        config_file = args.config

    for roster in args.rosters:
        rost = roster_lib.create_roster(roster)
        roster_dict[roster] = rost

    base_config = load_yaml(config_file)
    self._write_target_dirs(args.ttype, roster_dict, base_config,
                            sims, args.spatial_models, aliases)
def resolve_specfile(self, args):
    """Get the name of the spectrum file based on the job arguments"""
    ttype = args.get('ttype')
    if is_null(ttype):
        sys.stderr.write('Target type must be specified')
        return None
    name_keys = dict(target_type=ttype, fullpath=True)
    specfile = self.specfile(**name_keys)
    spec_override = args.get('specfile')
    if is_not_null(spec_override):
        specfile = spec_override
    return specfile
def resolve_randconfig(self, args):
    """Get the name of the random-direction configuration file based on the job arguments"""
    ttype = args.get('ttype')
    if is_null(ttype):
        sys.stderr.write('Target type must be specified')
        return None
    name_keys = dict(target_type=ttype, fullpath=True)
    randconfig = self.randconfig(**name_keys)
    rand_override = args.get('rand_config')
    if is_not_null(rand_override):
        randconfig = rand_override
    return randconfig
def build_job_configs(self, args):
    """Hook to build job configurations"""
    job_configs = {}

    ttype = args['ttype']
    (roster_yaml, sim) = NAME_FACTORY.resolve_rosterfile(args,
                                                         require_sim_name=True)
    if roster_yaml is None:
        return job_configs

    specconfig = NAME_FACTORY.resolve_specconfig(args)

    astro_priors = args['astro_priors']
    write_full = args['write_full']

    first = args['seed']
    last = first + args['nsims'] - 1

    base_config = dict(nsims=args['nsims'], seed=args['seed'])

    roster_dict = load_yaml(roster_yaml)
    for roster_name in roster_dict.keys():
        for astro_prior in astro_priors:
            if is_null(astro_prior):
                astro_prior = 'none'
            full_key = "%s:%s:%s" % (roster_name, sim, astro_prior)
            name_keys = dict(target_type=ttype,
                             roster_name=roster_name,
                             sim_name=sim,
                             astro_prior=astro_prior,
                             fullpath=True)
            limitfile = NAME_FACTORY.sim_stackedlimitsfile(**name_keys)
            outfile = limitfile.replace(
                '_SEED.fits', '_collected_%06i_%06i.fits' % (first, last))
            logfile = make_nfs_path(outfile.replace('.fits', '.log'))
            if not write_full:
                outfile = None
            summaryfile = limitfile.replace('_SEED.fits', '_summary.fits')
            job_config = base_config.copy()
            job_config.update(dict(limitfile=limitfile,
                                   specconfig=specconfig,
                                   astro_prior=astro_prior,
                                   outfile=outfile,
                                   summaryfile=summaryfile,
                                   logfile=logfile))
            job_configs[full_key] = job_config

    return job_configs
def run_analysis(self, argv):
    """Run this analysis"""
    args = self._parser.parse_args(argv)

    if args.ttype is None:
        raise RuntimeError('Target type must be specified')

    name_keys = dict(target_type=args.ttype,
                     rosterlist='roster_list.yaml',
                     sim_name=args.sim,
                     fullpath=True)

    spec_config = NAME_FACTORY.specconfig(**name_keys)
    if is_not_null(args.specconfig):
        spec_config = args.specconfig

    spec_config = load_yaml(spec_config)
    channels = spec_config['channels']

    if is_not_null(args.sim):
        roster_file = NAME_FACTORY.sim_rosterfile(**name_keys)
        sim_name = args.sim
        is_sim = True
    else:
        roster_file = NAME_FACTORY.rosterfile(**name_keys)
        is_sim = False
        sim_name = None

    if is_not_null(args.rosterlist):
        roster_file = args.rosterlist

    roster_dict = load_yaml(roster_file)

    if is_sim:
        seedlist = list(range(args.seed, args.seed + args.nsims))
    else:
        seedlist = [0]

    astro_prior = args.astro_prior
    if is_null(astro_prior):
        astro_prior = 'none'

    for seed in seedlist:
        StackLikelihood.stack_rosters(roster_dict, args.ttype, channels,
                                      astro_prior, sim_name, seed,
                                      args.clobber)
def create_inputlist(arglist):
    """Read lines from a file and make a list of file names.

    Removes whitespace and lines that start with '#'.
    Recursively reads all files with the extension '.lst'.
    """
    lines = []
    if isinstance(arglist, list):
        for arg in arglist:
            if os.path.splitext(arg)[1] == '.lst':
                lines += readlines(arg)
            else:
                lines.append(arg)
    elif is_null(arglist):
        pass
    else:
        if os.path.splitext(arglist)[1] == '.lst':
            lines += readlines(arglist)
        else:
            lines.append(arglist)
    return lines
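
# Hypothetical usage sketch for create_inputlist.  A '.lst' argument is
# expanded via readlines() (the helper assumed by the docstring to drop
# whitespace and '#' comment lines); any other name passes through as-is.
# The file names are illustrative:
#
#     create_inputlist(['ft1_events.lst', 'extra_file.fits'])
#     # -> every non-comment line of ft1_events.lst, plus 'extra_file.fits'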
def command_template(self):
    """Build and return a string that can be used as a template invoking
    this chain from the command line.

    The actual command can be obtained by using
    `self.command_template().format(**self.args)`
    """
    com_out = self.appname
    arg_string = ""
    flag_string = ""
    # Loop over the key, value pairs in self.args
    for key, val in self.args.items():
        # Check if the value is set in self._options
        # If so, get the value from there
        if val is None:
            opt_val = self._options[key][0]
        else:
            opt_val = val
        opt_type = self._options[key][2]
        if key == 'args':
            # 'args' is special, pull it out and move it to the back
            arg_string += ' {%s}' % key
        elif opt_type is bool:
            if opt_val:
                flag_string += ' --%s' % (key)
        elif opt_type is list:
            if is_null(opt_val):
                continue
            elif isinstance(opt_val, str):
                com_out += ' --%s %s' % (key, opt_val)
            elif isinstance(opt_val, list):
                for arg_val in opt_val:
                    com_out += ' --%s %s' % (key, arg_val)
        else:
            com_out += ' --%s {%s}' % (key, key)
    com_out += flag_string
    com_out += arg_string
    return com_out
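
# Hypothetical usage sketch for command_template, assuming a link-like
# object whose appname is 'gtselect' (illustrative) and whose _options
# entries are (default, help, type) tuples.  String-typed options become
# format placeholders, boolean flags are appended at the end:
#
#     link.args = dict(infile='ft1.fits', outfile=None, clobber=True)
#     template = link.command_template()
#     # e.g. 'gtselect --infile {infile} --outfile {outfile} --clobber'
#     command = template.format(**link.args)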
def run_analysis(self, argv):
    """Run this analysis"""
    args = self._parser.parse_args(argv)
    if not HAVE_ST:
        raise RuntimeError(
            "Trying to run fermipy analysis, but don't have ST")

    if is_null(args.skydirs):
        skydir_dict = None
    else:
        skydir_dict = load_yaml(args.skydirs)

    gta = GTAnalysis(args.config,
                     logging={'verbosity': 3},
                     fileio={'workdir_regex': r'\.xml$|\.npy$'})
    # gta.setup(overwrite=False)
    gta.load_roi(args.roi_baseline)
    gta.print_roi()

    basedir = os.path.dirname(args.config)
    # This should be a no-op, b/c it was done in the baseline analysis

    for profile in args.profiles:
        if skydir_dict is None:
            skydir_keys = [None]
        else:
            skydir_keys = sorted(skydir_dict.keys())

        for skydir_key in skydir_keys:
            if skydir_key is None:
                pkey, pdict = AnalyzeSED._build_profile_dict(basedir, profile)
            else:
                skydir_val = skydir_dict[skydir_key]
                pkey, pdict = AnalyzeSED._build_profile_dict(basedir, profile)
                pdict['ra'] = skydir_val['ra']
                pdict['dec'] = skydir_val['dec']
                pkey += "_%06i" % skydir_key

            outfile = "sed_%s.fits" % pkey

            # Add the source and get the list of correlated sources
            correl_dict = add_source_get_correlated(gta, pkey, pdict,
                                                    correl_thresh=0.25)

            # Write the list of correlated sources
            correl_yaml = os.path.join(basedir, "correl_%s.yaml" % pkey)
            write_yaml(correl_dict, correl_yaml)

            gta.free_sources(False)
            for src_name in correl_dict.keys():
                gta.free_source(src_name, pars='norm')

            # build the SED
            gta.sed(pkey, outfile=outfile, make_plots=args.make_plots)

            # remove the source
            gta.delete_source(pkey)
            # put the ROI back to how it was
            gta.load_xml(args.roi_baseline)

    return gta
def convert_castro_data(self, castro_data, spec_name, **kwargs):
    """Convert a CastroData object, i.e., the likelihood as a function of
    flux and energy flux, to a StackCastroData object, i.e., the likelihood
    as a function of stacking normalization and index

    Parameters
    ----------

    castro_data : `CastroData`
        Input data

    spec_name : str
        Name of the stacking spectra

    Keyword arguments
    -----------------

    xscan_name : str
        Name of the parameter scanned over [default 'index']

    other_pars : dict
        Values for the other parameters

    spec_type : str
        Type of spectral normalization to use for the conversion [default 'eflux']

    astro_factor : dict or float or None
        Nuisance factor used to make the conversion.
        If astro_factor is None, it will use the reference value.
        If astro_factor is a float, it will use that.
        If astro_factor is a dict, it will use that to create a prior.

    Returns
    -------

    output : `StackCastroData`
        The stacking-space likelihood curves
    """
    xscan_name = kwargs.get('xscan_name', 'index')
    astro_factor = kwargs.get('astro_factor', None)
    other_pars = kwargs.get('other_pars', {})
    spec_type = kwargs.get('spec_type', 'eflux')

    if not self.check_energy_bins(castro_data.refSpec):
        raise ValueError("CastroData energy binning does not match")

    xscan_vals = self.par_vals(spec_name, xscan_name, other_pars)
    spec_vals = self.spectra(spec_name, other_pars, spec_type)

    # Get the reference values
    ref_vals = self.ref_vals(spec_name)
    ref_norm = ref_vals["REF_NORM"]

    astro_prior = None
    if is_null(astro_factor):
        # Just use the reference values
        astro_value = ref_norm
        norm_factor = 1.
    elif isinstance(astro_factor, float):
        # Rescale the normalization values
        astro_value = astro_factor
        norm_factor = ref_norm / astro_factor
    elif isinstance(astro_factor, dict):
        astro_value = astro_factor.get('d_value')
        norm_factor = ref_norm / astro_value
        astro_factor['scale'] = astro_value
        astro_functype = astro_factor.get('functype', None)
        if is_null(astro_functype):
            astro_prior = None
        else:
            astro_prior = create_prior_functor(astro_factor)
    else:
        sys.stderr.write("Did not recognize astro factor %s %s\n" %
                         (astro_factor, type(astro_factor)))

    kwcopy = kwargs.copy()
    kwcopy['astro_prior'] = astro_prior
    kwcopy['spec_name'] = spec_name
    kwcopy['norm_factor'] = norm_factor
    kwcopy['ref_stack'] = ref_vals["REF_STACK"]
    kwcopy['ref_norm'] = ref_norm

    stack_castro = self.compute_castro_data(castro_data, spec_vals,
                                            xscan_vals, **kwcopy)
    return stack_castro
def _map_arguments(self, args):
    """Map from the top-level arguments to the arguments provided
    to the individual links"""
    comp_file = args.get('comp', None)
    datafile = args.get('data', None)
    if is_null(comp_file):
        return
    if is_null(datafile):
        return
    NAME_FACTORY.update_base_dict(datafile)

    outdir = args.get('outdir', None)
    outkey = args.get('outkey', None)
    ft1file = args['ft1file']
    if is_null(outdir) or is_null(outkey):
        return
    pfiles = os.path.join(outdir, outkey)

    self.comp_dict = yaml.safe_load(open(comp_file))
    coordsys = self.comp_dict.pop('coordsys')

    full_out_dir = make_nfs_path(os.path.join(outdir, outkey))

    for key_e, comp_e in sorted(self.comp_dict.items()):
        emin = math.pow(10., comp_e['log_emin'])
        emax = math.pow(10., comp_e['log_emax'])
        enumbins = comp_e['enumbins']
        zmax = comp_e['zmax']
        zcut = "zmax%i" % comp_e['zmax']
        evclassstr = NAME_FACTORY.base_dict['evclass']

        kwargs_select = dict(zcut=zcut,
                             ebin=key_e,
                             psftype='ALL',
                             coordsys=coordsys,
                             mktime='none')

        selectfile_energy = make_full_path(outdir, outkey,
                                           NAME_FACTORY.select(**kwargs_select))

        linkname = 'select-energy-%s-%s' % (key_e, zcut)
        self._set_link(linkname, Gtlink_select,
                       infile=ft1file,
                       outfile=selectfile_energy,
                       zmax=zmax,
                       emin=emin,
                       emax=emax,
                       evclass=NAME_FACTORY.evclassmask(evclassstr),
                       pfiles=pfiles,
                       logfile=os.path.join(full_out_dir, "%s.log" % linkname))

        if 'evtclasses' in comp_e:
            evtclasslist_vals = comp_e['evtclasses']
        else:
            evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']]

        for evtclassval in evtclasslist_vals:
            for psf_type, psf_dict in sorted(comp_e['psf_types'].items()):
                linkname_select = 'select-type-%s-%s-%s-%s' % (
                    key_e, zcut, evtclassval, psf_type)
                linkname_bin = 'bin-%s-%s-%s-%s' % (
                    key_e, zcut, evtclassval, psf_type)
                hpx_order = psf_dict['hpx_order']
                kwargs_bin = kwargs_select.copy()
                kwargs_bin['psftype'] = psf_type
                selectfile_psf = make_full_path(outdir, outkey,
                                                NAME_FACTORY.select(**kwargs_bin))
                binfile = make_full_path(outdir, outkey,
                                         NAME_FACTORY.ccube(**kwargs_bin))

                self._set_link(linkname_select, Gtlink_select,
                               infile=selectfile_energy,
                               outfile=selectfile_psf,
                               zmax=zmax,
                               emin=emin,
                               emax=emax,
                               evtype=EVT_TYPE_DICT[psf_type],
                               evclass=NAME_FACTORY.evclassmask(evtclassval),
                               pfiles=pfiles,
                               logfile=os.path.join(full_out_dir,
                                                    "%s.log" % linkname_select))

                self._set_link(linkname_bin, Gtlink_bin,
                               coordsys=coordsys,
                               hpx_order=hpx_order,
                               evfile=selectfile_psf,
                               outfile=binfile,
                               emin=emin,
                               emax=emax,
                               enumbins=enumbins,
                               pfiles=pfiles,
                               logfile=os.path.join(full_out_dir,
                                                    "%s.log" % linkname_bin))
def run_analysis(self, argv):
    """Run this analysis"""
    args = self._parser.parse_args(argv)
    if not HAVE_ST:
        raise RuntimeError(
            "Trying to run fermipy analysis, but don't have ST")

    if args.load_baseline:
        gta = GTAnalysis.create(args.roi_baseline, args.config)
    else:
        gta = GTAnalysis(args.config,
                         logging={'verbosity': 3},
                         fileio={'workdir_regex': r'\.xml$|\.npy$'})
        gta.setup()
        if is_not_null(args.input_pars):
            gta.load_parameters_from_yaml(args.input_pars)
        gta.write_roi(args.roi_baseline,
                      save_model_map=True,
                      save_weight_map=True,
                      make_plots=args.make_plots)

    src_list = get_src_names(gta)
    plotter = plotting.AnalysisPlotter(gta.config['plotting'],
                                       fileio=gta.config['fileio'],
                                       logging=gta.config['logging'])

    if is_null(args.fit_strategy):
        return

    fit_strategy = load_yaml(args.fit_strategy)
    npred_current = None
    npred_prev = None

    plots_only = False

    for fit_stage in fit_strategy:
        mask = fit_stage.get('mask', None)
        npred_threshold = fit_stage.get('npred_threshold', 1.0e4)
        frac_threshold = fit_stage.get('frac_threshold', 0.5)
        npred_frac = fit_stage.get('npred_frac', 0.9999)

        if plots_only:
            gta.load_roi("%s.npy" % fit_stage['key'])
            npred_current = set_wts_get_npred_wt(gta, mask)
            skip_list_region = get_unchanged(src_list,
                                             npred_current,
                                             npred_prev,
                                             frac_threshold=frac_threshold)
        else:
            npred_current = set_wts_get_npred_wt(gta, mask)
            skip_list_region = get_unchanged(src_list,
                                             npred_current,
                                             npred_prev,
                                             frac_threshold=frac_threshold)
            gta.optimize(npred_frac=npred_frac,
                         npred_threshold=npred_threshold,
                         skip=skip_list_region)

        snapshot(gta, plotter, fit_stage['key'], make_plots=args.make_plots)
        npred_prev = npred_current
        npred_current = build_srcdict(gta, 'npred_wt')
def run_analysis(self, argv):
    """Run this analysis"""
    args = self._parser.parse_args(argv)

    norm_type = 'eflux'

    spec_table = DMSpecTable.create_from_fits(args.specfile)
    profile = load_yaml(args.astro_value_file)
    channels = spec_table.channel_names

    j_factor = None
    d_factor = None

    j_value = profile.get('j_integ', None)
    j_sigma = profile.get('j_sigma', None)
    if is_null(j_value):
        j_factor = None
    elif is_null(args.astro_prior) or is_null(j_sigma) or j_sigma == 0.0:
        j_factor = j_value
    else:
        j_factor = dict(functype=args.astro_prior,
                        j_value=j_value,
                        mu=j_value,
                        sigma=j_sigma)

    d_value = profile.get('d_integ', None)
    d_sigma = profile.get('d_sigma', None)
    if is_null(d_value):
        d_factor = None
    elif is_null(args.astro_prior) or is_null(d_sigma) or d_sigma == 0.0:
        d_factor = d_value
    else:
        d_factor = dict(functype=args.astro_prior,
                        d_value=d_value,
                        mu=d_value,
                        sigma=d_sigma)

    if args.nsims < 0:
        seedlist = [None]
    else:
        seedlist = list(range(args.seed, args.seed + args.nsims))

    for seed in seedlist:
        sedfile = args.sed_file
        outfile = args.outfile
        limitfile = args.limitfile
        if seed is not None:
            sedfile = sedfile.replace('_SEED.fits', '_%06i.fits' % seed)
            if is_not_null(outfile):
                outfile = outfile.replace('_SEED.fits', '_%06i.fits' % seed)
            if is_not_null(limitfile):
                limitfile = limitfile.replace('_SEED.fits', '_%06i.fits' % seed)
        self.convert_sed(spec_table, channels, sedfile, outfile, limitfile,
                         norm_type=norm_type,
                         j_factor=j_factor,
                         d_factor=d_factor,
                         clobber=args.clobber)
def convert_castro_data(self, castro_data, channel, norm_type,
                        astro_factor=None):
    """Convert a CastroData object, i.e., the likelihood as a function of
    flux and energy flux, to a DMCastroData object, i.e., the likelihood
    as a function of DM mass and sigmav

    Parameters
    ----------

    castro_data : `CastroData`
        Input data

    channel : int
        Index for the DM interaction channel

    norm_type : str
        Type of spectral normalization to use for the conversion

    astro_factor : dict or float or None
        Nuisance factor used to make the conversion.
        If astro_factor is None, it will use the reference value.
        If astro_factor is a float, it will use that.
        If astro_factor is a dict, it will use that to create a prior.

    Returns
    -------

    output : `DMCastroData`
        The DM-space likelihood curves
    """
    if not self.check_energy_bins(castro_data.refSpec):
        raise ValueError("CastroData energy binning does not match")

    mask = self._s_table['ref_chan'] == channel
    masses = self._s_table[mask]['ref_mass'].data
    spec_vals = self._s_table[mask]['ref_%s' % norm_type].data
    nmass = len(masses)

    is_decay = channel >= 100

    # Get the reference values
    if is_decay:
        ref_inter = self._ref_vals["REF_TAU"]
        ref_norm = self._ref_vals["REF_D"]
        astro_str = 'd_value'
    else:
        ref_inter = self._ref_vals["REF_SIGV"]
        ref_norm = self._ref_vals["REF_J"]
        astro_str = 'j_value'

    astro_prior = None
    if is_null(astro_factor):
        # Just use the reference values
        astro_value = ref_norm
        norm_factor = 1.
    elif isinstance(astro_factor, float):
        # Rescale the normalization values
        astro_value = astro_factor
        norm_factor = ref_norm / astro_factor
    elif isinstance(astro_factor, dict):
        astro_value = astro_factor.get(astro_str)
        norm_factor = ref_norm / astro_value
        astro_factor['scale'] = astro_value
        astro_functype = astro_factor.get('functype', None)
        if is_null(astro_functype):
            astro_prior = None
        else:
            astro_prior = create_prior_functor(astro_factor)
    else:
        sys.stderr.write("Did not recognize astro factor %s %s\n" %
                         (astro_factor, type(astro_factor)))

    norm_limits = castro_data.getLimits(1e-5)
    # This puts the spectrum in units of the reference spectrum.
    # This means that the scan values will be expressed
    # in units of the reference spectra as well.
    spec_vals /= norm_factor

    n_scan_pt = 200

    norm_vals = np.ndarray((nmass, n_scan_pt))
    dll_vals = np.ndarray((nmass, n_scan_pt))
    mle_vals = np.ndarray((nmass))
    nll_offsets = np.ndarray((nmass))

    mass_mask = np.ones((nmass), bool)

    # for i, mass in enumerate(masses):
    for i in range(nmass):
        max_ratio = 1. / ((spec_vals[i] / norm_limits).max())
        log_max_ratio = np.log10(max_ratio)
        norm_vals[i][0] = 10**(log_max_ratio - 5)
        norm_vals[i][1:] = np.logspace(log_max_ratio - 4,
                                       log_max_ratio + 4,
                                       n_scan_pt - 1)
        test_vals = (np.expand_dims(spec_vals[i], 1) *
                     (np.expand_dims(norm_vals[i], 1).T))
        dll_vals[i, 0:] = castro_data(test_vals)
        mle_vals[i] = norm_vals[i][dll_vals[i].argmin()]
        nll_offsets[i] = dll_vals[i].min()
        dll_vals[i] -= nll_offsets[i]

        msk = np.isfinite(dll_vals[i])
        if not msk.any():
            print("Skipping mass %0.2e for channel %s" % (masses[i], channel))
            mass_mask[i] = False
            continue

        if is_not_null(astro_prior):
            try:
                lnlfn = castro.LnLFn(norm_vals[i], dll_vals[i], 'dummy')
                lnlfn_prior = LnLFn_norm_prior(lnlfn, astro_prior)
                dll_vals[i, 0:] = lnlfn_prior(norm_vals[i])
                nll_offsets[i] = dll_vals[i].min()
            except ValueError:
                print("Skipping mass %0.2e for channel %s" % (masses[i], channel))
                mass_mask[i] = False
                dll_vals[i, 0:] = np.nan * np.ones((n_scan_pt))
                nll_offsets[i] = np.nan

    # Here we convert the normalization values to standard units
    norm_vals *= (ref_inter)

    dm_castro = DMCastroData(norm_vals[mass_mask], dll_vals[mass_mask],
                             nll_offsets[mass_mask], channel,
                             masses[mass_mask], astro_value,
                             astro_prior=astro_prior, ref_astro=ref_norm,
                             ref_inter=ref_inter, decay=is_decay)
    return dm_castro
def run_analysis(self, argv):
    """Run this analysis"""
    args = self._parser.parse_args(argv)
    if not HAVE_ST:
        raise RuntimeError(
            "Trying to run fermipy analysis, but don't have ST")

    if is_null(args.skydirs):
        skydir_dict = None
    else:
        skydir_dict = load_yaml(args.skydirs)

    gta = GTAnalysis(args.config,
                     logging={'verbosity': 3},
                     fileio={'workdir_regex': r'\.xml$|\.npy$'})
    # gta.setup(overwrite=False)
    gta.load_roi(args.roi_baseline)
    gta.print_roi()

    basedir = os.path.dirname(args.config)
    # This should be a no-op, b/c it was done in the baseline analysis

    for profile in args.profiles:
        if skydir_dict is None:
            skydir_keys = [None]
        else:
            skydir_keys = sorted(skydir_dict.keys())

        for skydir_key in skydir_keys:
            if skydir_key is None:
                pkey, psrc_name, pdict = build_profile_dict(basedir, profile)
            else:
                skydir_val = skydir_dict[skydir_key]
                pkey, psrc_name, pdict = build_profile_dict(basedir, profile)
                pdict['ra'] = skydir_val['ra']
                pdict['dec'] = skydir_val['dec']
                pkey += "_%06i" % skydir_key

            outfile = "sed_%s.fits" % pkey

            # Add the source and get the list of correlated sources
            correl_dict, test_src_name = add_source_get_correlated(
                gta, psrc_name, pdict, correl_thresh=0.25,
                non_null_src=args.non_null_src)

            # Write the list of correlated sources
            correl_yaml = os.path.join(basedir, "correl_%s.yaml" % pkey)
            write_yaml(correl_dict, correl_yaml)

            gta.free_sources(False)
            for src_name in correl_dict.keys():
                gta.free_source(src_name, pars='norm')

            # build the SED
            if args.non_null_src:
                gta.update_source(test_src_name, reoptimize=True)
                gta.write_roi("base_%s" % pkey, make_plots=False)
            gta.sed(test_src_name, prefix=pkey,
                    outfile=outfile, make_plots=args.make_plots)

            # remove the source
            gta.delete_source(test_src_name)
            # put the ROI back to how it was
            gta.load_xml(args.roi_baseline)

    return gta
def _map_arguments(self, args):
    """Map from the top-level arguments to the arguments provided
    to the individual links"""
    comp_file = args.get('comp', None)
    datafile = args.get('data', None)
    if is_null(comp_file):
        return
    if is_null(datafile):
        return
    NAME_FACTORY.update_base_dict(args['data'])

    outdir = args.get('outdir')
    outkey = args.get('outkey')
    ft1file = args['ft1file']
    ft2file = args['ft2file']
    if is_null(outdir) or is_null(outkey):
        return
    pfiles = os.path.join(outdir, outkey)

    self.comp_dict = yaml.safe_load(open(comp_file))
    coordsys = self.comp_dict.pop('coordsys')

    full_out_dir = make_nfs_path(os.path.join(outdir, outkey))

    for key_e, comp_e in sorted(self.comp_dict.items()):
        if 'logebins' in comp_e:
            ebins_file = make_nfs_path(os.path.join(full_out_dir,
                                                    'energy_bins.fits'))
            write_ebins_file(ebins_file, comp_e['logebins'])
        else:
            ebins_file = None
        enumbins = comp_e['enumbins']
        emin = math.pow(10., comp_e['log_emin'])
        emax = math.pow(10., comp_e['log_emax'])
        zmax = comp_e['zmax']
        zcut = "zmax%i" % comp_e['zmax']
        tmin = comp_e.get('tmin', None)
        tmax = comp_e.get('tmax', None)
        if is_null(tmin):
            tmin = 'INDEF'
        if is_null(tmax):
            tmax = 'INDEF'
        evclassstr = NAME_FACTORY.base_dict['evclass']

        kwargs_select = dict(zcut=zcut,
                             ebin=key_e,
                             psftype='ALL',
                             coordsys=coordsys)

        linkname = 'select-energy-%s-%s' % (key_e, zcut)
        selectfile_energy = make_full_path(outdir, outkey,
                                           NAME_FACTORY.select(**kwargs_select))
        self._set_link(linkname, Gtlink_select,
                       infile=ft1file,
                       outfile=selectfile_energy,
                       zmax=zmax,
                       tmin=tmin,
                       tmax=tmax,
                       emin=emin,
                       emax=emax,
                       evclass=NAME_FACTORY.evclassmask(evclassstr),
                       pfiles=pfiles,
                       logfile=os.path.join(full_out_dir, "%s.log" % linkname))

        if 'mktimefilters' in comp_e:
            mktimefilters = comp_e['mktimefilters']
        else:
            mktimefilters = ['none']

        for mktimekey in mktimefilters:
            kwargs_mktime = kwargs_select.copy()
            kwargs_mktime['mktime'] = mktimekey
            filterstring = MKTIME_DICT[mktimekey]
            mktime_file = make_full_path(outdir, outkey,
                                         NAME_FACTORY.mktime(**kwargs_mktime))
            ltcube_file = make_full_path(outdir, outkey,
                                         NAME_FACTORY.ltcube(**kwargs_mktime))

            linkname_mktime = 'mktime-%s-%s-%s' % (key_e, zcut, mktimekey)
            linkname_ltcube = 'ltcube-%s-%s-%s' % (key_e, zcut, mktimekey)

            self._set_link(linkname_mktime, Gtlink_mktime,
                           evfile=selectfile_energy,
                           outfile=mktime_file,
                           scfile=ft2file,
                           filter=filterstring,
                           pfiles=pfiles,
                           logfile=os.path.join(full_out_dir,
                                                "%s.log" % linkname_mktime))

            self._set_link(linkname_ltcube, Gtlink_ltcube,
                           evfile=mktime_file,
                           outfile=ltcube_file,
                           scfile=ft2file,
                           zmax=zmax,
                           pfiles=pfiles,
                           logfile=os.path.join(full_out_dir,
                                                "%s.log" % linkname_ltcube))

            if 'evtclasses' in comp_e:
                evtclasslist_vals = comp_e['evtclasses']
            else:
                evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']]

            for evtclassval in evtclasslist_vals:
                for psf_type, psf_dict in sorted(comp_e['psf_types'].items()):
                    linkname_select = 'select-type-%s-%s-%s-%s-%s' % (
                        key_e, zcut, mktimekey, evtclassval, psf_type)
                    linkname_bin = 'bin-%s-%s-%s-%s-%s' % (
                        key_e, zcut, mktimekey, evtclassval, psf_type)
                    kwargs_bin = kwargs_mktime.copy()
                    kwargs_bin['psftype'] = psf_type
                    kwargs_bin['coordsys'] = coordsys
                    kwargs_bin['evclass'] = evtclassval
                    selectfile_psf = make_full_path(
                        outdir, outkey, NAME_FACTORY.select(**kwargs_bin))
                    binfile_psf = make_full_path(
                        outdir, outkey, NAME_FACTORY.ccube(**kwargs_bin))
                    hpx_order_psf = min(args['hpx_order_max'],
                                        psf_dict['hpx_order'])

                    self._set_link(linkname_select, Gtlink_select,
                                   infile=selectfile_energy,
                                   outfile=selectfile_psf,
                                   zmax=zmax,
                                   emin=emin,
                                   emax=emax,
                                   tmin=tmin,
                                   tmax=tmax,
                                   evtype=EVT_TYPE_DICT[psf_type],
                                   evclass=NAME_FACTORY.evclassmask(evtclassval),
                                   pfiles=pfiles,
                                   logfile=os.path.join(full_out_dir,
                                                        "%s.log" % linkname_select))

                    if ebins_file is None:
                        self._set_link(linkname_bin, Gtlink_bin,
                                       coordsys=coordsys,
                                       hpx_order=hpx_order_psf,
                                       evfile=selectfile_psf,
                                       outfile=binfile_psf,
                                       emin=emin,
                                       emax=emax,
                                       enumbins=enumbins,
                                       pfiles=pfiles,
                                       logfile=os.path.join(full_out_dir,
                                                            "%s.log" % linkname_bin))
                    else:
                        self._set_link(linkname_bin, Gtlink_bin,
                                       coordsys=coordsys,
                                       hpx_order=hpx_order_psf,
                                       evfile=selectfile_psf,
                                       outfile=binfile_psf,
                                       ebinalg='FILE',
                                       ebinfile=ebins_file,
                                       pfiles=pfiles,
                                       logfile=os.path.join(full_out_dir,
                                                            "%s.log" % linkname_bin))