Example #1
import argparse
import os
import sys

import numpy as np

from fermipy.gtanalysis import GTAnalysis
# check_log and fit_halo_scan are assumed to be helpers from the surrounding
# analysis package; they are not part of fermipy itself.


def main():

    usage = "usage: %(prog)s [config file]"
    description = "Run fermipy analysis chain."
    parser = argparse.ArgumentParser(usage=usage, description=description)

    parser.add_argument('--config', default='sample_config.yaml')
    parser.add_argument('--source', default=None)

    args = parser.parse_args()
    gta = GTAnalysis(args.config, logging={'verbosity': 3},
                     fileio={'workdir_regex': [r'\.xml$|\.npy$']})

    logfile = os.path.join(gta.outdir, 'run-region-analysis.log')
    if check_log(logfile) != 'Successful':
        print('Region analysis incomplete. Exiting.')
        sys.exit(1)

    gta.setup()

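    # Scan grid: halo widths in deg (log-spaced) and power-law spectral indices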
    halo_width = np.logspace(-1.25,0.25,13)
    halo_index = np.array([1.5,1.75,2.0,2.25,2.5,2.75,3.0,3.25,3.5])
    src_name = gta.roi.sources[0].name

    for i in range(1,6):

        npy_file = os.path.join(gta.workdir, 'fit%i.npy' % i)

        if not os.path.isfile(npy_file):
            continue

        gta.load_roi('fit%i' % i, reload_sources=True)
        fit_halo_scan(gta, 'fit%i' % i, src_name,
                      halo_width, halo_index, optimizer='NEWTON')
Example #2
    def run_analysis(self, argv):
        """Run this analysis"""
        args = self._parser.parse_args(argv)

        if not HAVE_ST:
            raise RuntimeError("Trying to run fermipy analysis, but don't have ST")

        gta = GTAnalysis(args.config,
                         logging={'verbosity': 3},
                         fileio={'workdir_regex': r'\.xml$|\.npy$'})
        gta.setup(overwrite=False)
        gta.load_roi('fit_baseline')
        gta.print_roi()

        basedir = os.path.dirname(args.config)
        # This should be a no-op, b/c it was done in the baseline analysis

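        # Free the normalizations of sources within 1 deg of the ROI center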
        gta.free_sources(skydir=gta.roi.skydir, distance=1.0, pars='norm')

        for profile in args.profiles:
            pkey, pdict = SEDAnalysis._build_profile_dict(basedir, profile)
            # test_case needs to be a dict with spectrum and morphology
            gta.add_source(pkey, pdict)
            # refit the ROI
            gta.fit()
            # build the SED
            gta.sed(pkey, outfile="sed_%s.fits" % pkey)
            # remove the source
            gta.delete_source(pkey)
            # put the ROI back to how it was
            gta.load_xml('fit_baseline')

        return gta
Example #3
    def run_analysis(self, argv):
        """Run this analysis"""
        args = self._parser.parse_args(argv)

        if not HAVE_ST:
            raise RuntimeError(
                "Trying to run fermipy analysis, but don't have ST")

        workdir = os.path.dirname(args.config)
        _config_file = self._clone_config_and_srcmaps(args.config, args.seed)

        gta = GTAnalysis(_config_file,
                         logging={'verbosity': 3},
                         fileio={'workdir_regex': r'\.xml$|\.npy$'})
        gta.load_roi(args.roi_baseline)

        simfile = os.path.join(workdir,
                               'sim_%s_%s.yaml' % (args.sim, args.sim_profile))

        mcube_file = "%s_%s_%06i" % (args.sim, args.sim_profile, args.seed)
        sim_config = utils.load_yaml(simfile)

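        # If the simulation config defines a source to inject, place it at
        # the ROI center and record its true counts spectrum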
        injected_source = sim_config.get('injected_source', None)
        if injected_source is not None:
            src_dict = injected_source['source_model']
            src_dict['ra'] = gta.config['selection']['ra']
            src_dict['dec'] = gta.config['selection']['dec']
            injected_name = injected_source['name']
            gta.add_source(injected_name, src_dict)
            gta.write_model_map(mcube_file)
            mc_spec_dict = dict(
                true_counts=gta.model_counts_spectrum(injected_name),
                energies=gta.energies,
                model=src_dict)
            mcspec_file = os.path.join(
                workdir, "mcspec_%s_%06i.yaml" % (mcube_file, args.seed))
            utils.write_yaml(mc_spec_dict, mcspec_file)
        else:
            injected_name = None

        gta.write_roi('sim_baseline_%06i' % args.seed)

        test_sources = {}
        for profile in args.profiles:
            profile_path = os.path.join(workdir, 'profile_%s.yaml' % profile)
            test_source = load_yaml(profile_path)
            test_sources[profile] = test_source
            first = args.seed
            last = first + args.nsims
            for seed in range(first, last):
                self._run_simulation(gta,
                                     args.roi_baseline,
                                     injected_name,
                                     test_sources,
                                     first,
                                     seed,
                                     non_null_src=args.non_null_src,
                                     do_find_src=args.do_find_src)
Example #4
    def run_analysis(self, argv):
        """Run this analysis"""
        args = self._parser.parse_args(argv)

        if not HAVE_ST:
            raise RuntimeError(
                "Trying to run fermipy analysis, but don't have ST")

        workdir = os.path.dirname(args.config)
        _config_file = self._clone_config_and_srcmaps(args.config, args.seed)

        gta = GTAnalysis(_config_file, logging={'verbosity': 3},
                         fileio={'workdir_regex': r'\.xml$|\.npy$'})
        gta.load_roi(args.roi_baseline)

        simfile = os.path.join(workdir, 'sim_%s_%s.yaml' %
                               (args.sim, args.sim_profile))

        mcube_file = "%s_%s_%06i" % (args.sim, args.sim_profile, args.seed)
        sim_config = utils.load_yaml(simfile)

        injected_source = sim_config.get('injected_source', None)
        if injected_source is not None:
            src_dict = injected_source['source_model']
            src_dict['ra'] = gta.config['selection']['ra']
            src_dict['dec'] = gta.config['selection']['dec']
            injected_name = injected_source['name']
            gta.add_source(injected_name, src_dict)
            gta.write_model_map(mcube_file)
            mc_spec_dict = dict(true_counts=gta.model_counts_spectrum(injected_name),
                                energies=gta.energies,
                                model=src_dict)
            mcspec_file = os.path.join(workdir,
                                       "mcspec_%s_%06i.yaml" % (mcube_file, args.seed))
            utils.write_yaml(mc_spec_dict, mcspec_file)
        else:
            injected_name = None

        gta.write_roi('sim_baseline_%06i' % args.seed)

        test_sources = []
        for profile in args.profiles:
            profile_path = os.path.join(workdir, 'profile_%s.yaml' % profile)
            test_source = load_yaml(profile_path)
            test_sources.append(test_source)
            first = args.seed
            last = first + args.nsims
            for seed in range(first, last):
                self._run_simulation(gta, args.roi_baseline,
                                     injected_name, test_sources, first, seed,
                                     non_null_src=args.non_null_src)
Example #5
File: fitting.py Project: tuoyl/fermipy
    def run_analysis(self, argv):
        """Run this analysis"""
        args = self._parser.parse_args(argv)

        if not HAVE_ST:
            raise RuntimeError(
                "Trying to run fermipy analysis, but don't have ST")

        if args.load_baseline:
            gta = GTAnalysis.create(args.roi_baseline, args.config)
        else:
            gta = GTAnalysis(args.config,
                             logging={'verbosity': 3},
                             fileio={'workdir_regex': r'\.xml$|\.npy$'})
            gta.setup()
            if is_not_null(args.input_pars):
                gta.load_parameters_from_yaml(args.input_pars)
            gta.write_roi(args.roi_baseline,
                          save_model_map=True,
                          save_weight_map=True,
                          make_plots=args.make_plots)

        src_list = get_src_names(gta)
        plotter = plotting.AnalysisPlotter(gta.config['plotting'],
                                           fileio=gta.config['fileio'],
                                           logging=gta.config['logging'])

        if is_null(args.fit_strategy):
            return

        fit_strategy = load_yaml(args.fit_strategy)
        npred_current = None
        npred_prev = None

        plots_only = False

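        # Loop over fit stages; sources whose weighted Npred changed by less
        # than frac_threshold since the previous stage are skipped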
        for fit_stage in fit_strategy:
            mask = fit_stage.get('mask', None)
            npred_threshold = fit_stage.get('npred_threshold', 1.0e4)
            frac_threshold = fit_stage.get('frac_threshold', 0.5)
            npred_frac = fit_stage.get('npred_frac', 0.9999)

            if plots_only:
                gta.load_roi("%s.npy" % fit_stage['key'])
                npred_current = set_wts_get_npred_wt(gta, mask)
                skip_list_region = get_unchanged(src_list,
                                                 npred_current,
                                                 npred_prev,
                                                 frac_threshold=frac_threshold)
            else:
                npred_current = set_wts_get_npred_wt(gta, mask)
                skip_list_region = get_unchanged(src_list,
                                                 npred_current,
                                                 npred_prev,
                                                 frac_threshold=frac_threshold)
                gta.optimize(npred_frac=npred_frac,
                             npred_threshold=npred_threshold,
                             skip=skip_list_region)

            snapshot(gta,
                     plotter,
                     fit_stage['key'],
                     make_plots=args.make_plots)
            npred_prev = npred_current
            npred_current = build_srcdict(gta, 'npred_wt')
Example #6
class ExtensionFit:
    def __init__(self, configFile):

        self.gta = GTAnalysis(configFile, logging={'verbosity': 3})
        self.target = None
        self.targetRadius = None
        self.distance = None
        self.catalog = fits.getdata('/users-data/mfalxa/code/gll_psch_v13.fit',
                                    1)

    def setSourceName(self, sourceObject, newName):
        self.gta.delete_source(sourceObject['name'])
        self.gta.add_source(newName, sourceObject)

    ''' INITIALIZE '''

    def initialize(self, sizeROI, rInner, addToROI, TSMin, debug):

        self.gta.setup()
        if self.gta.config['selection']['emin'] >= 10000:
            self.gta.set_parameter('galdiff', 'Scale', 30000)

        if debug:
            self.gta.make_plots('startAll')
            self.gta.residmap(prefix='startAll', make_plots=True)

        # Get model source names
        sourceList = self.gta.get_sources(exclude=['isodiff', 'galdiff'])

        # Delete unassociated sources (empty catalog CLASS) with TS < TSMin
        for src in sourceList:
            src_class = self.catalog['CLASS'][
                self.catalog['Source_Name'] == src['name']][0]
            if src['catalog']['TS_value'] < TSMin and src_class == '':
                self.gta.delete_source(src['name'])

        closests = self.gta.get_sources(distance=rInner,
                                        exclude=['isodiff', 'galdiff'])

        # Delete all unidentified sources (lower-case CLASS); a source
        # identified with a star forming region ('SFR') becomes the target
        for src in closests:
            src_class = self.catalog['CLASS'][
                self.catalog['Source_Name'] == src['name']][0]
            if not src_class.isupper():
                self.gta.delete_source(src['name'])
            if src_class == 'SFR':
                self.target = src
                self.setSourceName(self.target, 'TESTSOURCE')

        # If debug, save ROI and make plots
        if debug:
            self.gta.write_roi('startModel')
            self.gta.residmap(prefix='start', make_plots=True)
            self.gta.make_plots('start')

        # Optimize spectral parameters for sources with npred > 1
        self.gta.optimize(npred_threshold=1, skip=['isodiff'])

        # Get model source names
        sourceList = self.gta.get_sources(distance=sizeROI + addToROI,
                                          square=True,
                                          exclude=['isodiff', 'galdiff'])

        # Iterate source localizing on source list
        for i in range(len(sourceList)):
            if not sourceList[i].extended:
                self.gta.localize(sourceList[i]['name'],
                                  write_fits=False,
                                  write_npy=False,
                                  update=True)

        # Free sources within ROI size + extra distance from center
        self.gta.free_sources(distance=sizeROI + addToROI, square=True)

        # Re-optimize ROI
        self.gta.optimize(skip=['isodiff'])

        # Save and make plots if debug
        if debug:
            self.gta.write_roi('modelInitialized')
            self.gta.residmap(prefix='initialized', make_plots=True)
            self.gta.make_plots('initialized')

        # Lock sources
        self.gta.free_sources(free=False)

    ''' OUTER REGION '''
    def outerRegionAnalysis(self, sizeROI, rInner, sqrtTsThreshold,
                            minSeparation, debug):

        self.gta.free_sources(distance=sizeROI,
                              pars='norm',
                              square=True,
                              free=True)
        self.gta.free_sources(distance=rInner, free=False)
        self.gta.free_source('galdiff', free=True)
        self.gta.free_source('isodiff', free=False)

        # Seek new sources until none are found
        sourceModel = {
            'SpectrumType': 'PowerLaw',
            'Index': 2.0,
            'Scale': 30000,
            'Prefactor': 1.e-15,
            'SpatialModel': 'PointSource'
        }
        newSources = self.gta.find_sources(sqrt_ts_threshold=sqrtTsThreshold,
                                           min_separation=minSeparation,
                                           model=sourceModel,
                                           search_skydir=self.gta.roi.skydir,
                                           search_minmax_radius=[rInner, sizeROI])

        # Switch bright new sources (TS > 100) to a LogParabola spectrum,
        # refit, then refreeze
        for src in newSources['sources']:
            if src['ts'] > 100.:
                self.gta.set_source_spectrum(src['name'],
                                             spectrum_type='LogParabola')
                self.gta.free_source(src['name'])
                self.gta.fit()
                self.gta.free_source(src['name'], free=False)

        # Optimize the whole ROI
        self.gta.optimize(skip=['isodiff'])

        # Save sources found
        if debug:
            self.gta.residmap(prefix='outer', make_plots=True)
            self.gta.write_roi('outerAnalysisROI')
            self.gta.make_plots('outer')

    ''' INNER REGION '''

    def innerRegionAnalysis(self, sizeROI, rInner, maxIter, sqrtTsThreshold,
                            minSeparation, dmMin, TSm1Min, TSextMin, debug):

        self.gta.free_sources(distance=sizeROI, square=True, free=False)
        self.gta.free_sources(distance=rInner, free=True, exclude=['isodiff'])

        # Keep the closest source if it is identified with a star forming
        # region in the catalog; otherwise look for the new source closest
        # to the center within rInner
        if self.target is not None:
            print('Closest source identified with star forming region : ',
                  self.target['name'])
            self.gta.set_source_morphology('TESTSOURCE',
                                           spatial_model='PointSource')
        else:
            closeSources = self.gta.find_sources(sqrt_ts_threshold=2.,
                                                 min_separation=minSeparation,
                                                 max_iter=1,
                                                 search_skydir=self.gta.roi.skydir,
                                                 search_minmax_radius=[0., rInner])
            dCenter = np.array([])
            for i in range(len(closeSources['sources'])):
                dCenter = np.append(
                    dCenter,
                    self.gta.roi.skydir.separation(
                        closeSources['sources'][i].skydir).value)
            self.target = closeSources['sources'][np.argmin(dCenter)]
            print('Target name : ', self.target['name'])
            self.setSourceName(self.target, 'TESTSOURCE')
            for i in [
                    x for x in range(len(closeSources['sources']))
                    if x != (np.argmin(dCenter))
            ]:
                self.gta.delete_source(closeSources['sources'][i]['name'])
            self.gta.optimize(skip=['isodiff'])

        # Initialize n sources array
        nSources = []

        # Save ROI without extension fit
        self.gta.write_roi('nSourcesFit')

        if debug:
            self.gta.make_plots('innerInit')
            self.gta.residmap(prefix='innerInit', make_plots=True)

        # Test for extension
        extensionTest = self.gta.extension('TESTSOURCE',
                                           make_plots=True,
                                           write_npy=debug,
                                           write_fits=debug,
                                           spatial_model='RadialDisk',
                                           update=True,
                                           free_background=True,
                                           fit_position=True)
        extLike = extensionTest['loglike_ext']
        TSext = extensionTest['ts_ext']
        print('TSext : ', TSext)
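        # AIC = 2k - 2*ln(L), with k the number of free parameters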
        extAIC = 2 * (len(self.gta.get_free_param_vector()) -
                      self.gta._roi_data['loglike'])
        self.gta.write_roi('extFit')

        if debug:
            self.gta.residmap(prefix='ext0', make_plots=True)
            self.gta.make_plots('ext0')

        self.gta.load_roi('nSourcesFit', reload_sources=True)

        for i in range(1, maxIter + 1):

            # Test for n point sources
            nSourcesTest = self.gta.find_sources(
                sources_per_iter=1,
                sqrt_ts_threshold=sqrtTsThreshold,
                min_separation=minSeparation,
                max_iter=1,
                search_skydir=self.gta.roi.skydir,
                search_minmax_radius=[0., rInner])

            if len(nSourcesTest['sources']) > 0:

                if nSourcesTest['sources'][0]['ts'] > 100.:
                    self.gta.set_source_spectrum(
                        nSourcesTest['sources'][0]['name'],
                        spectrum_type='LogParabola')
                    self.gta.free_source(nSourcesTest['sources'][0]['name'])
                    self.gta.fit()
                    self.gta.free_source(nSourcesTest['sources'][0]['name'],
                                         free=False)

                if debug:
                    self.gta.make_plots('nSources' + str(i))

                nSources.append(nSourcesTest['sources'])
                self.gta.localize(nSourcesTest['sources'][0]['name'],
                                  write_npy=debug,
                                  write_fits=debug,
                                  update=True)
                nAIC = 2 * (len(self.gta.get_free_param_vector()) -
                            self.gta._roi_data['loglike'])
                self.gta.free_source(nSourcesTest['sources'][0]['name'],
                                     free=True)
                self.gta.residmap(prefix='nSources' + str(i), make_plots=True)
                self.gta.write_roi('n1SourcesFit')

                # Estimate Akaike Information Criterion difference between both models
                dm = extAIC - nAIC
                print('AIC difference between both models = ', dm)

                # Estimate TS_m+1
                extensionTestPlus = self.gta.extension(
                    'TESTSOURCE',
                    make_plots=True,
                    write_npy=debug,
                    write_fits=debug,
                    spatial_model='RadialDisk',
                    update=True,
                    free_background=True,
                    fit_position=True)
                TSm1 = 2 * (extensionTestPlus['loglike_ext'] - extLike)
                print('TSm+1 = ', TSm1)

                if debug:
                    self.gta.residmap(prefix='ext' + str(i), make_plots=True)
                    self.gta.make_plots('ext' + str(i))

                if dm < dmMin and TSm1 < TSm1Min:
                    self.gta.load_roi('extFit', reload_sources=True)
                    break
                else:

                    # Set the extension test to the current state, save the
                    # current extension-fit ROI, and reload the previous
                    # nSources fit
                    extensionTest = extensionTestPlus
                    extLike = extensionTestPlus['loglike_ext']
                    TSext = extensionTestPlus['ts_ext']
                    print('TSext : ', TSext)
                    extAIC = 2 * (len(self.gta.get_free_param_vector()) -
                                  self.gta._roi_data['loglike'])
                    self.gta.write_roi('extFit')
                    self.gta.load_roi('n1SourcesFit', reload_sources=True)
                    self.gta.write_roi('nSourcesFit')

            else:
                if TSext > TSextMin:
                    self.gta.load_roi('extFit', reload_sources=True)
                    break
                else:
                    self.gta.load_roi('nSourcesFit', reload_sources=True)
                    break

        self.gta.fit()

        # Get source radius depending on spatial model
        endSources = self.gta.get_sources()
        for i in range(len(endSources)):
            if endSources[i]['name'] == 'TESTSOURCE':
                self.target = endSources[i]
                self.distance = self.gta.roi.skydir.separation(
                    endSources[i].skydir).value
                if endSources[i].extended:
                    self.targetRadius = endSources[i]['SpatialWidth']
                else:
                    self.targetRadius = endSources[i]['pos_r95']

    ''' CHECK OVERLAP '''

    def overlapDisk(self, rInner, radiusCatalog):

        print('Target radius : ', self.targetRadius)

        # r is the smaller of the two radii, R the larger
        if radiusCatalog < self.targetRadius:
            r = float(radiusCatalog)
            R = float(self.targetRadius)
        else:
            r = float(self.targetRadius)
            R = float(radiusCatalog)

        # Estimating overlapping area
        d = self.distance
        print('Distance from center : ', d)

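        # Circle-circle intersection (lens) area for radii r, R at distance d:
        # A = r^2*arccos((d^2+r^2-R^2)/(2dr)) + R^2*arccos((d^2+R^2-r^2)/(2dR))
        #     - 0.5*sqrt((-d+r+R)(d+r-R)(d-r+R)(d+r+R))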
        if d < (r + R):
            if R < (r + d):
                area = r**2 * np.arccos(
                    (d**2 + r**2 - R**2) / (2 * d * r)) + R**2 * np.arccos(
                        (d**2 + R**2 - r**2) / (2 * d * R)) - 0.5 * np.sqrt(
                            (-d + r + R) * (d + r - R) * (d - r + R) *
                            (d + r + R))
                overlap = round((area / (np.pi * r**2)) * 100, 2)
            else:
                area = np.pi * r**2
                overlap = 100.0
        else:
            area = 0.
            overlap = 0.

        print('Overlapping surface : ', area)
        print('Overlap : ', overlap)

        associated = overlap > 68. and self.distance < rInner

        return associated

    ''' CHECK UPPER LIMIT '''

    def upperLimit(self, name, radius):
        sourceModel = {
            'SpectrumType': 'PowerLaw',
            'Index': 2.0,
            'Scale': 30000,
            'Prefactor': 1.e-15,
            'SpatialModel': 'RadialDisk',
            'SpatialWidth': radius,
            'glon': self.gta.config['selection']['glon'],
            'glat': self.gta.config['selection']['glat']
        }
        self.gta.add_source(name, sourceModel, free=True)
        self.gta.fit()
        self.gta.residmap(prefix='upperLimit', make_plots=True)
        print('Upper limit : ', self.gta.get_sources()[0]['flux_ul95'])
Example #7
    def run_analysis(self, argv):
        """Run this analysis"""
        args = self._parser.parse_args(argv)

        if not HAVE_ST:
            raise RuntimeError(
                "Trying to run fermipy analysis, but don't have ST")

        if is_null(args.skydirs):
            skydir_dict = None
        else:
            skydir_dict = load_yaml(args.skydirs)

        gta = GTAnalysis(args.config,
                         logging={'verbosity': 3},
                         fileio={'workdir_regex': r'\.xml$|\.npy$'})
        #gta.setup(overwrite=False)
        gta.load_roi(args.roi_baseline)
        gta.print_roi()

        basedir = os.path.dirname(args.config)
        # This should be a no-op, b/c it was done in the baseline analysis

        for profile in args.profiles:
            if skydir_dict is None:
                skydir_keys = [None]
            else:
                skydir_keys = sorted(skydir_dict.keys())

            for skydir_key in skydir_keys:
                if skydir_key is None:
                    pkey, pdict = AnalyzeSED._build_profile_dict(
                        basedir, profile)
                else:
                    skydir_val = skydir_dict[skydir_key]
                    pkey, pdict = AnalyzeSED._build_profile_dict(
                        basedir, profile)
                    pdict['ra'] = skydir_val['ra']
                    pdict['dec'] = skydir_val['dec']
                    pkey += "_%06i" % skydir_key

                outfile = "sed_%s.fits" % pkey

                # Add the source and get the list of correlated sources
                correl_dict = add_source_get_correlated(gta,
                                                        pkey,
                                                        pdict,
                                                        correl_thresh=0.25)

                # Write the list of correlated sources
                correl_yaml = os.path.join(basedir, "correl_%s.yaml" % pkey)
                write_yaml(correl_dict, correl_yaml)

                gta.free_sources(False)
                for src_name in correl_dict.keys():
                    gta.free_source(src_name, pars='norm')

                # build the SED
                gta.sed(pkey, outfile=outfile, make_plots=args.make_plots)

                # remove the source
                gta.delete_source(pkey)
                # put the ROI back to how it was
                gta.load_xml(args.roi_baseline)

        return gta
Example #8
    def run_analysis(self, argv):
        """Run this analysis"""
        args = self._parser.parse_args(argv)

        if not HAVE_ST:
            raise RuntimeError(
                "Trying to run fermipy analysis, but don't have ST")

        if args.load_baseline:
            gta = GTAnalysis.create(args.roi_baseline,
                                    args.config)
        else:
            gta = GTAnalysis(args.config,
                             logging={'verbosity': 3},
                             fileio={'workdir_regex': r'\.xml$|\.npy$'})
            gta.setup()
            if is_not_null(args.input_pars):
                gta.load_parameters_from_yaml(args.input_pars)
            gta.write_roi(args.roi_baseline,
                          save_model_map=True,
                          save_weight_map=True,
                          make_plots=args.make_plots)

        src_list = get_src_names(gta)
        plotter = plotting.AnalysisPlotter(gta.config['plotting'],
                                           fileio=gta.config['fileio'],
                                           logging=gta.config['logging'])

        if is_null(args.fit_strategy):
            return

        fit_strategy = load_yaml(args.fit_strategy)
        npred_current = None
        npred_prev = None
        
        plots_only = False

        for fit_stage in fit_strategy:
            mask = fit_stage.get('mask', None)
            npred_threshold = fit_stage.get('npred_threshold', 1.0e4)
            frac_threshold = fit_stage.get('frac_threshold', 0.5)
            npred_frac = fit_stage.get('npred_frac', 0.9999)

            if plots_only:
                gta.load_roi("%s.npy" % fit_stage['key'])
                npred_current = set_wts_get_npred_wt(gta, mask)
                skip_list_region = get_unchanged(src_list,
                                                 npred_current,
                                                 npred_prev,
                                                 frac_threshold=frac_threshold)
            else:
                npred_current = set_wts_get_npred_wt(gta, mask)
                skip_list_region = get_unchanged(src_list,
                                                 npred_current,
                                                 npred_prev,
                                                 frac_threshold=frac_threshold)
                gta.optimize(npred_frac=npred_frac,
                             npred_threshold=npred_threshold,
                             skip=skip_list_region)
            
            snapshot(gta, plotter, fit_stage['key'], make_plots=args.make_plots)
            npred_prev = npred_current
            npred_current = build_srcdict(gta, 'npred_wt')
Example #9
def main():
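    # Assumed imports for this script fragment: argparse, copy, logging, yaml,
    # numpy as np, os.path as path, glob.glob, astropy.table.Table, fermipy,
    # fermipy.gtanalysis.GTAnalysis, plus project helpers (utils, lsf,
    # set_lc_bin, calc_counts, rebin, rm_trailing_zeros, set_src_spec_pl,
    # set_free_pars_lc, refit, fa, init_matplotlib_backend) from the
    # surrounding fermiAnalysis package.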
    usage = "usage: %(prog)s -c config.yaml"
    description = "Run the lc analysis"
    parser = argparse.ArgumentParser(usage=usage, description=description)
    parser.add_argument('-c', '--conf', required=True)
    parser.add_argument('-i',
                        required=False,
                        default=0,
                        help='Set local or scratch calculation',
                        type=int)
    parser.add_argument('--state',
                        help='analysis state',
                        choices=['avgspec', 'setup'],
                        default='avgspec')
    parser.add_argument('--forcepl',
                        default=0,
                        help='Force the target source to have power-law shape',
                        type=int)
    parser.add_argument('--createsed',
                        default=0,
                        help='Create SED from best fit model',
                        type=int)
    parser.add_argument(
        '--adaptive',
        default=0,
        help='Use adaptive binning for minute scale light curves',
        type=int)
    parser.add_argument('--srcprob', default=0,
                        help='Calculate the source probability for the photons,'
                             ' only works when no sub orbit time scales are used',
                        type=int)
    parser.add_argument(
        '--mincounts',
        default=2,
        help='Minimum number of counts within LC bin to run analysis',
        type=int)
    parser.add_argument('--simulate', default=None,
                        help='None or full path to yaml file which contains src name'
                             ' and spec to be simulated',
                        )
    parser.add_argument(
        '--make_plots',
        default=0,
        type=int,
        help='Create sed plot',
    )
    parser.add_argument(
        '--randomize',
        default=1,
        help='If you simulate, use Poisson realization. If false, use Asimov data set',
        type=int)
    args = parser.parse_args()

    utils.init_logging('DEBUG')
    config = yaml.safe_load(open(args.conf))
    tmpdir, job_id = lsf.init_lsf()
    if not job_id:
        job_id = args.i
    logging.info('tmpdir: {0:s}, job_id: {1:n}'.format(tmpdir, job_id))
    os.chdir(tmpdir)  # go to tmp directory
    logging.info('Entering directory {0:s}'.format(tmpdir))
    logging.info('PWD is {0:s}'.format(os.environ["PWD"]))

    # copy the ft1,ft2 and ltcube files
    #for k in ['evfile','scfile','ltcube']:
    # don't stage them, done automatically by fermipy if needed
    #        config[k] = utils.copy2scratch(config[k], tmpdir)
    # set the scratch directories
    logging.debug(config['data'])
    config['fileio']['scratchdir'] = tmpdir

    # set the log file
    logdir = copy.deepcopy(config['fileio']['logfile'])
    config['fileio']['logfile'] = path.join(tmpdir, 'fermipy.log')
    # debugging: all files will be saved (default is False)
    #config['fileio']['savefits'] = True

    # if simulating an orbit, save fits files
    if args.simulate is not None:
        config['fileio']['savefits'] = True

    # copy all fits files already present in outdir
    # run the analysis
    lc_config = copy.deepcopy(config['lightcurve'])
    fit_config = copy.deepcopy(config['fit_pars'])

    # remove parameters from config file not accepted by fermipy
    for k in ['configname', 'tmp', 'log', 'fit_pars']:
        config.pop(k, None)
    if 'adaptive' in config['lightcurve'].keys():
        config['lightcurve'].pop('adaptive', None)

    # set the correct time bin
    config['selection']['tmin'], config['selection']['tmax'], nj = set_lc_bin(
        config['selection']['tmin'],
        config['selection']['tmax'],
        config['lightcurve']['binsz'],
        job_id - 1 if job_id > 0 else 0,
        ft1=config['data']['evfile'])
    logging.debug('setting light curve bin ' + \
        '{0:n}, between {1[tmin]:.0f} and {1[tmax]:.0f}'.format(job_id, config['selection']))
    if args.adaptive:
        config['fileio']['outdir'] = utils.mkdir(
            path.join(config['fileio']['outdir'],
                      'adaptive{0:.0f}/'.format(lc_config['adaptive'])))

    if args.state == 'setup':
        config['fileio']['outdir'] = utils.mkdir(
            path.join(config['fileio']['outdir'],
                      'setup{0:05n}/'.format(job_id if job_id > 0 else 1)))
    else:
        config['fileio']['outdir'] = utils.mkdir(
            path.join(config['fileio']['outdir'],
                      '{0:05n}/'.format(job_id if job_id > 0 else 1)))

    logging.info('Starting with fermipy analysis')
    logging.info('using fermipy version {0:s}'.format(fermipy.__version__))
    logging.info('located at {0:s}'.format(fermipy.__file__))

    if config['data']['ltcube'] == '':
        config['data'].pop('ltcube', None)

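    # A binsz string such as '5mingti' requests sub-GTI light-curve binning,
    # with the minute value converted to seconds; a bare 'gti' string falls
    # back to 3-hour bins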
    compute_sub_gti_lc = False
    if isinstance(config['lightcurve']['binsz'], str):
        if len(config['lightcurve']['binsz'].strip('gti')):
            compute_sub_gti_lc = True
            if config['lightcurve']['binsz'].find('min') > 0:
                config['lightcurve']['binsz'] = float(
                    config['lightcurve']['binsz'].strip('gti').strip(
                        'min')) * 60.
                logging.info("set time bin length to {0:.2f}s".format(
                    config['lightcurve']['binsz']))
        else:
            config['lightcurve']['binsz'] = 3. * 3600.
    try:
        gta = GTAnalysis(config, logging={'verbosity': 3})
    except Exception as e:
        logging.error("{0}".format(e))
        config['selection']['target'] = None
        gta = GTAnalysis(config, logging={'verbosity': 3})
        sep = gta.roi.sources[0]['offset']
        logging.warning(
            "Source closest to ROI center is {0:.3f} degree away".format(sep))
        if sep < 0.1:
            config['selection']['target'] = gta.roi.sources[0]['name']
            gta.config['selection']['target'] = config['selection']['target']
            logging.info("Set target to {0:s}".format(
                config['selection']['target']))

    # stage the full time array analysis results to the tmp dir
    # do not copy png images
    files = [
        fn for fn in glob(fit_config['avgspec'])
        if fn.find('.xml') > 0 or fn.find('.npy') > 0
    ]
    files += [config['data']['evfile']]
    utils.copy2scratch(files, gta.workdir)

    # we're using actual data
    if args.simulate is None:
        # check before the analysis start if there are any events in the master file
        # in the specified time range
        logging.info('Checking for events in initial ft1 file')
        t = Table.read(path.join(gta.workdir,
                                 path.basename(config['data']['evfile'])),
                       hdu='EVENTS')
        logging.info("times in base ft1: {0} {1} {2}".format(
            t["TIME"].max(), t["TIME"].min(),
            t["TIME"].max() - t["TIME"].min()))
        m = (t["TIME"] >= config['selection']['tmin']) & (
            t["TIME"] <= config['selection']['tmax'])
        if np.sum(m) < args.mincounts + 1:
            logging.error(
                "*** Only {0:n} events between tmin and tmax! Exiting".format(
                    np.sum(m)))
            assert np.sum(m) > args.mincounts
        else:
            logging.info("{0:n} events between tmin and tmax".format(
                np.sum(m)))

        # check how many bins are in each potential light curve bin
        if compute_sub_gti_lc:
            # select time of first and last
            # photon instead of GTI time
            m = (t["TIME"] >= config['selection']['tmin']) & \
                 (t["TIME"] <= config['selection']['tmax'])

            tmin = t["TIME"][m].min() - 1.
            tmax = t["TIME"][m].max() + 1.
            logging.info("There will be up to {0:n} time bins".format(np.ceil(
                (tmax - tmin) / \
                config['lightcurve']['binsz'])))

            bins = np.arange(tmin, tmax, config['lightcurve']['binsz'])
            bins = np.concatenate([bins, [config['selection']['tmax']]])
            counts = calc_counts(t, bins)
            # remove the starting times of the bins with zero counts
            # and rebin the data
            logging.info("Counts before rebinning: {0}".format(counts))
            mincounts = 10.
            mc = counts < mincounts
            if np.sum(mc):
                # remove trailing zeros
                if np.any(counts == 0.):
                    mcounts_post, mcounts_pre = rm_trailing_zeros(counts)
                    counts = counts[mcounts_post & mcounts_pre]
                    bins = np.concatenate([
                        bins[:-1][mcounts_post & mcounts_pre],
                        [bins[1:][mcounts_post & mcounts_pre].max()]
                    ])
                bins = rebin(counts, bins)
                logging.info("Bin lengths after rebinning: {0}".format(
                    np.diff(bins)))
                logging.info("Bin times after rebinning: {0}".format(bins))
                counts = calc_counts(t, bins)
                logging.info("Counts after rebinning: {0}".format(counts))
            else:
                logging.info("Regular time binning will be used")
            bins = list(bins)

    logging.info('Running fermipy setup')
    try:
        gta.setup()
    except (RuntimeError, IndexError) as e:
        logging.error(
            'Caught Runtime/Index Error while initializing analysis object')
        logging.error('Printing error:')
        logging.error(e)
        # Python 3: exceptions have no .message attribute; use str(e)
        msg = str(e)
        if msg.find("File not found") >= 0 and msg.find('srcmap') >= 0:
            logging.error("*** Srcmap calculation failed ***")
        if msg.find("NDSKEYS") >= 0 and msg.find('srcmap') >= 0:
            logging.error(
                "*** Srcmap calculation failed with NDSKEYS keyword not found in header ***"
            )

        logging.info("Checking if there are events in ft1 file")
        ft1 = path.join(gta.workdir, 'ft1_00.fits')
        f = glob(ft1)
        if not len(f):
            logging.error(
                "*** no ft1 file found at location {0:s}".format(ft1))
            raise
        t = Table.read(f[0], hdu='EVENTS')
        if not len(t):
            logging.error("*** The ft1 file contains no events!! ***")
        else:
            logging.info("The ft1 file contains {0:n} event(s)".format(len(t)))
        return

    # end here if you only want to calculate
    # intermediate fits files
    if args.state == 'setup':
        return gta

    logging.info('Loading the fit for the average spectrum')
    gta.load_roi('avgspec')  # reload the average spectral fit
    logging.info('Running fermipy optimize and fit')

    # we're using actual data
    if args.simulate is None:
        if args.forcepl:
            gta = set_src_spec_pl(
                gta, gta.get_source_name(config['selection']['target']))
        # to do: add EBL absorption at some stage ...
        # gta = add_ebl_atten(gta, gta.get_source_name(config['selection']['target']), fit_config['z'])

        # make sure you are fitting data
        gta.simulate_roi(restore=True)

        if compute_sub_gti_lc:
            if args.adaptive:
                # do import only here since root must be compiled
                from fermiAnalysis import adaptivebinning as ab
                # compute the exposure
                energy = 1000.
                texp, front, back = ab.comp_exposure_phi(gta, energy=energy)
                # compute the bins
                result = ab.time_bins(
                    gta,
                    texp,
                    0.5 * (front + back),
                    #critval = 20., # bins with ~20% unc
                    critval=lc_config['adaptive'],
                    Epivot=None,  # compute on the fly
                    #                        tstart = config['selection']['tmin'],
                    #                        tstop = config['selection']['tmax']
                )

                # cut the bins to this GTI
                mask = result['tstop'] > config['selection']['tmin']
                mask = mask & (result['tstart'] < config['selection']['tmax'])

                # try again with catalog values
                if not np.sum(mask):
                    logging.error(
                        "Adaptive bins outside time window, trying catalog values for flux"
                    )
                    result = ab.time_bins(
                        gta,
                        texp,
                        0.5 * (front + back),
                        critval=lc_config['adaptive'],  # bins with ~20% unc
                        Epivot=None,  # compute on the fly
                        forcecatalog=True,
                        #                        tstart = config['selection']['tmin'],
                        #                        tstop = config['selection']['tmax']
                    )

                    # cut the bins to this GTI
                    mask = result['tstop'] > config['selection']['tmin']
                    mask = mask & (result['tstart'] <
                                   config['selection']['tmax'])
                    if not np.sum(mask):
                        logging.error(
                            "Adaptive bins do not cover selected time interval!"
                        )
                        logging.error("Using original bins")

                    else:
                        bins = np.concatenate((result['tstart'][mask],
                                               [result['tstop'][mask][-1]]))
                        bins[0] = np.max(
                            [config['selection']['tmin'], bins[0]])
                        bins[-1] = np.min(
                            [config['selection']['tmax'], bins[-1]])
                        bins = list(bins)

                        # removing trailing zeros
                        counts = calc_counts(t, bins)
                        mcounts_post, mcounts_pre = rm_trailing_zeros(counts)
                        logging.info(
                            "count masks: {0} {1}, bins: {2}, counts: {3}".
                            format(mcounts_post, mcounts_pre, bins, counts))
                        counts = counts[mcounts_post & mcounts_pre]
                        bins = np.concatenate([
                            np.array(bins)[:-1][mcounts_post & mcounts_pre],
                            [
                                np.array(bins)[1:][mcounts_post
                                                   & mcounts_pre].max()
                            ]
                        ])
                        logging.info(
                            "Using bins {0}, total n={1:n} bins".format(
                                bins,
                                len(bins) - 1))
                        logging.info("bins widths : {0}".format(np.diff(bins)))
                        logging.info("counts per bin: {0} ".format(
                            calc_counts(t, bins)))
                        bins = list(bins)


            # TODO: test that this is working also with GTIs that have
            # little or no counts

            lc = gta.lightcurve(
                config['selection']['target'],
                binsz=config['lightcurve']['binsz'],
                free_background=config['lightcurve']['free_background'],
                free_params=config['lightcurve']['free_params'],
                free_radius=config['lightcurve']['free_radius'],
                make_plots=False,
                multithread=True,
                nthread=4,
                #multithread = False,
                #nthread = 1,
                save_bin_data=True,
                shape_ts_threshold=16.,
                use_scaled_srcmap=True,
                use_local_ltcube=True,
                write_fits=True,
                write_npy=True,
                time_bins=bins,
                outdir='{0:.0f}s'.format(config['lightcurve']['binsz']))
        else:
            # run the fitting of the entire time and energy range
            try:
                o = gta.optimize()  # perform an initial fit
                logging.debug(o)
            except RuntimeError as e:
                logging.error("Error in optimize: {0}".format(e))
                logging.info("Trying to continue ...")

            gta = set_free_pars_lc(gta, config, fit_config)

            f = gta.fit()

            if 'fix_sources' in fit_config.keys():
                skip = fit_config['fix_sources'].keys()
            else:
                skip = []

            gta, f = refit(gta,
                           config['selection']['target'],
                           f,
                           fit_config['ts_fixed'],
                           skip=skip)
            gta.print_roi()
            gta.write_roi('lc')

            if args.createsed:
                if args.make_plots:
                    init_matplotlib_backend()
                gta.load_roi('lc')  # reload the average spectral fit
                logging.info('Running sed for {0[target]:s}'.format(
                    config['selection']))
                sed = gta.sed(config['selection']['target'],
                              prefix='lc_sed',
                              free_radius=(None if config['sed']['free_radius'] == 0.
                                           else config['sed']['free_radius']),
                              free_background=config['sed']['free_background'],
                              free_pars=fa.allnorm,
                              make_plots=args.make_plots,
                              cov_scale=config['sed']['cov_scale'],
                              use_local_index=config['sed']['use_local_index'],
                              bin_index=config['sed']['bin_index'])

            # debugging: calculate sed and resid maps for each light curve bin
            # logging.info('Running sed for {0[target]:s}'.format(config['selection']))
            # sed = gta.sed(config['selection']['target'], prefix='lc')
            # model = {'Scale': 1000., 'Index': fit_config['new_src_pl_index'], 'SpatialModel': 'PointSource'}
            # resid_maps = gta.residmap('lc', model=model, make_plots=True, write_fits=True, write_npy=True)

            if args.srcprob:
                logging.info("Running srcprob with srcmdl {0:s}".format('lc'))
                gta.compute_srcprob(xmlfile='lc', overwrite=True)

    # we are simulating a source
    else:
        # TODO: I probably have to run the setup here. Do on weekly files, i.e., no time cut? Only do that later?

        # np.load needs a binary-mode file handle
        with open(args.simulate, 'rb') as f:
            simsource = np.load(f, allow_pickle=True).flat[0]

        # set the source to the simulation value
        gta.set_source_spectrum(
            simsource['target'],
            spectrum_type=simsource['spectrum_type'],
            spectrum_pars=simsource['spectrum_pars'][job_id - 1])

        logging.info("changed spectral parameters to {0}".format(
            gta.roi.get_source_by_name(simsource['target']).spectral_pars))

        # simulate the ROI
        gta.simulate_roi(randomize=bool(args.randomize))
        gta = set_free_pars_lc(gta, config, fit_config)

        # fit the simulation
        f = gta.fit()
        gta, f = refit(gta, config['selection']['target'], f,
                       fit_config['ts_fixed'])
        gta.print_roi()
        gta.write_roi('lc_simulate_{0:s}'.format(simsource['suffix']))
    return gta
Example #10
# first optimization run with output
fit_res = gta.optimize()

gta.write_roi('fit_optimize')

# free parameters for full likelihood fit
gta.free_sources(pars='norm')
gta.free_sources(distance=3.0)
gta.free_source('galdiff')
gta.free_source('isodiff')


# do the likelihood fit
fit_results = gta.fit()
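# if the fit failed, retry from the optimized ROI with progressively
# smaller regions of freed parameters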
if not fit_results['fit_success']:
    gta.load_roi('fit_optimize.npy')
    gta.free_sources(free=False)
    gta.free_sources(pars='norm', distance=3.0)
    gta.free_sources(distance=1.)
    gta.free_source('galdiff')
    gta.free_source('isodiff')
    fit_res2 = gta.fit()
    if not fit_res2['fit_success']:
        gta.load_roi('fit_optimize.npy')
        gta.free_sources(free=False)
        gta.free_sources(pars='norm', distance=1.5)
        gta.free_sources(distance=0.5)
        gta.free_source('galdiff')
        gta.free_source('isodiff')
        fit_res2 = gta.fit()
Example #11
import argparse
import copy
import os

import numpy as np

from fermipy.gtanalysis import GTAnalysis
# fit_region and fit_halo are assumed to be helpers from the surrounding
# analysis package; they are not part of fermipy itself.


def main():

    usage = "usage: %(prog)s [config file]"
    description = "Run fermipy analysis chain."
    parser = argparse.ArgumentParser(usage=usage, description=description)

    parser.add_argument('--config', default='sample_config.yaml')
    parser.add_argument('--source', default=None)

    args = parser.parse_args()
    gta = GTAnalysis(args.config, logging={'verbosity': 3},
                     fileio={'workdir_regex': r'\.xml$|\.npy$'})

    gta.setup()

    names = [s.name for s in gta.roi.sources if not s.diffuse]
    gta.reload_sources(names)
    
    sqrt_ts_threshold = 3
    
    model0 = {'SpatialModel': 'PointSource', 'Index': 1.5}
    model1 = {'SpatialModel': 'PointSource', 'Index': 2.0}
    model2 = {'SpatialModel': 'PointSource', 'Index': 2.5}
    #src_name = gta.roi.sources[0].name
    if args.source is None:
        src_name = gta.config['selection']['target']
    else:
        src_name = args.source
        
    # -----------------------------------
    # Fit the Baseline Model
    # -----------------------------------

    # Get a reasonable starting point for the spectral model
    gta.free_source(src_name)
    gta.fit()
    gta.free_source(src_name, False)

    gta.optimize()
    
    # Localize 3FGL sources
    for s in gta.roi.sources:

        if s['SpatialModel'] != 'PointSource':
            continue

        if s['offset'] < 0.5 or s['ts'] < 25.:
            continue

        if s['offset_roi_edge'] > -0.1:
            continue
        
        gta.localize(s.name, nstep=5, dtheta_max=0.5, update=True,
                     prefix='base')

        gta.free_source(s.name, False)

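    # TS map of the baseline model with a PowerLaw (Index=2) point-source test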
    gta.tsmap('base', model=model1)

    # Look for new point sources outside the inner 1.0 deg

    gta.find_sources('base', model=model1,
                     search_skydir=gta.roi.skydir,
                     max_iter=5, min_separation=0.5,
                     sqrt_ts_threshold=sqrt_ts_threshold,
                     search_minmax_radius=[1.0, None])
    gta.optimize()

    gta.print_roi()

    gta.write_roi('base')

    # -----------------------------------
    # Pass 0 - Source at Nominal Position
    # -----------------------------------

    fit_region(gta, 'fit0', src_name)

    # -------------------------------------
    # Pass 1 - Source at Localized Position
    # -------------------------------------

    gta.localize(src_name, nstep=5, dtheta_max=0.5, update=True,
                 prefix='fit1')

    fit_region(gta, 'fit1', src_name)
    fit_halo(gta, 'fit1', src_name)
    gta.load_roi('fit1')

    # -------------------------------------
    # Pass 2 - 2+ Point Sources
    # -------------------------------------

    srcs = []

    # Add up to four additional point sources (passes 2 through 5)
    for i in range(2, 6):

        srcs_fit = gta.find_sources('fit%i' % i,
                                    search_skydir=gta.roi.skydir,
                                    max_iter=1,
                                    sources_per_iter=1,
                                    sqrt_ts_threshold=3,
                                    min_separation=0.5,
                                    search_minmax_radius=[None,1.0])

        if len(srcs_fit['sources']) == 0:
            break

        srcs += srcs_fit['sources']
        best_fit_idx = i

        gta.localize(src_name, nstep=5, dtheta_max=0.4,
                     update=True, prefix='fit%i' % i)

        # Relocalize new sources
        for s in sorted(srcs, key=lambda t: t['ts'], reverse=True):
            gta.localize(s.name, nstep=5, dtheta_max=0.4,
                         update=True, prefix='fit%i' % i)

        fit_region(gta, 'fit%i' % i, src_name)
        fit_halo(gta, 'fit%i' % i, src_name)

        gta.load_roi('fit%i' % i)

    new_source_data = []
    for s in srcs:
        src_data = gta.roi[s.name].data
        new_source_data.append(copy.deepcopy(src_data))

    np.save(os.path.join(gta.workdir, 'new_source_data.npy'),
            new_source_data)
Example #12
                     sqrt_ts_threshold=sqrt_ts_threshold,
                     search_minmax_radius=[1.0,None])
    gta.optimize()

    gta.print_roi()

    gta.write_roi('base')

    # -----------------------------------
    # Pass 0 - Source at Nominal Position
    # -----------------------------------

    fit_region(gta, 'fit0', src_name)
    #fit_region(gta,'fit0_emin40',src_name,erange=[4.0,5.5])

    gta.load_roi('fit0')

    # -------------------------------------
    # Pass 1 - Source at Localized Position
    # -------------------------------------

    gta.localize(src_name, nstep=5, dtheta_max=0.5, update=True,
                 prefix='fit1')

    fit_region(gta, 'fit1', src_name)
    fit_halo(gta, 'fit1', src_name, halo_width, halo_index)

    #fit_region(gta,'fit1_emin40',src_name,erange=[4.0,5.5])
    #fit_halo(gta,'fit1_emin40',src_name,halo_width,halo_index,erange=[4.0,5.5])

    gta.load_roi('fit1')
Example #13
    def run_analysis(self, argv):
        """Run this analysis"""
        args = self._parser.parse_args(argv)

        if not HAVE_ST:
            raise RuntimeError(
                "Trying to run fermipy analysis, but don't have ST")

        if is_null(args.skydirs):
            skydir_dict = None
        else:
            skydir_dict = load_yaml(args.skydirs)

        gta = GTAnalysis(args.config,
                         logging={'verbosity': 3},
                         fileio={'workdir_regex': r'\.xml$|\.npy$'})
        #gta.setup(overwrite=False)
        gta.load_roi(args.roi_baseline)
        gta.print_roi()

        basedir = os.path.dirname(args.config)
        # This should be a no-op, b/c it was done in the baseline analysis

        for profile in args.profiles:
            if skydir_dict is None:
                skydir_keys = [None]
            else:
                skydir_keys = sorted(skydir_dict.keys())

            for skydir_key in skydir_keys:
                if skydir_key is None:
                    pkey, psrc_name, pdict = build_profile_dict(basedir, profile)
                else:
                    skydir_val = skydir_dict[skydir_key]
                    pkey, psrc_name, pdict = build_profile_dict(basedir, profile)
                    pdict['ra'] = skydir_val['ra']
                    pdict['dec'] = skydir_val['dec']
                    pkey += "_%06i" % skydir_key

                outfile = "sed_%s.fits" % pkey

                # Add the source and get the list of correlated sources
                correl_dict, test_src_name = add_source_get_correlated(
                    gta, psrc_name, pdict, correl_thresh=0.25,
                    non_null_src=args.non_null_src)

                # Write the list of correlated sources
                correl_yaml = os.path.join(basedir, "correl_%s.yaml" % pkey)
                write_yaml(correl_dict, correl_yaml)

                gta.free_sources(False)
                for src_name in correl_dict.keys():
                    gta.free_source(src_name, pars='norm')

                # build the SED
                if args.non_null_src:
                    gta.update_source(test_src_name, reoptimize=True)
                    gta.write_roi("base_%s"% pkey, make_plots=False)
                gta.sed(test_src_name, prefix=pkey, outfile=outfile, make_plots=args.make_plots)

                # remove the source
                gta.delete_source(test_src_name)
                # put the ROI back to how it was
                gta.load_xml(args.roi_baseline)

        return gta
Example #14
    if not check_log(logfile) == "Successful":
        sys.exit(1)

    gta.setup()

    sqrt_ts_threshold = 3

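    # Halo width (deg, log-spaced) and spectral-index grid for the extension scan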
    halo_width = np.logspace(-1, 0, 9)
    halo_index = np.array([1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0])

    model0 = {"SpatialModel": "PointSource", "Index": 1.5}
    model1 = {"SpatialModel": "PointSource", "Index": 2.0}
    model2 = {"SpatialModel": "PointSource", "Index": 2.5}
    src_name = gta.roi.sources[0].name

    gta.load_roi("base", reload_sources=True)
    # gta.tsmap('base',model=model1)
    gta.tsmap("base_emin40", model=model1, erange=[4.0, 5.5])

    gta.print_roi()

    # -----------------------------------
    # Pass 0 - Source at Nominal Position
    # -----------------------------------

    gta.load_roi("fit0", reload_sources=True)

    # fit_region(gta,'fit0',src_name)
    fit_region(gta, "fit0_emin40", src_name, erange=[4.0, 5.5])

    # -------------------------------------