Example No. 1
import numpy as np


from astropy.table import Table, Column, vstack

from fermipy.utils import load_yaml, init_matplotlib_backend

from fermipy.jobs.utils import is_not_null
from fermipy.jobs.link import Link
from fermipy.jobs.scatter_gather import ScatterGather
from fermipy.jobs.slac_impl import make_nfs_path

from fermipy.jobs.name_policy import NameFactory
from fermipy.jobs import defaults

init_matplotlib_backend('Agg')


NAME_FACTORY = NameFactory(basedir='.')


def _get_enum_bins(configfile):
    """Get the number of energy bins in the SED

    Parameters
    ----------
    configfile : str
        Fermipy configuration file.

    Returns
    -------
    nbins : int
        The number of energy bins.
    """
    # Truncated in the original; the body below is an assumed completion
    # that reads the energy range and bins per decade from the config.
    config = load_yaml(configfile)
    emin = config['selection']['emin']
    emax = config['selection']['emax']
    binsperdec = config['binning']['binsperdec']
    return int(np.ceil(binsperdec * np.log10(emax / emin)))
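
A minimal usage sketch for the completed function; the file name and the configuration values are hypothetical:

# assuming config.yaml contains selection: {emin: 100., emax: 100000.}
# and binning: {binsperdec: 8}
nbins = _get_enum_bins('config.yaml')  # -> 8 * log10(1e5 / 1e2) = 24 bins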
Example No. 2
import argparse

from fermipy import utils

# set the matplotlib backend before anything that imports pyplot
utils.init_matplotlib_backend()
from fermipy.gtanalysis import GTAnalysis


def main():

    usage = "usage: %(prog)s [config file]"
    description = "Run fermipy analysis chain."
    parser = argparse.ArgumentParser(usage=usage, description=description)

    parser.add_argument('--config', default='sample_config.yaml')
    parser.add_argument('--source', default=None)

    args = parser.parse_args()
    gta = GTAnalysis(args.config)

    if args.source is None:
        src_name = gta.roi.sources[0].name
    else:
        src_name = args.source

    gta.setup()
    gta.optimize()
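
    # --- assumed continuation; the original example is truncated here ---
    # free normalizations of nearby sources and the target, fit, write the
    # ROI model, and compute an SED (a sketch, not the original code)
    gta.free_sources(distance=3.0, pars='norm')
    gta.free_source(src_name)
    fit_result = gta.fit()

    gta.write_roi('fit_model')
    sed = gta.sed(src_name)


if __name__ == '__main__':
    main()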
Example No. 3
from os.path import splitext

from fermipy.utils import init_matplotlib_backend, load_yaml

from fermipy.jobs.link import Link
from fermipy.jobs.scatter_gather import ScatterGather
from fermipy.jobs.slac_impl import make_nfs_path

from fermipy.castro import CastroData
from fermipy.sed_plotting import plotCastro

from fermipy.jobs.name_policy import NameFactory
from fermipy.jobs import defaults

init_matplotlib_backend()
NAME_FACTORY = NameFactory(basedir='.')


class PlotCastro(Link):
    """Small class to plot an SED as a 'Castro' plot.
    """
    appname = 'fermipy-plot-castro'
    linkname_default = 'plot-castro'
    usage = '%s [options]' % (appname)
    description = "Plot likelihood v. flux normalization and energy"

    default_options = dict(infile=defaults.generic['infile'],
                           outfile=defaults.generic['outfile'])

    __doc__ += Link.construct_docstring(default_options)
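
    # The listing ends with the class attributes. A Link subclass in the
    # fermipy jobs framework also implements run_analysis; the body below is
    # a plausible sketch based on the imports above, not verbatim fermipy.
    def run_analysis(self, argv):
        """Run this analysis (assumed body, following the Link pattern)."""
        args = self._parser.parse_args(argv)
        # read the SED file into a CastroData object and plot it
        castro_data = CastroData.create_from_sedfile(args.infile)
        plot = plotCastro(castro_data, ylims=[1e-8, 1e-5])
        if args.outfile:
            plot[0].savefig(args.outfile)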
Example No. 4
import os
import sys
import numpy as np

from fermipy.utils import load_yaml, write_yaml, init_matplotlib_backend

from fermipy.jobs.utils import is_null, is_not_null
from fermipy.jobs.link import Link
from fermipy.jobs.analysis_utils import add_source_get_correlated, build_profile_dict
from fermipy.jobs.scatter_gather import ScatterGather

from fermipy.jobs.name_policy import NameFactory
from fermipy.jobs import defaults

init_matplotlib_backend('Agg')

try:
    from fermipy.gtanalysis import GTAnalysis
    HAVE_ST = True
except ImportError:
    HAVE_ST = False

NAME_FACTORY = NameFactory(basedir='.')


class AnalyzeExtension(Link):
    """Small class to wrap an analysis script.

    This particular script does target extension analysis.
    """
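
    # The snippet is truncated here; following the PlotCastro examples above,
    # the class plausibly continues with the usual Link boilerplate. The
    # attribute values below are assumptions by analogy, not verbatim fermipy.
    appname = 'fermipy-analyze-extension'
    linkname_default = 'analyze-extension'
    usage = '%s [options]' % (appname)
    description = "Run extension analysis of a target source"

    default_options = dict(config=defaults.common['config'],
                           roi_baseline=defaults.common['roi_baseline'])

    __doc__ += Link.construct_docstring(default_options)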
Example No. 5
import os
import copy
import logging
import argparse
from os import path
from glob import glob

import yaml
import numpy as np
from astropy.table import Table

import fermipy
from fermipy.gtanalysis import GTAnalysis
from fermipy.utils import init_matplotlib_backend
# Assumed imports (not shown in the original snippet): these helpers are
# taken here to come from the author's fermiAnalysis package and its
# batch-farm utilities.
import fermiAnalysis as fa
from fermiAnalysis.batchfarm import utils, lsf
from fermiAnalysis.utils import (set_lc_bin, set_src_spec_pl, set_free_pars_lc,
                                 refit, calc_counts, rm_trailing_zeros, rebin)


def main():
    usage = "usage: %(prog)s -c config.yaml"
    description = "Run the lc analysis"
    parser = argparse.ArgumentParser(usage=usage, description=description)
    parser.add_argument('-c', '--conf', required=True)
    parser.add_argument('-i',
                        required=False,
                        default=0,
                        help='Set local or scratch calculation',
                        type=int)
    parser.add_argument('--state',
                        help='analysis state',
                        choices=['avgspec', 'setup'],
                        default='avgspec')
    parser.add_argument('--forcepl',
                        default=0,
                        help='Force the target source to have power-law shape',
                        type=int)
    parser.add_argument('--createsed',
                        default=0,
                        help='Create SED from best fit model',
                        type=int)
    parser.add_argument(
        '--adaptive',
        default=0,
        help='Use adaptive binning for minute scale light curves',
        type=int)
    parser.add_argument('--srcprob', default=0,
                        help='Calculate the source probability for the photons,'
                             ' only works when no sub orbit time scales are used',
                        type=int)
    parser.add_argument(
        '--mincounts',
        default=2,
        help='Minimum number of counts within LC bin to run analysis',
        type=int)
    parser.add_argument('--simulate', default=None,
                        help='None or full path to yaml file which contains the src name'
                             ' and spec to be simulated',
                        )
    parser.add_argument(
        '--make_plots',
        default=0,
        type=int,
        help='Create sed plot',
    )
    parser.add_argument(
        '--randomize',
        default=1,
        help='If you simulate, use a Poisson realization; if 0, use the Asimov data set',
        type=int)
    args = parser.parse_args()

    utils.init_logging('DEBUG')
    config = yaml.safe_load(open(args.conf))
    tmpdir, job_id = lsf.init_lsf()
    if not job_id:
        job_id = args.i
    logging.info('tmpdir: {0:s}, job_id: {1:n}'.format(tmpdir, job_id))
    os.chdir(tmpdir)  # go to tmp directory
    logging.info('Entering directory {0:s}'.format(tmpdir))
    logging.info('PWD is {0:s}'.format(os.environ["PWD"]))

    # copy the ft1,ft2 and ltcube files
    #for k in ['evfile','scfile','ltcube']:
    # don't stage them, done automatically by fermipy if needed
    #        config[k] = utils.copy2scratch(config[k], tmpdir)
    # set the scratch directories
    logging.debug(config['data'])
    config['fileio']['scratchdir'] = tmpdir

    # set the log file
    logdir = copy.deepcopy(config['fileio']['logfile'])
    config['fileio']['logfile'] = path.join(tmpdir, 'fermipy.log')
    # debugging: all files will be saved (default is False)
    #config['fileio']['savefits'] = True

    # if simulating an orbit, save fits files
    if args.simulate is not None:
        config['fileio']['savefits'] = True

    # copy all fits files already present in outdir
    # run the analysis
    lc_config = copy.deepcopy(config['lightcurve'])
    fit_config = copy.deepcopy(config['fit_pars'])

    # remove parameters from config file not accepted by fermipy
    for k in ['configname', 'tmp', 'log', 'fit_pars']:
        config.pop(k, None)
    if 'adaptive' in config['lightcurve'].keys():
        config['lightcurve'].pop('adaptive', None)

    # set the correct time bin
    config['selection']['tmin'], config['selection']['tmax'], nj = set_lc_bin(
        config['selection']['tmin'],
        config['selection']['tmax'],
        config['lightcurve']['binsz'],
        job_id - 1 if job_id > 0 else 0,
        ft1=config['data']['evfile'])
    logging.debug('setting light curve bin ' + \
        '{0:n}, between {1[tmin]:.0f} and {1[tmax]:.0f}'.format(job_id, config['selection']))
    if args.adaptive:
        config['fileio']['outdir'] = utils.mkdir(
            path.join(config['fileio']['outdir'],
                      'adaptive{0:.0f}/'.format(lc_config['adaptive'])))

    if args.state == 'setup':
        config['fileio']['outdir'] = utils.mkdir(
            path.join(config['fileio']['outdir'],
                      'setup{0:05n}/'.format(job_id if job_id > 0 else 1)))
    else:
        config['fileio']['outdir'] = utils.mkdir(
            path.join(config['fileio']['outdir'],
                      '{0:05n}/'.format(job_id if job_id > 0 else 1)))

    logging.info('Starting with fermipy analysis')
    logging.info('using fermipy version {0:s}'.format(fermipy.__version__))
    logging.info('located at {0:s}'.format(fermipy.__file__))

    if config['data']['ltcube'] == '':
        config['data'].pop('ltcube', None)

    compute_sub_gti_lc = False
    if isinstance(config['lightcurve']['binsz'], str):
        if len(config['lightcurve']['binsz'].strip('gti')):
            compute_sub_gti_lc = True
            if config['lightcurve']['binsz'].find('min') > 0:
                config['lightcurve']['binsz'] = float(
                    config['lightcurve']['binsz'].strip('gti').strip('min')) * 60.
                logging.info("set time bin length to {0:.2f}s".format(
                    config['lightcurve']['binsz']))
        else:
            config['lightcurve']['binsz'] = 3. * 3600.
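    # The 'binsz' string convention assumed here: e.g. 'gti5min' requests
    # sub-GTI light-curve bins of 5 minutes (converted to 300 s above),
    # whereas a plain 'gti' falls back to regular 3-hour binning.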
    try:
        gta = GTAnalysis(config, logging={'verbosity': 3})
    except Exception as e:
        logging.error("{0}".format(e))
        config['selection']['target'] = None
        gta = GTAnalysis(config, logging={'verbosity': 3})
        sep = gta.roi.sources[0]['offset']
        logging.warning(
            "Source closest to ROI center is {0:.3f} degree away".format(sep))
        if sep < 0.1:
            config['selection']['target'] = gta.roi.sources[0]['name']
            gta.config['selection']['target'] = config['selection']['target']
            logging.info("Set target to {0:s}".format(
                config['selection']['target']))

    # stage the full time array analysis results to the tmp dir
    # do not copy png images
    files = [
        fn for fn in glob(fit_config['avgspec'])
        if fn.find('.xml') > 0 or fn.find('.npy') > 0
    ]
    files += [config['data']['evfile']]
    utils.copy2scratch(files, gta.workdir)

    # we're using actual data
    if args.simulate is None:
        # check before the analysis start if there are any events in the master file
        # in the specified time range
        logging.info('Checking for events in initial ft1 file')
        t = Table.read(path.join(gta.workdir,
                                 path.basename(config['data']['evfile'])),
                       hdu='EVENTS')
        logging.info("times in base ft1: {0} {1} {2}".format(
            t["TIME"].max(), t["TIME"].min(),
            t["TIME"].max() - t["TIME"].min()))
        m = (t["TIME"] >= config['selection']['tmin']) & (
            t["TIME"] <= config['selection']['tmax'])
        if np.sum(m) < args.mincounts + 1:
            logging.error(
                "*** Only {0:n} events between tmin and tmax! Exiting".format(
                    np.sum(m)))
            assert np.sum(m) > args.mincounts
        else:
            logging.info("{0:n} events between tmin and tmax".format(
                np.sum(m)))

        # check how many bins are in each potential light curve bin
        if compute_sub_gti_lc:
            # select time of first and last
            # photon instead of GTI time
            m = (t["TIME"] >= config['selection']['tmin']) & \
                 (t["TIME"] <= config['selection']['tmax'])

            tmin = t["TIME"][m].min() - 1.
            tmax = t["TIME"][m].max() + 1.
            logging.info("There will be up to {0:n} time bins".format(np.ceil(
                (tmax - tmin) / \
                config['lightcurve']['binsz'])))

            bins = np.arange(tmin, tmax, config['lightcurve']['binsz'])
            bins = np.concatenate([bins, [config['selection']['tmax']]])
            counts = calc_counts(t, bins)
            # remove the starting times of the bins with zero counts
            # and rebin the data
            logging.info("Counts before rebinning: {0}".format(counts))
            mincounts = 10.
            mc = counts < mincounts
            if np.sum(mc):
                # remove trailing zeros
                if np.any(counts == 0.):
                    mcounts_post, mcounts_pre = rm_trailing_zeros(counts)
                    counts = counts[mcounts_post & mcounts_pre]
                    bins = np.concatenate([
                        bins[:-1][mcounts_post & mcounts_pre],
                        [bins[1:][mcounts_post & mcounts_pre].max()]
                    ])
                bins = rebin(counts, bins)
                logging.info("Bin lengths after rebinning: {0}".format(
                    np.diff(bins)))
                logging.info("Bin times after rebinning: {0}".format(bins))
                counts = calc_counts(t, bins)
                logging.info("Counts after rebinning: {0}".format(counts))
            else:
                logging.info("Regular time binning will be used")
            bins = list(bins)
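            # calc_counts, rebin and rm_trailing_zeros come from the author's
            # helper module; calc_counts is assumed to be equivalent to
            #     counts, _ = np.histogram(t["TIME"], bins=bins)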

    logging.info('Running fermipy setup')
    try:
        gta.setup()
    except (RuntimeError, IndexError) as e:
        logging.error(
            'Caught Runtime/Index Error while initializing analysis object')
        logging.error('Printing error:')
        logging.error(e)
        # use str(e): Python 3 exceptions have no .message attribute
        msg = str(e)
        if msg.find("File not found") >= 0 and msg.find('srcmap') >= 0:
            logging.error("*** Srcmap calculation failed ***")
        if msg.find("NDSKEYS") >= 0 and msg.find('srcmap') >= 0:
            logging.error(
                "*** Srcmap calculation failed with NDSKEYS keyword not found in header ***"
            )

        logging.info("Checking if there are events in ft1 file")
        ft1 = path.join(gta.workdir, 'ft1_00.fits')
        f = glob(ft1)
        if not len(f):
            logging.error(
                "*** no ft1 file found at location {0:s}".format(ft1))
            raise
        t = Table.read(f[0], hdu='EVENTS')
        if not len(t):
            logging.error("*** The ft1 file contains no events!! ***")
        else:
            logging.info("The ft1 file contains {0:n} event(s)".format(len(t)))
        return

    # end here if you only want to calculate
    # intermediate fits files
    if args.state == 'setup':
        return gta

    logging.info('Loading the fit for the average spectrum')
    gta.load_roi('avgspec')  # reload the average spectral fit
    logging.info('Running fermipy optimize and fit')

    # we're using actual data
    if args.simulate is None:
        if args.forcepl:
            gta = set_src_spec_pl(
                gta, gta.get_source_name(config['selection']['target']))
        # TODO: add EBL absorption at some stage ...
        # gta = add_ebl_atten(gta, gta.get_source_name(config['selection']['target']), fit_config['z'])

        # make sure you are fitting data
        gta.simulate_roi(restore=True)

        if compute_sub_gti_lc:
            if args.adaptive:
                # do import only here since root must be compiled
                from fermiAnalysis import adaptivebinning as ab
                # compute the exposure
                energy = 1000.
                texp, front, back = ab.comp_exposure_phi(gta, energy=energy)
                # compute the bins
                result = ab.time_bins(
                    gta,
                    texp,
                    0.5 * (front + back),
                    critval=lc_config['adaptive'],  # e.g. 20. for bins with ~20% unc
                    Epivot=None,  # compute on the fly
                    # tstart=config['selection']['tmin'],
                    # tstop=config['selection']['tmax'],
                )

                # cut the bins to this GTI
                mask = result['tstop'] > config['selection']['tmin']
                mask = mask & (result['tstart'] < config['selection']['tmax'])

                # try again with catalog values
                if not np.sum(mask):
                    logging.error(
                        "Adaptive bins outside time window, trying catalog values for flux"
                    )
                    result = ab.time_bins(
                        gta,
                        texp,
                        0.5 * (front + back),
                        critval=lc_config['adaptive'],  # bins with ~20% unc
                        Epivot=None,  # compute on the fly
                        forcecatalog=True,
                        # tstart=config['selection']['tmin'],
                        # tstop=config['selection']['tmax'],
                    )

                    # cut the bins to this GTI
                    mask = result['tstop'] > config['selection']['tmin']
                    mask = mask & (result['tstart'] <
                                   config['selection']['tmax'])
                    if not np.sum(mask):
                        logging.error(
                            "Adaptive bins do not cover selected time interval!"
                        )
                        logging.error("Using original bins")

                    else:
                        bins = np.concatenate((result['tstart'][mask],
                                               [result['tstop'][mask][-1]]))
                        bins[0] = np.max(
                            [config['selection']['tmin'], bins[0]])
                        bins[-1] = np.min(
                            [config['selection']['tmax'], bins[-1]])
                        bins = list(bins)

                        # removing trailing zeros
                        counts = calc_counts(t, bins)
                        mcounts_post, mcounts_pre = rm_trailing_zeros(counts)
                        logging.info(
                            "count masks: {0} {1}, bins: {2}, counts: {3}".
                            format(mcounts_post, mcounts_pre, bins, counts))
                        counts = counts[mcounts_post & mcounts_pre]
                        bins = np.concatenate([
                            np.array(bins)[:-1][mcounts_post & mcounts_pre],
                            [
                                np.array(bins)[1:][mcounts_post
                                                   & mcounts_pre].max()
                            ]
                        ])
                        logging.info(
                            "Using bins {0}, total n={1:n} bins".format(
                                bins,
                                len(bins) - 1))
                        logging.info("bins widths : {0}".format(np.diff(bins)))
                        logging.info("counts per bin: {0} ".format(
                            calc_counts(t, bins)))
                        bins = list(bins)


            # TODO: test that this also works with GTIs that have little or no counts

            lc = gta.lightcurve(
                config['selection']['target'],
                binsz=config['lightcurve']['binsz'],
                free_background=config['lightcurve']['free_background'],
                free_params=config['lightcurve']['free_params'],
                free_radius=config['lightcurve']['free_radius'],
                make_plots=False,
                multithread=True,
                nthread=4,
                #multithread = False,
                #nthread = 1,
                save_bin_data=True,
                shape_ts_threshold=16.,
                use_scaled_srcmap=True,
                use_local_ltcube=True,
                write_fits=True,
                write_npy=True,
                time_bins=bins,
                outdir='{0:.0f}s'.format(config['lightcurve']['binsz']))
        else:
            # run the fitting of the entire time and energy range
            try:
                o = gta.optimize()  # perform an initial fit
                logging.debug(o)
            except RuntimeError as e:
                logging.error("Error in optimize: {0}".format(e))
                logging.info("Trying to continue ...")

            gta = set_free_pars_lc(gta, config, fit_config)

            f = gta.fit()

            if 'fix_sources' in fit_config.keys():
                skip = fit_config['fix_sources'].keys()
            else:
                skip = []

            gta, f = refit(gta,
                           config['selection']['target'],
                           f,
                           fit_config['ts_fixed'],
                           skip=skip)
            gta.print_roi()
            gta.write_roi('lc')

            if args.createsed:
                if args.make_plots:
                    init_matplotlib_backend()
                gta.load_roi('lc')  # reload the average spectral fit
                logging.info('Running sed for {0[target]:s}'.format(
                    config['selection']))
                sed = gta.sed(config['selection']['target'],
                              prefix='lc_sed',
                              free_radius=None if config['sed']['free_radius'] == 0.
                                          else config['sed']['free_radius'],
                              free_background=config['sed']['free_background'],
                              free_pars=fa.allnorm,
                              make_plots=args.make_plots,
                              cov_scale=config['sed']['cov_scale'],
                              use_local_index=config['sed']['use_local_index'],
                              bin_index=config['sed']['bin_index'])

            # debugging: calculate sed and resid maps for each light curve bin
            #logging.info('Running sed for {0[target]:s}'.format(config['selection']))
            #sed = gta.sed(config['selection']['target'], prefix='lc')
            #model = {'Scale': 1000., 'Index': fit_config['new_src_pl_index'], 'SpatialModel': 'PointSource'}
            #resid_maps = gta.residmap('lc', model=model, make_plots=True, write_fits=True, write_npy=True)

            if args.srcprob:
                logging.info("Running srcprob with srcmdl {0:s}".format('lc'))
                gta.compute_srcprob(xmlfile='lc', overwrite=True)

    # we are simulating a source
    else:
        # TODO: I probably have to run the setup here. Do on weekly files, i.e., no time cut? Only do that later?

        # np.load needs a binary-mode handle; despite the yaml mention in the
        # '--simulate' help text, the file is read as a numpy archive
        with open(args.simulate, 'rb') as f:
            simsource = np.load(f, allow_pickle=True).flat[0]

        # set the source to the simulation value
        gta.set_source_spectrum(
            simsource['target'],
            spectrum_type=simsource['spectrum_type'],
            spectrum_pars=simsource['spectrum_pars'][job_id - 1])

        logging.info("changed spectral parameters to {0}".format(
            gta.roi.get_source_by_name(simsource['target']).spectral_pars))

        # simulate the ROI
        gta.simulate_roi(randomize=bool(args.randomize))
        gta = set_free_pars_lc(gta, config, fit_config)

        # fit the simulation
        f = gta.fit()
        gta, f = refit(gta, config['selection']['target'], f,
                       fit_config['ts_fixed'])
        gta.print_roi()
        gta.write_roi('lc_simulate_{0:s}'.format(simsource['suffix']))
    return gta
Example No. 6
import os
import argparse

from fermipy.utils import init_matplotlib_backend
# Assumed imports (not shown in the original snippet): `setup` and the
# fit_igmf_halo_scan helper are taken here to come from the author's
# fermiAnalysis package.
from fermiAnalysis import setup
from fermiAnalysis.igmf import fit_igmf_halo_scan


def main():
    usage = "usage: %(prog)s -c config.yaml"
    description = "Run the analysis"
    parser = argparse.ArgumentParser(usage=usage, description=description)
    parser.add_argument('-c', '--conf', required=True)
    parser.add_argument('--halo-template-dir',
                        required=True,
                        help='Directory with halo template fits files')
    parser.add_argument('--halo-template-suffix',
                        required=True,
                        help='suffix for halo template fits files')
    parser.add_argument('--file-suffix',
                        help='additional suffix for output files',
                        default='')
    parser.add_argument('--state',
                        default='avgspec',
                        choices=['avgspec', 'avgspec_ebl'],
                        help='Analysis state')
    parser.add_argument('-i',
                        required=False,
                        default=0,
                        help='Set local or scratch calculation',
                        type=int)
    parser.add_argument('--overwrite',
                        action="store_true",
                        help='overwrite existing single files')
    parser.add_argument('--generate-seds',
                        action="store_true",
                        help='Generate SEDs during analysis')
    parser.add_argument('--generate-maps',
                        action="store_true",
                        help='Generate TS and residual maps during analysis')

    args = parser.parse_args()

    gta, config, fit_config, job_id = setup.init_gta(args.conf,
                                                     i=args.i,
                                                     logging_level="INFO")
    gta.logger.info('Running fermipy setup')

    init_matplotlib_backend()

    gta.logger.info('reloading {0:s}'.format(args.state))
    gta.load_roi(args.state)  # reload the average spectral fit

    modelname = "{0:s}_{1:s}{2:s}".format(
        args.state, '_'.join([
            k for k in args.halo_template_dir.split('/')[-4:]
            if 'spec' not in k
        ]), args.file_suffix)

    gta.logger.info("Using modelname: {0:s}".format(modelname))
    # change the outdir
    # not the greatest that I'm not using the API here,
    # but no other possibility
    gta._outdir = os.path.join(gta.outdir, 'igmf_' + modelname + '/')
    if not os.path.exists(gta.outdir):
        os.makedirs(gta.outdir)
    gta.logger.info("Set new outdir: {0:s}".format(gta.outdir))

    gta.logger.info(
        "reloaded ROI had log likelihood value: {0:.2f}".format(-gta.like()))
    halo_profile_tied = fit_igmf_halo_scan(
        gta,
        modelname,
        config['selection']['target'],
        args.halo_template_dir,
        model_idx=job_id,
        halo_template_suffix=args.halo_template_suffix,
        injection_spectrum='PLSuperExpCutoff',
        injection_par2_name='Cutoff',
        injection_norm_name='Prefactor',
        injection_scale_name='Scale',
        index_par_name='Index',
        free_bkgs=True,
        generate_maps=args.generate_maps,
        generate_seds=args.generate_seds,
        distance_free_norm=3.,  # at 1e-14 G, above 2 deg about 10% of cascade photons are beyond 2 deg at 1 GeV
        z=fit_config['z'],
        ebl_model_name='dominguez',
        optimizer='MINUIT')

    return gta, halo_profile_tied
Example No. 7
import os
import argparse

from fermipy.utils import init_matplotlib_backend
# Assumed imports (not shown in the original snippet): `setup` and the
# fit_region, fit_halo and fit_halo_scan helpers are taken here to come
# from the author's fermiAnalysis package.
from fermiAnalysis import setup
from fermiAnalysis.ext_funcs import fit_region, fit_halo, fit_halo_scan


def main():
    usage = "usage: %(prog)s -c config.yaml"
    description = "Run the analysis"
    parser = argparse.ArgumentParser(usage=usage, description=description)
    parser.add_argument('-c', '--conf', required=True)
    parser.add_argument('--state', default='avgspec',
                        choices=['avgspec', 'avgspec_ebl'],
                        help='Analysis state')
    parser.add_argument('-i', required=False, default=0,
                        help='Set local or scratch calculation', type=int)
    parser.add_argument('--create_ts_maps', required=False, default=1,
                        help='Generate TS maps', type=int)
    args = parser.parse_args()

    gta, config, fit_config, job_id = setup.init_gta(args.conf, i=args.i,
                                                     logging_level="INFO")
    gta.logger.info('Running fermipy setup')

    init_matplotlib_backend()

    #files = [fn for fn in glob(fit_config['avgspec']) if fn.find('.xml') > 0 or fn.find('.npy') > 0]
    #if len(files):
    #    utils.copy2scratch(files, gta.workdir)
    #else:
    #    gta.logger.error("No files found in {0:s}".format(fit_config['avgspec']))

    gta.setup()

    gta.logger.info('reloading {0:s}'.format(args.state))
    gta.load_roi(args.state) # reload the average spectral fit

    modelname = "{0:s}".format(args.state)

    # change the outdir
    # not the greatest that I'm not using the API here, 
    # but no other possibility
    gta._outdir = os.path.join(gta.outdir, 'extension_' + modelname + '/')
    if not os.path.exists(gta.outdir):
        os.makedirs(gta.outdir)
    gta.logger.info("Set new outdir: {0:s}".format(gta.outdir))

    # pop the known keys off the 'extension' block; whatever remains is
    # forwarded to fit_region() below as extra keyword arguments
    ext_config = fit_config.get('extension', {})
    free_radius_sed = ext_config.pop('free_radius_sed', 1.)
    force_ps = ext_config.pop('force_ps', False)
    free_shape_target = ext_config.pop('free_shape_target', False)
    distance_free_norm = ext_config.pop('distance_free_norm', 1.5)
    distance_free_shape = ext_config.pop('distance_free_shape', 1.)
    halo_fit = ext_config.pop('halo_fit', False)
    halo_scan = ext_config.pop('halo_scan', False)
    fit_halo_kwargs = ext_config.pop('fit_halo_kwargs', {})
    scan_halo_kwargs = ext_config.pop('scan_halo_kwargs', {})
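    # A sketch of the assumed fit_config['extension'] block (hypothetical
    # values):
    #     extension:
    #         free_radius_sed: 1.
    #         force_ps: False
    #         halo_fit: True
    #         fit_halo_kwargs: {...}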


    fit_region(gta, modelname, gta.config['selection']['target'],
               loge_bounds=None, 
               skip_opt=list(fit_config.get('fix_sources', {}).keys()),
               shape_ts_threshold=9.0,
               force_ps=force_ps,
               create_maps=args.create_ts_maps, 
               create_sed=False,
               free_radius_sed=free_radius_sed,
               distance_free_norm=distance_free_norm,
               distance_free_shape=distance_free_shape,
               free_shape_target=free_shape_target,
               **fit_config.get('extension', {})
               )

    if halo_fit:
        fit_halo(gta, modelname, gta.config['selection']['target'],
                 **fit_halo_kwargs)

    if halo_scan:
        # the original passed fit_halo_kwargs here; scan_halo_kwargs is
        # presumably what was intended, since it is otherwise unused
        fit_halo_scan(gta, modelname, gta.config['selection']['target'],
                      **scan_halo_kwargs)

    return gta, fit_config
Example No. 8
import copy
import logging
import argparse
from os import path
from glob import glob

import numpy as np
from astropy.coordinates import SkyCoord
from gammapy.maps import WcsNDMap

from fermipy.utils import init_matplotlib_backend
# Assumed imports (not shown in the original snippet): the helpers below are
# taken here to come from the author's fermiAnalysis package and its
# batch-farm tools.
import fermiAnalysis as fa
from fermiAnalysis import setup
from fermiAnalysis.batchfarm import utils
from fermiAnalysis.prepare import PreparePointSource
from fermiAnalysis.utils import (set_src_spec_pl, set_src_spec_plexpcut,
                                 set_free_pars_avg, fit_with_retries,
                                 get_best_fit_covar, compute_profile2d)


def main():
    usage = "usage: %(prog)s -c config.yaml"
    description = "Run the analysis"
    parser = argparse.ArgumentParser(usage=usage, description=description)
    parser.add_argument('-c', '--conf', required=True)
    parser.add_argument('--overwrite', required=False, default=0,
                        help='Overwrite existing files', type=int)
    parser.add_argument('--state', default='setup',
                        choices=['setup', 'avgspec', 'avgspec_ebl', 'lcbin', 'lcmonthly'],
                        help='Analysis state')
    parser.add_argument('-i', required=False, default=0,
                        help='Set local or scratch calculation', type=int)
    parser.add_argument('--specin', required=False, default=-2.1,
                        help='Spectral index used for gtexposure in lieu of target in xml file',
                        type=float)
    parser.add_argument('--addnewsrcs', default=0,
                        help='Search for and add new sources and create residual map',
                        type=int)
    parser.add_argument('--reloadfit', default=0,
                        help='Reload ROI from avgspec xml file',
                        type=int)
    parser.add_argument('--relocalize', default=0,
                        help='Relocalize central source',
                        type=int)
    parser.add_argument('--createsed', default=0,
                        help='Create SED from best fit model',
                        type=int)
    parser.add_argument('--forcespec', default=0,
                        help='Recompute model parameters',
                        type=int)
    parser.add_argument('--freezesupexp', default=0,
                        help='Freeze super exponential index parameters',
                        type=int)
    parser.add_argument('--restorecatspec', default=0,
                        help='Restore initial catalog spectrum',
                        type=int)
    parser.add_argument('--sethardexpcutoff', default=0,
                        help='Manually change parameters of PL with SuperExpCutoff',
                        type=int)
    parser.add_argument('--pivotE_free', default=0,
                        help='Let the pivot energy free during fit if spectrum is changed',
                        type=int)
    parser.add_argument('--forcepl', default=0,
                        help='Force the target source to have power-law shape',
                        type=int)
    parser.add_argument('--profile2d', default=0,
                        help='Compute 2D likelihood surface for PL index and normalization',
                        type=int)
    parser.add_argument('--srcprob', default=0,
                        help='Calculate the source probability for the photons,'
                             ' only works when no sub orbit time scales are used',
                        type=int)
    parser.add_argument('--psf', default=0,
                        help='Calculate the psf',
                        type=int)
    parser.add_argument('--make_plots', default=0, type=int,
                        help='Create plots')
    parser.add_argument('--drm', default=0,
                        help='Calculate the detector response matrix',
                        type=int)
    args = parser.parse_args()

    gta, config, fit_config, job_id = setup.init_gta(args.conf, i=args.i,
                                                     logging_level="INFO")
    logging.info('Running fermipy setup')

    if args.make_plots:
        init_matplotlib_backend()

    if args.reloadfit:
        files = [fn for fn in glob(fit_config['avgspec'])
                 if fn.find('.xml') > 0 or fn.find('.npy') > 0]
        if len(files):
            utils.copy2scratch(files, gta.workdir)
        else:
            logging.error("No files found in {0:s}".format(fit_config['avgspec']))
            args.reloadfit = False

    if args.state == 'lcbin':
        pps = PreparePointSource(config, logging={'verbosity': 3})
        pps.setup()
        pps._bin_data_lc(overwrite=args.overwrite)
        pps._compute_lc_exp(overwrite=args.overwrite,
                            specin=args.specin)
        return pps

    elif args.state == 'setup':
        try:
            gta.setup()
        except RuntimeError as e:
            logging.error("setup ended with runtime error:\n{0}.".format(e))

        if args.psf:
            logging.info("Running psf")
            gta.compute_psf(overwrite=True)

        if args.drm:
            logging.info("Running drm")
            gta.compute_drm(overwrite=True)

        return None, gta, fit_config


    elif args.state.find('avgspec') >= 0:
        gta.setup()

        if args.psf:
            logging.info("Running psf")
            gta.compute_psf(overwrite=True)

        if args.drm:
            logging.info("Running drm")
            gta.compute_drm(overwrite=True)

        if not isinstance(config['selection']['target'], str):
            # target name not given: take the closest source to the ROI
            # center if the separation is less than 0.1 degree
            logging.warning("Target name is {0}".format(config['selection']['target']))
            sep = gta.roi.sources[0]['offset']
            logging.warning("Source closest to ROI center is {0:.3f} degree away".format(sep))
            if sep < 0.1:
                config['selection']['target'] = gta.roi.sources[0]['name']
                logging.info("Set target to {0:s}".format(config['selection']['target']))
            else:  # add a source at the center of the ROI
                csrc = SkyCoord(ra=config['selection']['ra'],
                                dec=config['selection']['dec'], frame='fk5', unit='degree')
                if csrc.dec.value < 0.:
                    sign = '-'
                else:
                    sign = '+'
                newname = 'j{0:02.0f}{1:02.0f}{2:s}{3:02.0f}{4:02.0f}'.format(
                    csrc.ra.hms.h, csrc.ra.hms.m, sign,
                    np.abs(csrc.dec.dms.d), np.abs(csrc.dec.dms.m))
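                # worked example of the naming scheme (illustration only):
                # ra=150.0, dec=-30.5 -> ra.hms = (10h, 0m, ...) and
                # dec.dms = (-30d, -30m, ...) -> newname = 'j1000-3030'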
                gta.add_source(newname, {
                    'ra': config['selection']['ra'], 'dec': config['selection']['dec'],
                    'SpectrumType': 'PowerLaw', 'Index': fit_config['new_src_pl_index'],
                    'Scale': fit_config['pivotE'] if 'pivotE' in fit_config.keys() else 1000.,
                    'Prefactor': 1e-11,
                    'SpatialModel': 'PointSource'})
                config['selection']['target'] = newname
                logging.info("Set target to {0:s}".format(config['selection']['target']))

        if args.reloadfit:
            # save old spectrum
            spec_cat = gta.roi.get_source_by_name(config['selection']['target'])
            try:
                gta.load_roi(args.state)  # reload the average spectral fit
            except Exception:
                logging.error("Could not reload fit. Continuing anyway.")
        logging.info('Running fermipy optimize and fit')
        # gives "failed to create spline" in get_parameter_limits function

        if 'source_spec' in fit_config.keys():
            m = gta.roi.get_source_by_name(config['selection']['target'])
            if not m['SpectrumType'] == fit_config['source_spec'] or args.forcespec:
                if fit_config['source_spec'] == 'PowerLaw':
                    gta, _ = set_src_spec_pl(
                        gta, gta.get_source_name(config['selection']['target']),
                        fit_config['pivotE'] if 'pivotE' in fit_config.keys() else None,
                        e0_free=args.pivotE_free)
                elif fit_config['source_spec'] == 'PLSuperExpCutoff':
                    gta, _ = set_src_spec_plexpcut(
                        gta, gta.get_source_name(config['selection']['target']),
                        fit_config['pivotE'] if 'pivotE' in fit_config.keys() else None,
                        e0_free=args.pivotE_free)
                #elif fit_config['source_spec'] == 'LogParabola':
                #    gta = set_src_spec_lp(gta, gta.get_source_name(config['selection']['target']))
                else:
                    logging.warning("Spectrum {0:s} not supported, spectrum not changed".format(
                        fit_config['source_spec']))

        # restore spectrum from catalog
        # (note: spec_cat is only defined above if --reloadfit was given)
        if args.restorecatspec:
            #for k in ['alpha','Index','Index1']:
            #    if k in spec_cat.spectral_pars.keys():
            #        spec_cat.spectral_pars[k]['value'] -= 0.5
            #spec_cat.spectral_pars['Eb']['free'] = True
            #spec_cat.spectral_pars['Eb']['value'] = 2000.
            #spec_cat.spectral_pars['alpha']['value'] = 1.8
            #print(spec_cat.spectral_pars)
            gta.set_source_spectrum(config['selection']['target'],
                                    spectrum_type=spec_cat['SpectrumType'],
                                    spectrum_pars=spec_cat.spectral_pars)
            logging.info("restored catalog spectrum")

        # for some sources modeled with PL with super exponential cutoff I
        # have to do this to get a nice SED, but not for 3C454.3!
        if gta.roi.get_source_by_name(config['selection']['target'])['SpectrumType'] == \
                'PLSuperExpCutoff' and args.sethardexpcutoff:
            pars = {}
            old_spec_pars = copy.deepcopy(gta.roi.get_source_by_name(config['selection']['target']))
            for k in ['Prefactor', 'Scale', 'Index1', 'Index2', 'Cutoff']:
                pars[k] = old_spec_pars.spectral_pars[k]
            if config['selection']['target'] == '3C454.3':
                # values from Romoli et al 2017
                pars['Prefactor']['value'] = 4.7
                pars['Index1']['value'] = 1.87
                pars['Index2']['value'] = 0.4
                pars['Cutoff']['value'] = 1100.
                pars['Cutoff']['scale'] = 1.
                pars['Cutoff']['min'] = 100.
                pars['Cutoff']['max'] = 10000.
            else:
                pars['Index1']['value'] = 1.8
                pars['Index2']['value'] = 1.
                pars['Cutoff']['value'] = 5e4
            gta.set_source_spectrum(config['selection']['target'],
                                    #spectrum_type='PLSuperExpCutoff2',
                                    spectrum_type='PLSuperExpCutoff',
                                    spectrum_pars=pars)
            logging.info("changed spectral parameters to {0}".format(
                gta.roi.get_source_by_name(config['selection']['target']).spectral_pars))
        else:
            old_spec_pars = None

        if args.forcepl and not \
                gta.roi.get_source_by_name(config['selection']['target'])['SpectrumType'] == 'PowerLaw':
            gta, _ = set_src_spec_pl(gta, gta.get_source_name(config['selection']['target']))
            args.state += "_pl"

        if args.state.find('ebl') >= 0:
            gta = fa.utils.add_ebl_atten(gta, config['selection']['target'], fit_config['z'])

        gta = set_free_pars_avg(gta, fit_config, freezesupexp=args.freezesupexp)
        f, gta = fit_with_retries(gta, fit_config, config['selection']['target'],
                                  alt_spec_pars=old_spec_pars)

        logging.debug(f)
        try:
            get_best_fit_covar(gta, config['selection']['target'], prefix=args.state)
        except IndexError:
            logging.error("Covariance matrix calculation failed")

        # relocalize central source and refit
        if args.relocalize and isinstance(config['selection']['target'], str):
            loc = gta.localize(config['selection']['target'], make_plots=args.make_plots,
                               free_background=fit_config['reloc_bkg'],
                               free_radius=fit_config['reloc_rad'],
                               update=True)

            logging.info('new position is {0[pos_offset]:.3f} degrees from old position'.format(loc))
            logging.info('Pos uncertainty is {0[pos_r68]:.3f} (68%); {0[pos_r95]:.3f} (95%) degrees'.format(loc))
            logging.info('Refitting with new source position ...')
            logging.info('free source parameters:')
            for s in gta.get_sources():
                for k in s.spectral_pars.keys():
                    if s.spectral_pars[k]['free']:
                        logging.info('{0:s}: {1:s}'.format(s.name, k))
            f = gta.fit()
            #f, gta = fit_with_retries(gta, fit_config, config['selection']['target'])

        gta.print_roi()
        gta.write_roi(args.state)

    elif args.state == 'lcmonthly':
        gta.setup()
        gta.load_roi('avgspec')  # reload the average spectral fit
        logging.info('Running the 30-day bin ' +
                     'light curve for {0[target]:s}'.format(config['selection']))
        lc = gta.lightcurve(config['selection']['target'],
                            binsz=30. * 24. * 60. * 60.)

    model = {'Scale': 1000., 'Index': fit_config['new_src_pl_index'], 'SpatialModel': 'PointSource'}
    if args.addnewsrcs:
        gta.load_roi(args.state) # reload the average spectral fit

        max_sqrt_ts = 1000.
        irun = 0
        # define the test source
        #model = {'Index' : 2.0, 'SpatialModel' : 'PointSource'}

        # run ts map and add new sources with sqrt(ts) > 5
        # reoptimize iteratively for each new source
        # this is only done for outer RoI
        while max_sqrt_ts >= fit_config['max_sqrt_ts']:
            # run ts and residual maps
            ts_maps = gta.tsmap(args.state, model=model,
                                write_fits=True, write_npy=True, make_plots=args.make_plots)
            # get the skydirs
            #coords = ts_maps['sqrt_ts'].get_pixel_skydirs()
            coords = ts_maps['sqrt_ts'].geom.get_coord()
            if ts_maps['sqrt_ts'].geom.coordsys == 'CEL':
                frame = 'fk5'
            elif ts_maps['sqrt_ts'].geom.coordsys == 'GAL':
                frame = 'galactic'
            c = SkyCoord(coords[0], coords[1], unit='deg', frame=frame)

            #sqrt_ts = ts_maps['sqrt_ts'].get_map_values(coords.ra, coords.dec)  # these are all nans; workaround: load the fits file
            #sqrt_ts_map = Map.create_from_fits(path.join(gta.workdir, ts_maps['file']), hdu='SQRT_TS_MAP')
            #n_map = Map.create_from_fits(path.join(gta.workdir, ts_maps['file']), hdu='N_MAP')
            sqrt_ts_map = WcsNDMap.read(path.join(gta.workdir, ts_maps['file']), hdu='SQRT_TS_MAP')
            n_map = WcsNDMap.read(path.join(gta.workdir, ts_maps['file']), hdu='N_MAP')

            sqrt_ts = sqrt_ts_map.data
            amplitudes = n_map.data
            # get the angular separation from RoI center
            sep = gta.roi.skydir.separation(c)
            # mask nan values and pixels close to central source
            m = np.isfinite(sqrt_ts) & (sep.value > fit_config['new_src_search_rad'])
            if not np.sum(m):
                logging.warning('No pixels that are finite at distance > {0[new_src_search_rad]:.2f}'.format(fit_config))
                raise RuntimeError

            # get max ts value 
            max_sqrt_ts = np.max(sqrt_ts[m])

            if max_sqrt_ts < fit_config['max_sqrt_ts']:
                break

            # get the coords of max ts
            idx = np.argmax(sqrt_ts[m])
            logging.info('Found new source with sqrt(ts) = {0:.2f} at ra,dec = {1:.2f}, {2:.2f}'.format(
                sqrt_ts[m][idx], c.ra[m][idx].value, c.dec[m][idx].value))
            # add a new source
            csrc = SkyCoord(ra=c.ra[m][idx], dec=c.dec[m][idx], frame='fk5')
            if csrc.dec.value < 0.:
                sign = '-'
            else:
                sign = '+'
            newname = 'j{0:02.0f}{1:02.0f}{2:s}{3:02.0f}{4:02.0f}'.format(
                csrc.ra.hms.h, csrc.ra.hms.m, sign,
                np.abs(csrc.dec.dms.d), np.abs(csrc.dec.dms.m))
            gta.add_source(newname, {
                'ra': c.ra[m][idx].value, 'dec': c.dec[m][idx].value,
                'SpectrumType': 'PowerLaw', 'Index': fit_config['new_src_pl_index'],
                'Scale': 1000, 'Prefactor': amplitudes[m][idx],
                'SpatialModel': 'PointSource'})
            logging.debug('Amplitude of source: {0}'.format(amplitudes[m][idx]))

            gta.free_source(newname, pars=['norm', 'index'], free=True)
            f = gta.fit()
            gta.print_roi()
            irun += 1

        # if new sources were added, save output
        if irun > 0:
            gta.print_roi()
            #gta = reset_diff_filenames(gta)
            # refit the model with new sources present
            gta = set_free_pars_avg(gta, fit_config)
            f, gta = fit_with_retries(gta, fit_config, config['selection']['target'])
            gta.write_roi(args.state)

    else:
        ts_maps = gta.tsmap(args.state, model=model,
                            write_fits=True, write_npy=True, make_plots=args.make_plots)
    try:
        resid_maps = gta.residmap(args.state, model=model, make_plots=args.make_plots,
                                  write_fits=True, write_npy=True)
    except Exception:
        logging.error("Residual map computation and plotting failed")

    if args.profile2d:
        compute_profile2d(gta, config['selection']['target'], prefix=args.state,
                          sigma=5., xsteps=30, ysteps=31)

    if args.createsed:
        if fit_config.get('force_free_index', False):
            gta.free_index(config['selection']['target'], free=False)
            gta.free_index(config['selection']['target'], free=True)
        gta.load_roi(args.state)  # reload the average spectral fit
        logging.info('Running sed for {0[target]:s}'.format(config['selection']))
        sed = gta.sed(config['selection']['target'],
                      prefix=args.state,
                      #outfile='sed.fits',
                      #free_radius=sed_config['free_radius'],
                      #free_background=sed_config['free_background'],
                      #free_pars=fa.allnorm,
                      make_plots=args.make_plots,
                      #cov_scale=sed_config['cov_scale'],
                      #use_local_index=sed_config['use_local_index'],
                      #bin_index=sed_config['bin_index']
                      )
        logging.info("SED covariance: {0}".format(sed['param_covariance']))

        # generate additional SEDs
        for src in fit_config.get('additional_seds', []):
            sed = gta.sed(src,
                          prefix=args.state,
                          make_plots=args.make_plots)

    if args.srcprob:
        logging.info("Running srcprob with srcmdl {0:s}".format(args.state))
        gta.compute_srcprob(xmlfile=args.state, overwrite=True)

    return f, gta, fit_config