Example #1
    def _scan_position(self, name, **kwargs):

        saved_state = LikelihoodState(self.like)

        skydir = kwargs.pop('skydir', self.roi[name].skydir)
        scan_cdelt = kwargs.pop('scan_cdelt', 0.02)
        nstep = kwargs.pop('nstep', 5)
        use_cache = kwargs.get('use_cache', True)
        use_pylike = kwargs.get('use_pylike', False)
        optimizer = kwargs.get('optimizer', {})

        # Fit without source
        self.zero_source(name, loglevel=logging.DEBUG)
        fit_output_nosrc = self._fit(loglevel=logging.DEBUG,
                                     **optimizer)
        self.unzero_source(name, loglevel=logging.DEBUG)
        saved_state.restore()
        self.free_norm(name, loglevel=logging.DEBUG)

        lnlmap = Map.create(skydir, scan_cdelt, (nstep, nstep),
                            coordsys=wcs_utils.get_coordsys(self._skywcs))

        src = self.roi.copy_source(name)

        if use_cache and not use_pylike:
            self._create_srcmap_cache(src.name, src)

        scan_skydir = lnlmap.get_pixel_skydirs().transform_to('icrs')
        loglike = []
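        # Move the source to each pixel of the scan grid, refit, and record
        # the resulting log-likelihood.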
        for ra, dec in zip(scan_skydir.ra.deg, scan_skydir.dec.deg):

            spatial_pars = {'ra': ra, 'dec': dec}
            self.set_source_morphology(name,
                                       spatial_pars=spatial_pars,
                                       use_pylike=use_pylike)
            fit_output = self._fit(loglevel=logging.DEBUG,
                                   **optimizer)
            loglike += [fit_output['loglike']]

        self.set_source_morphology(name, spatial_pars=src.spatial_pars,
                                   use_pylike=use_pylike)
        saved_state.restore()

        lnlmap.data = np.array(loglike).reshape((nstep, nstep)).T
        lnlmap.data -= fit_output_nosrc['loglike']
        tsmap = Map(2.0 * lnlmap.data, lnlmap.wcs)

        self._clear_srcmap_cache()
        return tsmap, fit_output_nosrc['loglike']
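
A minimal usage sketch for the helper above (not part of the example). The instance gta, the configuration file and the source name are placeholders for a configured fermipy GTAnalysis object and a source in its ROI; _scan_position is an internal method, so an ordinary analysis would usually go through the public localization interface instead.

import numpy as np
from fermipy.gtanalysis import GTAnalysis

gta = GTAnalysis('config.yaml')   # hypothetical configuration file
gta.setup()

# Scan a 5 x 5 grid of trial positions with 0.02 deg spacing around the source.
tsmap, loglike_nosrc = gta._scan_position('3FGL J2021.0+4031',  # placeholder name
                                          scan_cdelt=0.02, nstep=5)

# tsmap.data holds 2 * (lnL - lnL_no_source); its maximum marks the best trial
# position on the grid.
print('peak TS = %.2f' % np.max(tsmap.data))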
Example #2
def run_flux_sensitivity(**kwargs):

    index = kwargs.get('index', 2.0)
    sedshape = kwargs.get('sedshape', 'PowerLaw')
    cutoff = kwargs.get('cutoff', 1e3)
    curvindex = kwargs.get('curvindex', 1.0)
    beta = kwargs.get('beta', 0.0)
    emin = kwargs.get('emin', 10**1.5)
    emax = kwargs.get('emax', 10**6.0)
    nbin = kwargs.get('nbin', 18)
    glon = kwargs.get('glon', 0.0)
    glat = kwargs.get('glat', 0.0)
    ltcube_filepath = kwargs.get('ltcube', None)
    galdiff_filepath = kwargs.get('galdiff', None)
    isodiff_filepath = kwargs.get('isodiff', None)
    galdiff_fit_filepath = kwargs.get('galdiff_fit', None)
    isodiff_fit_filepath = kwargs.get('isodiff_fit', None)
    wcs_npix = kwargs.get('wcs_npix', 40)
    wcs_cdelt = kwargs.get('wcs_cdelt', 0.5)
    wcs_proj = kwargs.get('wcs_proj', 'AIT')
    map_type = kwargs.get('map_type', None)
    spatial_model = kwargs.get('spatial_model', 'PointSource')
    spatial_size = kwargs.get('spatial_size', 1E-2)

    obs_time_yr = kwargs.get('obs_time_yr', None)
    event_class = kwargs.get('event_class', 'P8R2_SOURCE_V6')
    min_counts = kwargs.get('min_counts', 3.0)
    ts_thresh = kwargs.get('ts_thresh', 25.0)
    nside = kwargs.get('hpx_nside', 16)
    output = kwargs.get('output', None)

    event_types = [['FRONT', 'BACK']]

    if sedshape == 'PowerLaw':
        fn = spectrum.PowerLaw([1E-13, -index], scale=1E3)
    elif sedshape == 'PLSuperExpCutoff':
        fn = spectrum.PLSuperExpCutoff([1E-13, -index, cutoff, curvindex],
                                       scale=1E3)
    elif sedshape == 'LogParabola':
        fn = spectrum.LogParabola([1E-13, -index, beta], scale=1E3)

    log_ebins = np.linspace(np.log10(emin), np.log10(emax), nbin + 1)
    ebins = 10**log_ebins
    ectr = np.exp(utils.edge_to_center(np.log(ebins)))

    c = SkyCoord(glon, glat, unit='deg', frame='galactic')

    if ltcube_filepath is None:

        if obs_time_yr is None:
            raise Exception('No observation time defined.')

        ltc = LTCube.create_from_obs_time(obs_time_yr * 365 * 24 * 3600.)
    else:
        ltc = LTCube.create(ltcube_filepath)
        if obs_time_yr is not None:
            ltc._counts *= (obs_time_yr * 365 * 24 * 3600. /
                            (ltc.tstop - ltc.tstart))

    gdiff = skymap.Map.create_from_fits(galdiff_filepath)
    gdiff_fit = None
    if galdiff_fit_filepath is not None:
        gdiff_fit = skymap.Map.create_from_fits(galdiff_fit_filepath)

    if isodiff_filepath is None:
        isodiff = utils.resolve_file_path('iso_%s_v06.txt' % event_class,
                                          search_dirs=[
                                              os.path.join(
                                                  '$FERMIPY_ROOT', 'data'),
                                              '$FERMI_DIFFUSE_DIR'
                                          ])
        isodiff = os.path.expandvars(isodiff)
    else:
        isodiff = isodiff_filepath

    iso = np.loadtxt(isodiff, unpack=True)
    iso_fit = None
    if isodiff_fit_filepath is not None:
        iso_fit = np.loadtxt(isodiff_fit_filepath, unpack=True)

    scalc = SensitivityCalc(gdiff,
                            iso,
                            ltc,
                            ebins,
                            event_class,
                            event_types,
                            gdiff_fit=gdiff_fit,
                            iso_fit=iso_fit,
                            spatial_model=spatial_model,
                            spatial_size=spatial_size)

    # Compute Maps
    map_diff_flux = None
    map_diff_npred = None
    map_int_flux = None
    map_int_npred = None

    map_nstep = 500
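    # The sensitivity maps below are filled in chunks of map_nstep sky
    # directions per call to the threshold calculation.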

    if map_type == 'hpx':

        hpx = HPX(nside, True, 'GAL', ebins=ebins)
        map_diff_flux = HpxMap(np.zeros((nbin, hpx.npix)), hpx)
        map_diff_npred = HpxMap(np.zeros((nbin, hpx.npix)), hpx)
        map_skydir = map_diff_flux.hpx.get_sky_dirs()

        for i in range(0, len(map_skydir), map_nstep):
            s = slice(i, i + map_nstep)
            o = scalc.diff_flux_threshold(map_skydir[s], fn, ts_thresh,
                                          min_counts)
            map_diff_flux.data[:, s] = o['flux'].T
            map_diff_npred.data[:, s] = o['npred'].T

        hpx = HPX(nside, True, 'GAL')
        map_int_flux = HpxMap(np.zeros((hpx.npix)), hpx)
        map_int_npred = HpxMap(np.zeros((hpx.npix)), hpx)
        map_skydir = map_int_flux.hpx.get_sky_dirs()

        for i in range(0, len(map_skydir), map_nstep):
            s = slice(i, i + map_nstep)
            o = scalc.int_flux_threshold(map_skydir[s], fn, ts_thresh,
                                         min_counts)
            map_int_flux.data[s] = o['flux']
            map_int_npred.data[s] = o['npred']

    elif map_type == 'wcs':

        wcs_shape = [wcs_npix, wcs_npix]
        wcs_size = wcs_npix * wcs_npix

        map_diff_flux = Map.create(c,
                                   wcs_cdelt,
                                   wcs_shape,
                                   'GAL',
                                   wcs_proj,
                                   ebins=ebins)
        map_diff_npred = Map.create(c,
                                    wcs_cdelt,
                                    wcs_shape,
                                    'GAL',
                                    wcs_proj,
                                    ebins=ebins)
        map_skydir = map_diff_flux.get_pixel_skydirs()

        for i in range(0, len(map_skydir), map_nstep):
            idx = np.unravel_index(np.arange(i, min(i + map_nstep, wcs_size)),
                                   wcs_shape)
            s = (slice(None), idx[1], idx[0])
            o = scalc.diff_flux_threshold(map_skydir[slice(i, i + map_nstep)],
                                          fn, ts_thresh, min_counts)
            map_diff_flux.data[s] = o['flux'].T
            map_diff_npred.data[s] = o['npred'].T

        map_int_flux = Map.create(c, wcs_cdelt, wcs_shape, 'GAL', wcs_proj)
        map_int_npred = Map.create(c, wcs_cdelt, wcs_shape, 'GAL', wcs_proj)
        map_skydir = map_int_flux.get_pixel_skydirs()

        for i in range(0, len(map_skydir), map_nstep):
            idx = np.unravel_index(np.arange(i, min(i + map_nstep, wcs_size)),
                                   wcs_shape)
            s = (idx[1], idx[0])
            o = scalc.int_flux_threshold(map_skydir[slice(i, i + map_nstep)],
                                         fn, ts_thresh, min_counts)
            map_int_flux.data[s] = o['flux']
            map_int_npred.data[s] = o['npred']

    o = scalc.diff_flux_threshold(c, fn, ts_thresh, min_counts)

    cols = [
        Column(name='e_min', dtype='f8', data=scalc.ebins[:-1], unit='MeV'),
        Column(name='e_ref', dtype='f8', data=o['e_ref'], unit='MeV'),
        Column(name='e_max', dtype='f8', data=scalc.ebins[1:], unit='MeV'),
        Column(name='flux', dtype='f8', data=o['flux'], unit='ph / (cm2 s)'),
        Column(name='eflux', dtype='f8', data=o['eflux'],
               unit='MeV / (cm2 s)'),
        Column(name='dnde',
               dtype='f8',
               data=o['dnde'],
               unit='ph / (MeV cm2 s)'),
        Column(name='e2dnde',
               dtype='f8',
               data=o['e2dnde'],
               unit='MeV / (cm2 s)'),
        Column(name='npred', dtype='f8', data=o['npred'], unit='ph')
    ]

    tab_diff = Table(cols)

    cols = [
        Column(name='index', dtype='f8'),
        Column(name='e_min', dtype='f8', unit='MeV'),
        Column(name='e_ref', dtype='f8', unit='MeV'),
        Column(name='e_max', dtype='f8', unit='MeV'),
        Column(name='flux', dtype='f8', unit='ph / (cm2 s)'),
        Column(name='eflux', dtype='f8', unit='MeV / (cm2 s)'),
        Column(name='dnde', dtype='f8', unit='ph / (MeV cm2 s)'),
        Column(name='e2dnde', dtype='f8', unit='MeV / (cm2 s)'),
        Column(name='npred', dtype='f8', unit='ph'),
        Column(name='ebin_e_min', dtype='f8', unit='MeV', shape=(len(ectr), )),
        Column(name='ebin_e_ref', dtype='f8', unit='MeV', shape=(len(ectr), )),
        Column(name='ebin_e_max', dtype='f8', unit='MeV', shape=(len(ectr), )),
        Column(name='ebin_flux',
               dtype='f8',
               unit='ph / (cm2 s)',
               shape=(len(ectr), )),
        Column(name='ebin_eflux',
               dtype='f8',
               unit='MeV / (cm2 s)',
               shape=(len(ectr), )),
        Column(name='ebin_dnde',
               dtype='f8',
               unit='ph / (MeV cm2 s)',
               shape=(len(ectr), )),
        Column(name='ebin_e2dnde',
               dtype='f8',
               unit='MeV / (cm2 s)',
               shape=(len(ectr), )),
        Column(name='ebin_npred', dtype='f8', unit='ph', shape=(len(ectr), ))
    ]

    cols_ebounds = [
        Column(name='E_MIN', dtype='f8', unit='MeV', data=ebins[:-1]),
        Column(name='E_MAX', dtype='f8', unit='MeV', data=ebins[1:]),
    ]

    tab_int = Table(cols)
    tab_ebounds = Table(cols_ebounds)

    index = np.linspace(1.0, 5.0, 4 * 4 + 1)
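    # Tabulate the integral flux threshold for each power-law index on the
    # grid above (1.0 to 5.0 in steps of 0.25).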

    for g in index:
        fn = spectrum.PowerLaw([1E-13, -g], scale=10**3.5)
        o = scalc.int_flux_threshold(c, fn, ts_thresh, 3.0)
        row = [g]
        for colname in tab_int.columns:
            if colname == 'index':
                continue
            if 'ebin' in colname:
                row += [o['bins'][colname.replace('ebin_', '')]]
            else:
                row += [o[colname]]

        tab_int.add_row(row)

    hdulist = fits.HDUList()
    hdulist.append(fits.table_to_hdu(tab_diff))
    hdulist.append(fits.table_to_hdu(tab_int))
    hdulist.append(fits.table_to_hdu(tab_ebounds))

    hdulist[1].name = 'DIFF_FLUX'
    hdulist[2].name = 'INT_FLUX'
    hdulist[3].name = 'EBOUNDS'

    if map_type is not None:
        hdu = map_diff_flux.create_image_hdu()
        hdu.name = 'MAP_DIFF_FLUX'
        hdulist.append(hdu)
        hdu = map_diff_npred.create_image_hdu()
        hdu.name = 'MAP_DIFF_NPRED'
        hdulist.append(hdu)

        hdu = map_int_flux.create_image_hdu()
        hdu.name = 'MAP_INT_FLUX'
        hdulist.append(hdu)
        hdu = map_int_npred.create_image_hdu()
        hdu.name = 'MAP_INT_NPRED'
        hdulist.append(hdu)

    hdulist.writeto(output, overwrite=True)
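
A minimal call sketch for run_flux_sensitivity (not part of the example). All file paths and parameter values below are placeholders; the keyword names are the ones read from **kwargs above.

run_flux_sensitivity(ltcube='ltcube_8yr.fits',
                     galdiff='gll_iem_v06.fits',
                     isodiff='iso_P8R2_SOURCE_V6_v06.txt',
                     event_class='P8R2_SOURCE_V6',
                     glon=120.0, glat=45.0,
                     sedshape='PowerLaw', index=2.0,
                     ts_thresh=25.0, min_counts=3.0,
                     map_type='hpx', hpx_nside=16,
                     output='flux_sensitivity.fits')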
Example #3
def run_flux_sensitivity(**kwargs):

    index = kwargs.get('index', 2.0)
    sedshape = kwargs.get('sedshape', 'PowerLaw')
    cutoff = kwargs.get('cutoff', 1e3)
    curvindex = kwargs.get('curvindex', 1.0)
    beta = kwargs.get('beta', 0.0)
    dmmass = kwargs.get('DMmass', 100.0)
    dmchannel = kwargs.get('DMchannel', 'bb')
    emin = kwargs.get('emin', 10**1.5)
    emax = kwargs.get('emax', 10**6.0)
    nbin = kwargs.get('nbin', 18)
    glon = kwargs.get('glon', 0.0)
    glat = kwargs.get('glat', 0.0)
    ltcube_filepath = kwargs.get('ltcube', None)
    galdiff_filepath = kwargs.get('galdiff', None)
    isodiff_filepath = kwargs.get('isodiff', None)
    galdiff_fit_filepath = kwargs.get('galdiff_fit', None)
    isodiff_fit_filepath = kwargs.get('isodiff_fit', None)
    wcs_npix = kwargs.get('wcs_npix', 40)
    wcs_cdelt = kwargs.get('wcs_cdelt', 0.5)
    wcs_proj = kwargs.get('wcs_proj', 'AIT')
    map_type = kwargs.get('map_type', None)
    spatial_model = kwargs.get('spatial_model', 'PointSource')
    spatial_size = kwargs.get('spatial_size', 1E-2)

    obs_time_yr = kwargs.get('obs_time_yr', None)
    event_class = kwargs.get('event_class', 'P8R2_SOURCE_V6')
    min_counts = kwargs.get('min_counts', 3.0)
    ts_thresh = kwargs.get('ts_thresh', 25.0)
    nside = kwargs.get('hpx_nside', 16)
    output = kwargs.get('output', None)

    event_types = [['FRONT', 'BACK']]

    if sedshape == 'PowerLaw':
        fn = spectrum.PowerLaw([1E-13, -index], scale=1E3)
    elif sedshape == 'PLSuperExpCutoff':
        fn = spectrum.PLSuperExpCutoff(
            [1E-13, -index, cutoff, curvindex], scale=1E3)
    elif sedshape == 'LogParabola':
        fn = spectrum.LogParabola([1E-13, -index, beta], scale=1E3)
    elif sedshape == 'DM':
        fn = spectrum.DMFitFunction([1E-26, dmmass], chan=dmchannel)

    log_ebins = np.linspace(np.log10(emin),
                            np.log10(emax), nbin + 1)
    ebins = 10**log_ebins
    ectr = np.exp(utils.edge_to_center(np.log(ebins)))

    c = SkyCoord(glon, glat, unit='deg', frame='galactic')

    if ltcube_filepath is None:

        if obs_time_yr is None:
            raise Exception('No observation time defined.')

        ltc = LTCube.create_from_obs_time(obs_time_yr * 365 * 24 * 3600.)
    else:
        ltc = LTCube.create(ltcube_filepath)
        if obs_time_yr is not None:
            ltc._counts *= (obs_time_yr * 365 * 24 * 3600. /
                            (ltc.tstop - ltc.tstart))

    gdiff = skymap.Map.create_from_fits(galdiff_filepath)
    gdiff_fit = None
    if galdiff_fit_filepath is not None:
        gdiff_fit = skymap.Map.create_from_fits(galdiff_fit_filepath)

    if isodiff_filepath is None:
        isodiff = utils.resolve_file_path('iso_%s_v06.txt' % event_class,
                                          search_dirs=[os.path.join('$FERMIPY_ROOT', 'data'),
                                                       '$FERMI_DIFFUSE_DIR'])
        isodiff = os.path.expandvars(isodiff)
    else:
        isodiff = isodiff_filepath

    iso = np.loadtxt(isodiff, unpack=True)
    iso_fit = None
    if isodiff_fit_filepath is not None:
        iso_fit = np.loadtxt(isodiff_fit_filepath, unpack=True)

    scalc = SensitivityCalc(gdiff, iso, ltc, ebins,
                            event_class, event_types, gdiff_fit=gdiff_fit,
                            iso_fit=iso_fit, spatial_model=spatial_model,
                            spatial_size=spatial_size)

    # Compute Maps
    map_diff_flux = None
    map_diff_npred = None
    map_int_flux = None
    map_int_npred = None

    map_nstep = 500

    if map_type == 'hpx':

        hpx = HPX(nside, True, 'GAL', ebins=ebins)
        map_diff_flux = HpxMap(np.zeros((nbin, hpx.npix)), hpx)
        map_diff_npred = HpxMap(np.zeros((nbin, hpx.npix)), hpx)
        map_skydir = map_diff_flux.hpx.get_sky_dirs()

        for i in range(0, len(map_skydir), map_nstep):
            s = slice(i, i + map_nstep)
            o = scalc.diff_flux_threshold(
                map_skydir[s], fn, ts_thresh, min_counts)
            map_diff_flux.data[:, s] = o['flux'].T
            map_diff_npred.data[:, s] = o['npred'].T

        hpx = HPX(nside, True, 'GAL')
        map_int_flux = HpxMap(np.zeros((hpx.npix)), hpx)
        map_int_npred = HpxMap(np.zeros((hpx.npix)), hpx)
        map_skydir = map_int_flux.hpx.get_sky_dirs()

        for i in range(0, len(map_skydir), map_nstep):
            s = slice(i, i + map_nstep)
            o = scalc.int_flux_threshold(
                map_skydir[s], fn, ts_thresh, min_counts)
            map_int_flux.data[s] = o['flux']
            map_int_npred.data[s] = o['npred']

    elif map_type == 'wcs':

        wcs_shape = [wcs_npix, wcs_npix]
        wcs_size = wcs_npix * wcs_npix

        map_diff_flux = Map.create(
            c, wcs_cdelt, wcs_shape, 'GAL', wcs_proj, ebins=ebins)
        map_diff_npred = Map.create(
            c, wcs_cdelt, wcs_shape, 'GAL', wcs_proj, ebins=ebins)
        map_skydir = map_diff_flux.get_pixel_skydirs()

        for i in range(0, len(map_skydir), map_nstep):
            idx = np.unravel_index(
                np.arange(i, min(i + map_nstep, wcs_size)), wcs_shape)
            s = (slice(None), idx[1], idx[0])
            o = scalc.diff_flux_threshold(
                map_skydir[slice(i, i + map_nstep)], fn, ts_thresh, min_counts)
            map_diff_flux.data[s] = o['flux'].T
            map_diff_npred.data[s] = o['npred'].T

        map_int_flux = Map.create(c, wcs_cdelt, wcs_shape, 'GAL', wcs_proj)
        map_int_npred = Map.create(c, wcs_cdelt, wcs_shape, 'GAL', wcs_proj)
        map_skydir = map_int_flux.get_pixel_skydirs()

        for i in range(0, len(map_skydir), map_nstep):
            idx = np.unravel_index(
                np.arange(i, min(i + map_nstep, wcs_size)), wcs_shape)
            s = (idx[1], idx[0])
            o = scalc.int_flux_threshold(
                map_skydir[slice(i, i + map_nstep)], fn, ts_thresh, min_counts)
            map_int_flux.data[s] = o['flux']
            map_int_npred.data[s] = o['npred']

    o = scalc.diff_flux_threshold(c, fn, ts_thresh, min_counts)

    cols = [Column(name='e_min', dtype='f8', data=scalc.ebins[:-1], unit='MeV'),
            Column(name='e_ref', dtype='f8', data=o['e_ref'], unit='MeV'),
            Column(name='e_max', dtype='f8', data=scalc.ebins[1:], unit='MeV'),
            Column(name='flux', dtype='f8', data=o['flux'],
                   unit='ph / (cm2 s)'),
            Column(name='eflux', dtype='f8', data=o['eflux'],
                   unit='MeV / (cm2 s)'),
            Column(name='dnde', dtype='f8', data=o['dnde'],
                   unit='ph / (MeV cm2 s)'),
            Column(name='e2dnde', dtype='f8',
                   data=o['e2dnde'], unit='MeV / (cm2 s)'),
            Column(name='npred', dtype='f8', data=o['npred'], unit='ph')]

    tab_diff = Table(cols)

    cols = [Column(name='index', dtype='f8'),
            Column(name='e_min', dtype='f8', unit='MeV'),
            Column(name='e_ref', dtype='f8', unit='MeV'),
            Column(name='e_max', dtype='f8', unit='MeV'),
            Column(name='flux', dtype='f8', unit='ph / (cm2 s)'),
            Column(name='eflux', dtype='f8', unit='MeV / (cm2 s)'),
            Column(name='dnde', dtype='f8', unit='ph / (MeV cm2 s)'),
            Column(name='e2dnde', dtype='f8', unit='MeV / (cm2 s)'),
            Column(name='npred', dtype='f8', unit='ph'),
            Column(name='ebin_e_min', dtype='f8',
                   unit='MeV', shape=(len(ectr),)),
            Column(name='ebin_e_ref', dtype='f8',
                   unit='MeV', shape=(len(ectr),)),
            Column(name='ebin_e_max', dtype='f8',
                   unit='MeV', shape=(len(ectr),)),
            Column(name='ebin_flux', dtype='f8',
                   unit='ph / (cm2 s)', shape=(len(ectr),)),
            Column(name='ebin_eflux', dtype='f8',
                   unit='MeV / (cm2 s)', shape=(len(ectr),)),
            Column(name='ebin_dnde', dtype='f8',
                   unit='ph / (MeV cm2 s)', shape=(len(ectr),)),
            Column(name='ebin_e2dnde', dtype='f8',
                   unit='MeV / (cm2 s)', shape=(len(ectr),)),
            Column(name='ebin_npred', dtype='f8', unit='ph', shape=(len(ectr),))]

    cols_ebounds = [Column(name='E_MIN', dtype='f8',
                           unit='MeV', data=ebins[:-1]),
                    Column(name='E_MAX', dtype='f8',
                           unit='MeV', data=ebins[1:]), ]

    tab_int = Table(cols)
    tab_ebounds = Table(cols_ebounds)

    index = np.linspace(1.0, 5.0, 4 * 4 + 1)

    for g in index:
        fn = spectrum.PowerLaw([1E-13, -g], scale=10**3.5)
        o = scalc.int_flux_threshold(c, fn, ts_thresh, 3.0)
        row = [g]
        for colname in tab_int.columns:
            if colname == 'index':
                continue
            if 'ebin' in colname:
                row += [o['bins'][colname.replace('ebin_', '')]]
            else:
                row += [o[colname]]

        tab_int.add_row(row)

    hdulist = fits.HDUList()
    hdulist.append(fits.table_to_hdu(tab_diff))
    hdulist.append(fits.table_to_hdu(tab_int))
    hdulist.append(fits.table_to_hdu(tab_ebounds))

    hdulist[1].name = 'DIFF_FLUX'
    hdulist[2].name = 'INT_FLUX'
    hdulist[3].name = 'EBOUNDS'

    if map_type is not None:
        hdu = map_diff_flux.create_image_hdu()
        hdu.name = 'MAP_DIFF_FLUX'
        hdulist.append(hdu)
        hdu = map_diff_npred.create_image_hdu()
        hdu.name = 'MAP_DIFF_NPRED'
        hdulist.append(hdu)

        hdu = map_int_flux.create_image_hdu()
        hdu.name = 'MAP_INT_FLUX'
        hdulist.append(hdu)
        hdu = map_int_npred.create_image_hdu()
        hdu.name = 'MAP_INT_NPRED'
        hdulist.append(hdu)

    hdulist.writeto(output, overwrite=True)
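
A minimal call sketch for the dark-matter spectral option added in this variant (not part of the example). File paths and parameter values are placeholders; with sedshape='DM' the spectrum is a DMFitFunction built from the given mass and annihilation channel.

run_flux_sensitivity(ltcube='ltcube_8yr.fits',
                     galdiff='gll_iem_v06.fits',
                     glon=0.0, glat=60.0,
                     sedshape='DM', DMmass=100.0, DMchannel='bb',
                     ts_thresh=25.0, min_counts=3.0,
                     output='dm_flux_sensitivity.fits')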
Example #4
    def localize(self, name, **kwargs):
        """Find the best-fit position of a source.  Localization is
        performed in two steps.  First a TS map is computed centered
        on the source with half-width set by ``dtheta_max``.  A fit is
        then performed to the maximum TS peak in this map.  The source
        position is then further refined by scanning the likelihood in
        the vicinity of the peak found in the first step.  The size of
        the scan region is set to encompass the 99% positional
        uncertainty contour as determined from the peak fit.

        Parameters
        ----------
        name : str
            Source name.

        dtheta_max : float
            Maximum offset in RA/DEC in deg from the nominal source
            position that will be used to define the boundaries of the
            TS map search region.

        nstep : int
            Number of steps in longitude/latitude that will be taken
            when refining the source position.  The bounds of the scan
            range are set to the 99% positional uncertainty as
            determined from the TS map peak fit.  The total number of
            sampling points will be nstep**2.

        fix_background : bool
            Fix background parameters when fitting the source position.

        update : bool
            Update the model for this source with the best-fit
            position.  If newname=None this will overwrite the
            existing source map of this source with one corresponding
            to its new location.

        newname : str
            Name that will be assigned to the relocalized source
            when update=True.  If newname is None then the existing
            source name will be used.

        make_plots : bool
           Generate plots.

        write_fits : bool
           Write the output to a FITS file.

        write_npy : bool
           Write the output dictionary to a numpy file.

        optimizer : dict
            Dictionary that overrides the default optimizer settings.

        Returns
        -------
        localize : dict
            Dictionary containing results of the localization
            analysis.  This dictionary is also saved to the
            dictionary of this source in 'localize'.

        """

        name = self.roi.get_source_by_name(name).name

        schema = ConfigSchema(self.defaults['localize'],
                              optimizer=self.defaults['optimizer'])
        schema.add_option('make_plots', False)
        schema.add_option('write_fits', True)
        schema.add_option('write_npy', True)
        schema.add_option('newname', name)
        schema.add_option('prefix', '')
        config = utils.create_dict(self.config['localize'],
                                   optimizer=self.config['optimizer'])
        config = schema.create_config(config, **kwargs)

        nstep = config['nstep']
        dtheta_max = config['dtheta_max']
        update = config['update']
        newname = config['newname']
        prefix = config['prefix']

        self.logger.info('Running localization for %s' % name)

        saved_state = LikelihoodState(self.like)

        if config['fix_background']:
            self.free_sources(free=False, loglevel=logging.DEBUG)

        src = self.roi.copy_source(name)
        skydir = src.skydir
        skywcs = self._skywcs
        src_pix = skydir.to_pixel(skywcs)

        tsmap_fit, tsmap = self._localize_tsmap(name,
                                                prefix=prefix,
                                                dtheta_max=dtheta_max)

        self.logger.debug(
            'Completed localization with TS Map.\n'
            '(ra,dec) = (%10.4f,%10.4f)\n'
            '(glon,glat) = (%10.4f,%10.4f)', tsmap_fit['ra'], tsmap_fit['dec'],
            tsmap_fit['glon'], tsmap_fit['glat'])

        # Fit baseline (point-source) model
        self.free_norm(name)
        fit_output = self._fit(loglevel=logging.DEBUG, **config['optimizer'])

        # Save likelihood value for baseline fit
        loglike0 = fit_output['loglike']
        self.logger.debug('Baseline Model Likelihood: %f', loglike0)

        self.zero_source(name)

        o = {
            'name': name,
            'config': config,
            'fit_success': True,
            'loglike_base': loglike0,
            'loglike_loc': np.nan,
            'dloglike_loc': np.nan
        }

        cdelt0 = np.abs(skywcs.wcs.cdelt[0])
        cdelt1 = np.abs(skywcs.wcs.cdelt[1])
        scan_step = 2.0 * tsmap_fit['r95'] / (nstep - 1.0)

        self.logger.debug(
            'Refining localization search to '
            'region of width: %.4f deg', tsmap_fit['r95'])

        scan_map = Map.create(SkyCoord(tsmap_fit['ra'],
                                       tsmap_fit['dec'],
                                       unit='deg'),
                              scan_step, (nstep, nstep),
                              coordsys=wcs_utils.get_coordsys(skywcs))

        scan_skydir = scan_map.get_pixel_skydirs()

        lnlscan = dict(wcs=scan_map.wcs.to_header().items(),
                       loglike=np.zeros((nstep, nstep)),
                       dloglike=np.zeros((nstep, nstep)),
                       dloglike_fit=np.zeros((nstep, nstep)))

        for i, t in enumerate(scan_skydir):
            model_name = '%s_localize' % (name.replace(' ', '').lower())
            src.set_name(model_name)
            src.set_position(t)
            self.add_source(model_name,
                            src,
                            free=True,
                            init_source=False,
                            save_source_maps=False,
                            loglevel=logging.DEBUG)
            fit_output = self._fit(loglevel=logging.DEBUG,
                                   **config['optimizer'])

            loglike1 = fit_output['loglike']
            lnlscan['loglike'].flat[i] = loglike1
            self.delete_source(model_name, loglevel=logging.DEBUG)

        lnlscan['dloglike'] = lnlscan['loglike'] - np.max(lnlscan['loglike'])
        scan_tsmap = Map(2.0 * lnlscan['dloglike'].T, scan_map.wcs)

        self.unzero_source(name)
        saved_state.restore()
        self._sync_params(name)
        self._update_roi()

        scan_fit, new_skydir = fit_error_ellipse(scan_tsmap, dpix=3)
        o.update(scan_fit)

        o['loglike_loc'] = np.max(
            lnlscan['loglike']) + 0.5 * scan_fit['offset']
        o['dloglike_loc'] = o['loglike_loc'] - o['loglike_base']

        # lnlscan['dloglike_fit'] = \
        #   utils.parabola(np.linspace(0,nstep-1.0,nstep)[:,np.newaxis],
        #                  np.linspace(0,nstep-1.0,nstep)[np.newaxis,:],
        #                  *scan_fit['popt']).reshape((nstep,nstep))

        o['lnlscan'] = lnlscan

        # Best fit position and uncertainty from fit to TS map
        o['tsmap_fit'] = tsmap_fit

        # Best fit position and uncertainty from pylike scan
        o['scan_fit'] = scan_fit
        pix = new_skydir.to_pixel(skywcs)
        o['xpix'] = float(pix[0])
        o['ypix'] = float(pix[1])
        o['deltax'] = (o['xpix'] - src_pix[0]) * cdelt0
        o['deltay'] = (o['ypix'] - src_pix[1]) * cdelt1

        o['offset'] = skydir.separation(new_skydir).deg

        if o['offset'] > dtheta_max:
            o['fit_success'] = False

        if not o['fit_success']:
            self.logger.error(
                'Localization failed.\n'
                '(ra,dec) = (%10.4f,%10.4f)\n'
                '(glon,glat) = (%10.4f,%10.4f)\n'
                'offset = %8.4f deltax = %8.4f '
                'deltay = %8.4f', o['ra'], o['dec'], o['glon'], o['glat'],
                o['offset'], o['deltax'], o['deltay'])
        else:
            self.logger.info(
                'Localization succeeded with '
                'coordinates:\n'
                '(ra,dec) = (%10.4f,%10.4f)\n'
                '(glon,glat) = (%10.4f,%10.4f)\n'
                'offset = %8.4f r68 = %8.4f', o['ra'], o['dec'], o['glon'],
                o['glat'], o['offset'], o['r68'])

        self.roi[name]['localize'] = copy.deepcopy(o)

        if config['make_plots']:
            self._plotter.make_localization_plots(o,
                                                  tsmap,
                                                  self.roi,
                                                  prefix=prefix,
                                                  skydir=scan_skydir)

        if update and o['fit_success']:
            self.logger.info('Updating source %s '
                             'to localized position.', name)
            src = self.delete_source(name)
            src.set_position(new_skydir)
            src.set_name(newname, names=src.names)

            self.add_source(newname, src, free=True)
            fit_output = self.fit(loglevel=logging.DEBUG)
            o['loglike_loc'] = fit_output['loglike']
            o['dloglike_loc'] = o['loglike_loc'] - o['loglike_base']
            src = self.roi.get_source_by_name(newname)
            self.roi[newname]['localize'] = copy.deepcopy(o)
            self.logger.info('LogLike: %12.3f DeltaLogLike: %12.3f',
                             o['loglike_loc'], o['dloglike_loc'])

        if o['fit_success']:
            src = self.roi.get_source_by_name(newname)
            src['pos_sigma'] = o['sigma']
            src['pos_sigma_semimajor'] = o['sigma_semimajor']
            src['pos_sigma_semiminor'] = o['sigma_semiminor']
            src['pos_r68'] = o['r68']
            src['pos_r95'] = o['r95']
            src['pos_r99'] = o['r99']
            src['pos_angle'] = np.degrees(o['theta'])

        self.logger.info('Finished localization.')
        return o
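
A minimal usage sketch for localize (not part of the example). Here gta stands for a configured GTAnalysis instance after setup(), and the source name is hypothetical.

loc = gta.localize('3FGL J2021.0+4031', nstep=5, dtheta_max=0.5,
                   update=True, make_plots=False)

if loc['fit_success']:
    # Best-fit position and positional uncertainties from the likelihood scan.
    print('(ra, dec) = (%.4f, %.4f) deg' % (loc['ra'], loc['dec']))
    print('r68 = %.4f deg, r95 = %.4f deg' % (loc['r68'], loc['r95']))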