Example #1
import numpy as np
from astropy.coordinates import Angle
from astropy.io import ascii
from astropy.table import Table
from gammapy.data import ObservationGroupAxis, ObservationGroups


def convert_obs_groups_binning_def_michi_to_default():
    """Convert observation groups binning definition "michi" to "default".
    """
    # observation groups binning definition "michi"

    # alt az bin edges definitions
    altitude_edges = Angle(
        [0, 20, 23, 27, 30, 33, 37, 40, 44, 49, 53, 58, 64, 72, 90], 'degree')
    azimuth_edges = Angle([-90, 90, 270], 'degree')

    # convert observation groups binning definition "michi" to "default"

    list_obs_group_axis = [
        ObservationGroupAxis('ALT', altitude_edges, fmt='edges'),
        ObservationGroupAxis('AZ', azimuth_edges, fmt='edges')
    ]
    obs_groups_michi = ObservationGroups(list_obs_group_axis)
    print("Observation groups 'michi':")
    print(obs_groups_michi.obs_groups_table)
    # save
    outfile = 'bg_observation_groups_michi.ecsv'
    print('Writing {}'.format(outfile))
    obs_groups_michi.write(outfile)

    # lookup table: equivalences in group/file naming "default" <-> "michi"
    # 3 columns: GROUP_ID, ALT_ID, AZ_ID
    # 28 rows: 1 per GROUP_ID

    lookup_obs_groups_michi = Table()
    n_rows = obs_groups_michi.n_groups
    lookup_obs_groups_michi['GROUP_ID'] = np.zeros(n_rows, dtype=int)
    lookup_obs_groups_michi['ALT_ID'] = np.zeros(n_rows, dtype=int)
    lookup_obs_groups_michi['AZ_ID'] = np.zeros(n_rows, dtype=int)

    # loop over each observation group axis
    count_groups = 0
    for alt_id in np.arange(len(altitude_edges) - 1):
        for az_id in np.arange(len(azimuth_edges) - 1):
            lookup_obs_groups_michi['GROUP_ID'][count_groups] = count_groups
            lookup_obs_groups_michi['ALT_ID'][count_groups] = alt_id
            lookup_obs_groups_michi['AZ_ID'][count_groups] = az_id
            count_groups += 1

    print("lookup table:")
    print(lookup_obs_groups_michi)

    # save
    outfile = 'lookup_obs_groups_michi.ecsv'
    print('Writing {}'.format(outfile))
    # `~astropy.io.ascii` always overwrites the file
    ascii.write(lookup_obs_groups_michi,
                outfile,
                format='ecsv',
                fast_writer=False)
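# The plot_bg_cube_model_comparison function further below calls a
# look_obs_groups_michi(group) helper that is not shown in these excerpts.
# A minimal sketch of what it presumably does, assuming the
# lookup_obs_groups_michi.ecsv table written above (the helper body is an
# assumption; only its name and return convention come from the call sites):
def look_obs_groups_michi(group_id):
    """Map a GROUP_ID to its (ALT_ID, AZ_ID) pair via the lookup table."""
    lookup = ascii.read('lookup_obs_groups_michi.ecsv', format='ecsv')
    row = lookup[lookup['GROUP_ID'] == group_id][0]
    return row['ALT_ID'], row['AZ_ID']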
Example #3
from gammapy.data import ObservationGroupAxis, ObservationGroups


def create_dummy_observation_grouping():
    """Define dummy observation grouping.

    Define an observation grouping with only one group.

    Returns
    -------
    obs_groups : `~gammapy.data.ObservationGroups`
        Observation grouping.
    """
    alt_axis = ObservationGroupAxis('ALT', ALT_RANGE, fmt='edges')
    az_axis = ObservationGroupAxis('AZ', AZ_RANGE, fmt='edges')
    obs_groups = ObservationGroups([alt_axis, az_axis])
    obs_groups.obs_groups_table['GROUP_ID'][0] = GROUP_ID

    return obs_groups
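# ALT_RANGE, AZ_RANGE and GROUP_ID are module-level constants of the original
# script and are not shown in this excerpt. Plausible single-group definitions
# (the specific values below are assumptions for illustration only):
from astropy.coordinates import Angle

ALT_RANGE = Angle([0, 90], 'degree')    # assumed: one altitude bin
AZ_RANGE = Angle([-90, 270], 'degree')  # assumed: one azimuth bin
GROUP_ID = 1                            # assumed group ID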
import numpy as np
import matplotlib.pyplot as plt
from astropy.coordinates import Angle
from astropy.units import Quantity
from gammapy.data import ObservationGroups
# CubeBackgroundModel lived in gammapy.background at the time of this script
# (assumed import path for this excerpt):
from gammapy.background import CubeBackgroundModel


def plot_bg_cube_model_comparison(input_dir1, binning_format1, name1,
                                  input_dir2, binning_format2, name2):
    """
    Plot background cube model comparison.

    Produce a few figures comparing two sets of bg cube models (1
    and 2), assuming the same binning in both sets of observation
    groups (a.k.a. observation bins).

    Each figure corresponds to 1 observation group.
    Plot strategy in each figure:

    * Images:
        * rows: similar energy bin
        * cols: same bg cube model set
    * Spectra:
        * rows: similar det bin
        * cols: compare both bg cube model sets

    The script can be customized by setting a few global variables:

    * **group_ids_selection**: groups to compare; if empty: use all
      groups

    * **NORMALIZE**: normalization to use for the models. If
      activated, model 1 is normalized to match model 2. This can be
      useful when comparing reco models w.r.t. true ones. Options:
          * *0*: do not normalize
          * *1*: normalize w.r.t. cube integral
          * *2*: normalize w.r.t. image integrals; each image (i.e.
            energy bin/slice) is normalized independently; this
            option can alter the spectral shape of the bg rate, but
            it is the way the *michi* smoothing method normalizes
            the background cube model, hence it is necessary when
            comparing to models that use that particular smoothing

    Parameters
    ----------
    input_dir1, input_dir2 : str
        Directory where the corresponding set of bg cube models is stored.
    binning_format1, binning_format2 : {'default', 'michi'}
        String specifying the binning format; accepted values are:

        * *default* for the Gammapy format from
          `~gammapy.data.ObservationGroups`; an observation groups
          ECSV file is expected in the bg cube models dir.
        * *michi* for the binning used by Michael Mayer;
          this script has methods to convert it to the
          *default* format.
          ref: [Mayer2015]_ (section 5.2.4)

    name1, name2 : str
        Names to use for plot labels/legends.
    """
    # check binning
    accepted_binnings = ['default', 'michi']

    if ((binning_format1 not in accepted_binnings) or
            (binning_format2 not in accepted_binnings)):
        raise ValueError("Invalid binning format: {0} or {1}".format(binning_format1,
                                                                     binning_format2))

    # convert binning, if necessary
    if binning_format1 == 'michi' or binning_format2 == 'michi':
        convert_obs_groups_binning_def_michi_to_default()

    # loop over observation groups: use binning of the 1st set to compare
    if binning_format1 == 'michi':
        observation_groups = obs_groups_michi
    else:
        observation_groups = ObservationGroups.read(input_dir1 + '/bg_observation_groups.ecsv')
    groups = observation_groups.list_of_groups
    print()
    print("list of groups", groups)

    for group in groups:
        print()
        print("group ", group)
        # compare only observation groups in group IDs selection
        # if empty, use all groups:
        if len(group_ids_selection) != 0:
            groups_to_compare = group_ids_selection
        else:
            groups_to_compare = groups
        if group in groups_to_compare:
            group_info = observation_groups.info_group(group)
            print(group_info)

            # get cubes
            if binning_format1 == 'michi':
                # find corresponding ALT_ID, AZ_ID in lookup table
                i_alt, i_az = look_obs_groups_michi(group)
                filename1 = input_dir1 + '/hist_alt' + str(i_alt) + \
                            '_az' + str(i_az) + '.fits.gz'
            else:
                filename1 = input_dir1 + '/bg_cube_model_group' + str(group) + \
                            '_table.fits.gz'
            if binning_format2 == 'michi':
                # find corresponding ALT_ID, AZ_ID in lookup table
                i_alt, i_az = look_obs_groups_michi(group)
                filename2 = input_dir2 + '/hist_alt' + str(i_alt) + \
                            '_az' + str(i_az) + '.fits.gz'
            else:
                filename2 = input_dir2 + '/bg_cube_model_group' + str(group) + \
                            '_table.fits.gz'
            print('filename1', filename1)
            print('filename2', filename2)
            bg_cube_model1 = CubeBackgroundModel.read(filename1,
                                                      format='table').background_cube
            bg_cube_model2 = CubeBackgroundModel.read(filename2,
                                                      format='table').background_cube

            # normalize 1 w.r.t. 2 (i.e. true w.r.t. reco)
            if NORMALIZE == 1:
                # normalize w.r.t. cube integral
                integral1 = bg_cube_model1.integral
                integral2 = bg_cube_model2.integral
                bg_cube_model1.data *= integral2 / integral1
            elif NORMALIZE == 2:
                # normalize w.r.t images integral (normalize each image on its own)
                integral_images1 = bg_cube_model1.integral_images
                integral_images2 = bg_cube_model2.integral_images
                for i_energy in np.arange(len(bg_cube_model1.energy_edges) - 1):
                    bg_cube_model1.data[i_energy] *= (integral_images2 / integral_images1)[i_energy]

            # compare binning
            print("energy edges 1", bg_cube_model1.energy_edges)
            print("energy edges 2", bg_cube_model2.energy_edges)
            print("detector edges 1 Y", bg_cube_model1.coordy_edges)
            print("detector edges 2 Y", bg_cube_model2.coordy_edges)
            print("detector edges 1 X", bg_cube_model1.coordx_edges)
            print("detector edges 2 X", bg_cube_model2.coordx_edges)

            # make sure that both cubes use the same units for the plots
            bg_cube_model2.data = bg_cube_model2.data.to(bg_cube_model1.data.unit)

            # plot
            fig, axes = plt.subplots(nrows=2, ncols=3)
            fig.set_size_inches(30., 15., forward=True)
            plt.suptitle(group_info)

            # plot images
            #  rows: similar energy bin
            #  cols: same file
            # bg_cube_model1.plot_image(energy=Quantity(0.5, 'TeV'), ax=axes[0, 0])
            bg_cube_model1.plot_image(energy=Quantity(5., 'TeV'), ax=axes[0, 0])
            axes[0, 0].set_title("{0}: {1}".format(name1, axes[0, 0].get_title()))
            bg_cube_model1.plot_image(energy=Quantity(50., 'TeV'), ax=axes[1, 0])
            axes[1, 0].set_title("{0}: {1}".format(name1, axes[1, 0].get_title()))
            # bg_cube_model2.plot_image(energy=Quantity(0.5, 'TeV'), ax=axes[0, 1])
            bg_cube_model2.plot_image(energy=Quantity(5., 'TeV'), ax=axes[0, 1])
            axes[0, 1].set_title("{0}: {1}".format(name2, axes[0, 1].get_title()))
            bg_cube_model2.plot_image(energy=Quantity(50., 'TeV'), ax=axes[1, 1])
            axes[1, 1].set_title("{0}: {1}".format(name2, axes[1, 1].get_title()))

            # plot spectra
            #  rows: similar det bin
            #  cols: compare both files
            bg_cube_model1.plot_spectrum(coord=Angle([0., 0.], 'degree'),
                                         ax=axes[0, 2],
                                         style_kwargs=dict(color='blue',
                                                           label=name1))
            spec_title1 = axes[0, 2].get_title()
            bg_cube_model2.plot_spectrum(coord=Angle([0., 0.], 'degree'),
                                         ax=axes[0, 2],
                                         style_kwargs=dict(color='red',
                                                           label=name2))
            spec_title2 = axes[0, 2].get_title()
            if spec_title1 != spec_title2:
                s_error = "Expected same det binning, but got "
                s_error += "\"{0}\" and \"{1}\"".format(spec_title1, spec_title2)
                raise ValueError(s_error)
            else:
                axes[0, 2].set_title(spec_title1)
            axes[0, 2].legend()

            bg_cube_model1.plot_spectrum(coord=Angle([2., 2.], 'degree'),
                                         ax=axes[1, 2],
                                         style_kwargs=dict(color='blue',
                                                           label=name1))
            spec_title1 = axes[1, 2].get_title()
            bg_cube_model2.plot_spectrum(coord=Angle([2., 2.], 'degree'),
                                         ax=axes[1, 2],
                                         style_kwargs=dict(color='red',
                                                           label=name2))
            spec_title2 = axes[1, 2].get_title()
            if spec_title1 != spec_title2:
                s_error = "Expected same det binning, but got "
                s_error += "\"{0}\" and \"{1}\"".format(spec_title1, spec_title2)
                raise ValueError(s_error)
            else:
                axes[1, 2].set_title(spec_title1)
            axes[1, 2].legend()

            plt.draw()

            # save
            outfile = "bg_cube_model_comparison_group{}.png".format(group)
            print('Writing {}'.format(outfile))
            fig.savefig(outfile)

    plt.show()  # block at the end so the figures stay open
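# A hypothetical invocation of the comparison (directory names and labels are
# made up for illustration; group_ids_selection and NORMALIZE are the
# module-level knobs described in the docstring):
group_ids_selection = []  # empty list: compare all groups
NORMALIZE = 1             # normalize model 1 to model 2 via the cube integral

plot_bg_cube_model_comparison(
    input_dir1='bg_cube_models_true', binning_format1='default', name1='true',
    input_dir2='bg_cube_models_reco', binning_format2='michi', name2='reco',
)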
Example #7
# get the required runs

agnid = list(set(obsid) - set(LMCid) - set(SNid))
mylist = datastore.obs_list(agnid)
zen_ang = [o1.pointing_zen.value for o1 in mylist]


# Define the grouping
zenith_bins = [0, 10, 20, 30, 40, 50, 90]
# zenith_bins = [min(zen_ang), 10.0, 20.0, 30.0, 45.0, 60.0, max(zen_ang)]
zenith_bins = zenith_bins * u.deg
axes = [ObservationGroupAxis('ZEN_PNT', zenith_bins, fmt='edges')]

# Create the ObservationGroups object
obs_groups = ObservationGroups(axes)

# write it to file
filename = str(outdir + "/group-def.fits")
obs_groups.obs_groups_table.write(filename, overwrite=True)

obs_table_with_group_id = obs_groups.apply(datastore.obs_table.select_obs_id(agnid))
# gammacat exclusion mask

fil_gammacat="/Users/asinha/Gammapy-dev/gammapy-extra/datasets/catalogs/gammacat/gammacat.fits.gz"
cat = SourceCatalogGammaCat(filename=fil_gammacat)
exclusion_table = cat.table.copy()
exclusion_table.rename_column('ra', 'RA')
exclusion_table.rename_column('dec', 'DEC')
radius = exclusion_table['morph_sigma'].data
radius[np.isnan(radius)] = 0.3
from gammapy.utils.nddata import sqrt_space
from gammapy.data import DataStore, ObservationGroupAxis, ObservationGroups
from gammapy.background import EnergyOffsetBackgroundModel
from gammapy.background import OffDataBackgroundMaker

# create a directory
os.mkdir("background")

# observation list
name = "Crab"
datastore = DataStore.from_dir("$HESS_DATA")
src = SkyCoord.from_name(name)
sep = SkyCoord.separation(src, datastore.obs_table.pointing_radec)
srcruns = (datastore.obs_table[sep < 2.0 * u.deg])
obsid = srcruns['OBS_ID'].data
mylist = datastore.obs_list(obsid[:30])

# Define the grouping
zenith_bins = np.linspace(0, 90, 6)
axes = [ObservationGroupAxis('ZEN_PNT', zenith_bins, fmt='edges')]

# Create the ObservationGroups object
obs_groups = ObservationGroups(axes)

# write it to file
filename = str(scratch_dir / 'group-def.fits')
obs_groups.obs_groups_table.write(filename, overwrite=True)

obs_table_with_group_id = obs_groups.apply(srcruns[0:30])
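# To pick out the runs of a single group from the table returned by apply(),
# a plain astropy Table mask is enough (a minimal sketch; the GROUP_ID column
# name matches the grouping tables shown above):
mask = obs_table_with_group_id['GROUP_ID'] == 0
group0_runs = obs_table_with_group_id[mask]
print(group0_runs['OBS_ID'])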
Example #9
def make_cubes(ereco, etrue, use_etrue, center):
    tmpdir = os.path.expandvars('$GAMMAPY_EXTRA') + "/test_datasets/cube/data"
    outdir = tmpdir
    outdir2 = os.path.expandvars(
        '$GAMMAPY_EXTRA') + '/test_datasets/cube/background'

    if os.path.isdir("data"):
        shutil.rmtree("data")
    if os.path.isdir("background"):
        shutil.rmtree("background")
    Path(outdir2).mkdir()

    ds = DataStore.from_dir("$GAMMAPY_EXTRA/datasets/hess-crab4-hd-hap-prod2")
    ds.copy_obs(ds.obs_table, tmpdir)
    data_store = DataStore.from_dir(tmpdir)
    # Create a background model from the 4 Crab runs, using the counts
    # outside the exclusion region. This is just for testing; normally you
    # would use thousands of AGN runs to build this kind of model.
    axes = [ObservationGroupAxis('ZEN_PNT', [0, 49, 90], fmt='edges')]
    obs_groups = ObservationGroups(axes)
    obs_table_with_group_id = obs_groups.apply(data_store.obs_table)
    obs_groups.obs_groups_table.write(outdir2 + "/group-def.fits",
                                      overwrite=True)
    # Exclusion sources table
    cat = SourceCatalogGammaCat()
    exclusion_table = cat.table
    exclusion_table.rename_column('ra', 'RA')
    exclusion_table.rename_column('dec', 'DEC')
    radius = exclusion_table['morph_sigma']
    radius.value[np.isnan(radius)] = 0.3
    exclusion_table['Radius'] = radius
    exclusion_table = Table(exclusion_table)

    bgmaker = OffDataBackgroundMaker(data_store,
                                     outdir2,
                                     run_list=None,
                                     obs_table=obs_table_with_group_id,
                                     ntot_group=obs_groups.n_groups,
                                     excluded_sources=exclusion_table)
    bgmaker.make_model("2D")
    bgmaker.smooth_models("2D")
    bgmaker.save_models("2D")
    bgmaker.save_models(modeltype="2D", smooth=True)

    shutil.move(str(outdir2), str(outdir))
    fn = outdir + '/background/group-def.fits'
    hdu_index_table = bgmaker.make_total_index_table(
        data_store=data_store,
        modeltype='2D',
        out_dir_background_model="background",
        filename_obs_group_table=fn,
        smooth=True)
    fn = outdir + '/hdu-index.fits.gz'
    hdu_index_table.write(fn, overwrite=True)

    offset_band = Angle([0, 2.49], 'deg')

    ref_cube_images = make_empty_cube(image_size=50,
                                      energy=ereco,
                                      center=center)
    ref_cube_exposure = make_empty_cube(image_size=50,
                                        energy=etrue,
                                        center=center,
                                        data_unit="m2 s")

    data_store = DataStore.from_dir(tmpdir)

    refheader = ref_cube_images.sky_image_ref.to_image_hdu().header
    exclusion_mask = SkyMask.read(
        '$GAMMAPY_EXTRA/datasets/exclusion_masks/tevcat_exclusion.fits')
    exclusion_mask = exclusion_mask.reproject(reference=refheader)

    # Problem: loading the PSF table for one of the runs is not implemented yet...
    data_store.hdu_table.remove_row(14)

    cube_maker = StackedObsCubeMaker(empty_cube_images=ref_cube_images,
                                     empty_exposure_cube=ref_cube_exposure,
                                     offset_band=offset_band,
                                     data_store=data_store,
                                     obs_table=data_store.obs_table,
                                     exclusion_mask=exclusion_mask,
                                     save_bkg_scale=True)
    cube_maker.make_cubes(make_background_image=True, radius=10.)
    obslist = [data_store.obs(obs_id)
               for obs_id in data_store.obs_table["OBS_ID"]]
    ObsList = ObservationList(obslist)
    mean_psf_cube = make_mean_psf_cube(image_size=50,
                                       energy_cube=etrue,
                                       center_maps=center,
                                       center=center,
                                       ObsList=ObsList,
                                       spectral_index=2.3)
    if use_etrue:
        mean_rmf = make_mean_rmf(energy_true=etrue,
                                 energy_reco=ereco,
                                 center=center,
                                 ObsList=ObsList)

    filename_mask = 'exclusion_mask.fits'
    filename_counts = 'counts_cube.fits'
    filename_bkg = 'bkg_cube.fits'
    filename_significance = 'significance_cube.fits'
    filename_excess = 'excess_cube.fits'
    if use_etrue:
        filename_exposure = 'exposure_cube_etrue.fits'
        filename_psf = 'psf_cube_etrue.fits'
        filename_rmf = 'rmf.fits'
        mean_rmf.write(filename_rmf, clobber=True)
    else:
        filename_exposure = 'exposure_cube.fits'
        filename_psf = 'psf_cube.fits'
    exclusion_mask.write(filename_mask, clobber=True)
    cube_maker.counts_cube.write(filename_counts,
                                 format="fermi-counts",
                                 clobber=True)
    cube_maker.bkg_cube.write(filename_bkg,
                              format="fermi-counts",
                              clobber=True)
    cube_maker.significance_cube.write(filename_significance,
                                       format="fermi-counts",
                                       clobber=True)
    cube_maker.excess_cube.write(filename_excess,
                                 format="fermi-counts",
                                 clobber=True)
    cube_maker.exposure_cube.write(filename_exposure,
                                   format="fermi-counts",
                                   clobber=True)
    mean_psf_cube.write(filename_psf, format="fermi-counts", clobber=True)
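# A hypothetical invocation of make_cubes (the energy binnings and the Crab
# pointing below are illustrative assumptions, not values from the original
# test setup):
import numpy as np
from astropy.coordinates import SkyCoord
from astropy.units import Quantity

ereco = Quantity(np.logspace(-1, 2, 5), 'TeV')  # assumed reco energy edges
etrue = Quantity(np.logspace(-1, 2, 9), 'TeV')  # assumed true energy edges
center = SkyCoord(83.633, 22.014, unit='deg')   # Crab nebula position
make_cubes(ereco=ereco, etrue=etrue, use_etrue=True, center=center)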
Example #10
# ### Make an observation table defining the run grouping
#
# Prepare a scheme to group observations with similar observing conditions and create a new ObservationTable with the grouping ID for each run.

# Create a background model from the 4 Crab runs, using the counts outside the exclusion region (here: outside the Crab).
data_store = DataStore.from_dir(
    "$GAMMAPY_EXTRA/datasets/hess-crab4-hd-hap-prod2")

# Define the grouping you want to use to group the observations to make the acceptance curves.
# Here we use only 2 zenith-angle bins; you could also add efficiency bins, for example.
axes = [ObservationGroupAxis('ZEN_PNT', [0, 49, 90], fmt='edges')]

# Create the ObservationGroups object
obs_groups = ObservationGroups(axes)
# write it to file
filename = str(scratch_dir / 'group-def.fits')
obs_groups.obs_groups_table.write(filename, overwrite=True)

# Create a new ObservationTable with the column group_id
# Give the list of runs from your obs table that you want to use to produce the background model.
# Here we keep it very simple: only the 4 Crab runs.
list_ids = [23523, 23526, 23559, 23592]
obs_table_with_group_id = obs_groups.apply(
    data_store.obs_table.select_obs_id(list_ids))

# ### Make a table of known gamma-ray sources to exclude
#
# We need a mask to remove known sources from the observations. We use TeVCat and exclude a circular region of at least 0.3° radius around each source. Since we use Crab runs here, we will remove the Crab events from the FOV to select only the OFF events for building the acceptance curves. Of course, normally you would use thousands of AGN runs to build coherent acceptance curves. A minimal sketch of building such an exclusion table follows.
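# A minimal sketch of building the exclusion table from gamma-cat, following
# the same pattern as Example #7 above (the catalog path and the import
# location are assumptions; adjust them to your setup):
import numpy as np
from gammapy.catalog import SourceCatalogGammaCat

cat = SourceCatalogGammaCat(filename='gammacat.fits.gz')  # assumed local path
exclusion_table = cat.table.copy()
exclusion_table.rename_column('ra', 'RA')
exclusion_table.rename_column('dec', 'DEC')
# fall back to a 0.3 deg radius where the catalog has no morphology size
radius = exclusion_table['morph_sigma'].data
radius[np.isnan(radius)] = 0.3
exclusion_table['Radius'] = radius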
Example #11
d1.obs_table['OBS_ID']
print(d1)
crab_pos = SkyCoord.from_name('crab')
crab_pos
datastore.obs_table
datastore.obs_table["ZEN_PNT"] < 20.0
from gammapy.data import ObservationGroups, ObservationGroupAxis
zenith = Angle([0, 30, 40, 50], 'deg')
ntels = [3, 4]
obs_groups = ObservationGroups([
    ObservationGroupAxis('ZENITH', zenith, fmt='edges'),
    ObservationGroupAxis('N_TELS', ntels, fmt='values'),
])
print(obs_groups.info)
obs1 = obs_groups.apply(datastore.obs_table)
zenith = Angle([0, 30, 40, 50], 'deg')
ntels = [3, 4]
obs_groups = ObservationGroups([
    ObservationGroupAxis('ZEN_PNT', zenith, fmt='edges'),
    ObservationGroupAxis('N_TELS', ntels, fmt='values'),
])
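# The corrected grouping (ZEN_PNT instead of ZENITH) would then be applied the
# same way as before (a sketch; datastore is the DataStore opened earlier in
# this session):
obs_table_grouped = obs_groups.apply(datastore.obs_table)
print(obs_table_grouped['GROUP_ID'])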