def make_concatenated_index_files():
    """Make index files for all observations combined."""
    datasets = ['agn', 'egal', 'gc', 'gps']

    (BASE_PATH / 'index/all').mkdir(exist_ok=True)

    table = table_vstack([
        Table.read(BASE_PATH / 'index' / dataset / 'obs-index.fits.gz')
        for dataset in datasets
    ],
                         metadata_conflicts='silent')
    # table.meta = OrderedDict()
    add_provenance(table.meta)
    table.meta['dataset'] = 'all'

    path = BASE_PATH / 'index/all/obs-index.fits'
    hdu = fits.BinTableHDU(table)
    hdu.name = 'OBS_INDEX'
    write_fits_gz(hdu, path)

    table = table_vstack([
        Table.read(BASE_PATH / 'index' / dataset / 'hdu-index.fits.gz')
        for dataset in datasets
    ],
                         metadata_conflicts='silent')
    # table.meta = OrderedDict()
    add_provenance(table.meta)
    table.meta['dataset'] = 'all'

    path = BASE_PATH / 'index/all/hdu-index.fits'
    hdu = fits.BinTableHDU(table)
    hdu.name = 'HDU_INDEX'
    write_fits_gz(hdu, path)
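write_fits_gz is a project-local helper that is not shown in this snippet. A minimal sketch of what it plausibly does, assuming astropy's convention that filenames ending in '.gz' are written gzip-compressed; this reconstruction is a guess, not the project's actual code, and `log` is assumed to be a module-level logger:

def write_fits_gz(hdu, path):
    """Hypothetical sketch of the helper used above."""
    # Astropy gzip-compresses the output when the filename ends in '.gz'.
    path_gz = path.parent / (path.name + '.gz')
    log.info(f'Writing {path_gz}')
    hdu.writeto(path_gz, overwrite=True)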
Example #2
def make_concatenated_index_files():
    """Make index files for all observations combined."""
    datasets = ['agn', 'egal', 'gc', 'gps']

    (BASE_PATH / 'index/all').mkdir(exist_ok=True)

    table = table_vstack([
        Table.read(BASE_PATH / 'index' / dataset / 'obs-index.fits.gz')
        for dataset in datasets
    ],
                         metadata_conflicts='silent')
    table.meta = OrderedDict()
    add_provenance(table.meta)
    table.meta['dataset'] = 'all'

    filename = BASE_PATH / 'index/all/obs-index.fits.gz'
    log.info(f'Writing {filename}')
    table.write(filename, overwrite=True)

    table = table_vstack([
        Table.read(BASE_PATH / 'index' / dataset / 'hdu-index.fits.gz')
        for dataset in datasets
    ],
                         metadata_conflicts='silent')
    table.meta = OrderedDict()
    add_provenance(table.meta)
    table.meta['dataset'] = 'all'

    filename = BASE_PATH / 'index/all/hdu-index.fits.gz'
    log.info(f'Writing {filename}')
    table.write(filename, overwrite=True)
Example #3
    def event_table(self):
        """Event table (`~astropy.table.Table`).

        Columns: GLON, GLAT, SOURCE_IDX
        """
        # Create event list table for each source
        tables = []
        for source in self.source_table:
            lon = source['GLON'] * np.ones(source['COUNTS'])
            lat = source['GLAT'] * np.ones(source['COUNTS'])
            coord = SkyCoord(lon, lat, unit='deg', frame='galactic')

            # TODO: scatter positions assuming Gaussian PSF on the sky
            # using SkyOffsetFrame.

            table = Table()
            table['GLON'] = lon
            table['GLAT'] = lat
            table['SOURCE_IDX'] = source.index

            tables.append(table)

        # Stack all tables together
        table = table_vstack(tables)

        return table
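The TODO above can be sketched with astropy's SkyOffsetFrame. A standalone example, where the source position, event count, and the 0.1 deg PSF width are all made-up values for illustration:

import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord, SkyOffsetFrame

center = SkyCoord(42.0, 0.0, unit='deg', frame='galactic')  # made-up source
n_events = 1000
psf_sigma = 0.1 * u.deg  # assumed Gaussian PSF width

# Draw Gaussian offsets in a frame centered on the source,
# then transform back to Galactic coordinates.
d_lon = np.random.normal(0, psf_sigma.value, n_events) * u.deg
d_lat = np.random.normal(0, psf_sigma.value, n_events) * u.deg
offsets = SkyCoord(d_lon, d_lat, frame=SkyOffsetFrame(origin=center))
coords = offsets.transform_to('galactic')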
Example #4
def catalog_xmatch_combine(associations):
    """Combine (vertical stack) association tables.

    Parameters
    ----------
    associations : dict of (str, `~astropy.table.Table`)
        Mapping of catalog name to association table.

    Returns
    -------
    combined_associations : `~astropy.table.Table`
        Combined associations table.
    """
    # Add a column to each table with the catalog name
    for name, table in associations.items():
        log.debug('{:10s} has {:5d} rows'.format(name, len(table)))
        if len(table) != 0:
            table['Association_Catalog'] = name

    table = table_vstack(list(associations.values()))

    # Sort table columns the way we like it
    names = [
        'Source_Name', 'Association_Catalog', 'Association_Name', 'Separation'
    ]
    table = table[names]

    log.debug('Combined number of associations: {}'.format(len(table)))

    return table
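A tiny usage sketch for the function above, with made-up association tables (real ones come from a catalog cross-match):

from astropy.table import Table

associations = {
    'gamma-cat': Table({'Source_Name': ['Source 1'],
                        'Association_Name': ['Assoc A'],
                        'Separation': [0.10]}),
    '3fgl': Table({'Source_Name': ['Source 1'],
                   'Association_Name': ['Assoc B'],
                   'Separation': [0.25]}),
}
combined = catalog_xmatch_combine(associations)
# combined has two rows and the columns Source_Name, Association_Catalog,
# Association_Name, Separation.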
Example #6
def add_new_row_to_cache_log(scenario,
        simname, halo_finder, redshift, version_name, **kwargs):
    if isinstance(scenario, int):
        scenario = str(scenario)

    try:
        new_halo_table_fname = kwargs['fname']
    except KeyError:
        new_halo_table_basename = (simname + '.' + halo_finder + '.' +
            'z' + str(np.round(redshift, 3)) + '.' + version_name + '.hdf5')
        scenario_dirname = get_scenario_cache_fname(scenario)
        new_halo_table_fname = os.path.join(scenario_dirname,
            'halo_tables', simname, halo_finder, new_halo_table_basename)

    redshift = np.round(redshift, 4)
    new_table = Table(
        {'simname': [simname], 'halo_finder': [halo_finder],
        'redshift': [redshift], 'version_name': [version_name],
        'fname': [new_halo_table_fname]}
        )

    try:
        existing_table = kwargs['existing_table']
        return table_vstack([existing_table, new_table])
    except KeyError:
        return new_table
Example #8
def add_new_row_to_cache_log(scenario, simname, halo_finder, redshift,
                             version_name, **kwargs):
    if isinstance(scenario, int):
        scenario = str(scenario)

    try:
        new_halo_table_fname = kwargs['fname']
    except KeyError:
        new_halo_table_basename = (simname + '.' + halo_finder + '.' + 'z' +
                                   str(np.round(redshift, 3)) + '.' +
                                   version_name + '.hdf5')
        scenario_dirname = get_scenario_cache_fname(scenario)
        new_halo_table_fname = os.path.join(scenario_dirname, 'halo_tables',
                                            simname, halo_finder,
                                            new_halo_table_basename)

    redshift = np.round(redshift, 4)
    new_table = Table({
        'simname': [simname],
        'halo_finder': [halo_finder],
        'redshift': [redshift],
        'version_name': [version_name],
        'fname': [new_halo_table_fname]
    })

    try:
        existing_table = kwargs['existing_table']
        return table_vstack([existing_table, new_table])
    except KeyError:
        return new_table
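A usage sketch for the function above; the argument values are made up, and fname is passed explicitly so that get_scenario_cache_fname is never called:

log_table = add_new_row_to_cache_log(
    0, 'bolshoi', 'rockstar', 0.5, 'v1', fname='/tmp/halos_z0.5.hdf5')
log_table = add_new_row_to_cache_log(
    0, 'bolshoi', 'rockstar', 1.0, 'v1', fname='/tmp/halos_z1.0.hdf5',
    existing_table=log_table)
# log_table now has two rows, combined via table_vstack on the second call.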
Example #9
    def to_total_table(self):
        """Table with one energy bin per row (`~astropy.table.Table`).

        Columns:

        * ``energy_group_idx`` - Energy group index (int)
        * ``bin_idx`` - Energy bin index (int)
        * ``bin_type`` - Bin type {'normal', 'underflow', 'overflow'} (str)

        There are no energy columns, because the per-bin energy info
        was lost during grouping.
        """
        tables = [group.bin_table for group in self]
        return table_vstack(tables)
Example #10
def make_concatenated_index_files():
    """Make index files for all observations combined."""
    # datasets = ['agn', 'egal', 'gc', 'gps']
    datasets = ['gps']

    import os
    dname = str(BASE_PATH) + '/index/all'
    if not os.path.exists(dname):
        os.makedirs(dname)
    # (BASE_PATH / 'index/all').mkdir(exist_ok=True) in python3

    table = table_vstack([
        Table.read(BASE_PATH / 'index' / dataset / 'obs-index.fits.gz')
        for dataset in datasets
    ],
                         metadata_conflicts='silent')
    table.meta = OrderedDict()
    add_provenance(table.meta)
    table.meta['dataset'] = 'all'

    filename = BASE_PATH / 'index/all/obs-index.fits.gz'
    print('Writing {}'.format(filename))
    table.write(filename, overwrite=True)

    table = table_vstack([
        Table.read(BASE_PATH / 'index' / dataset / 'hdu-index.fits.gz')
        for dataset in datasets
    ],
                         metadata_conflicts='silent')
    table.meta = OrderedDict()
    add_provenance(table.meta)
    table.meta['dataset'] = 'all'

    filename = BASE_PATH / 'index/all/hdu-index.fits.gz'
    print('Writing {}'.format(filename))
    table.write(filename, overwrite=True)
Example #11
import os

from astropy.table import Table
from astropy.table import vstack as table_vstack

filename = os.path.join(
    os.environ["GAMMAPY_DATA"],
    "cta-1dc/data/baseline/gps/gps_baseline_110380.fits",
)
t1 = Table.read(filename, hdu="EVENTS")

filename = os.path.join(
    os.environ["GAMMAPY_DATA"],
    "cta-1dc/data/baseline/gps/gps_baseline_111140.fits",
)
t2 = Table.read(filename, hdu="EVENTS")
tables = [t1, t2]
table = table_vstack(tables, metadata_conflicts="silent")

print("Number of events: ", len(table))

# Let's select gamma rays (MC_ID != 1, i.e. exclude background events)
# with energy above 10 TeV
mask_mc_id = table["MC_ID"] != 1
mask_energy = table["ENERGY"] > 10
mask = mask_mc_id & mask_energy
table2 = table[mask]
print("Number of events after selection:", len(table2))

# When processing many or all of the 1DC events, write a for loop and apply the event selection before putting each table in the list of tables; otherwise you might run out of memory. An example is [here](https://github.com/gammasky/cta-dc/blob/master/data/cta_1dc_make_allsky_images.py).
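A minimal sketch of that pattern, reusing the files from above (the obs_ids list is a made-up subset; the real 1DC survey has many more runs):

import os

from astropy.table import Table
from astropy.table import vstack as table_vstack

obs_ids = [110380, 111140]  # hypothetical subset of 1DC observation IDs

tables = []
for obs_id in obs_ids:
    filename = os.path.join(
        os.environ["GAMMAPY_DATA"],
        "cta-1dc/data/baseline/gps/gps_baseline_{}.fits".format(obs_id),
    )
    t = Table.read(filename, hdu="EVENTS")
    # Select per observation, before stacking, so that only the
    # selected events are ever held in memory.
    mask = (t["MC_ID"] != 1) & (t["ENERGY"] > 10)
    tables.append(t[mask])

table = table_vstack(tables, metadata_conflicts="silent")
print("Number of selected events:", len(table))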
Example #12
    def stage_data(self, uids, expand_tarfiles=False, return_json=False):
        """
        Obtain table of ALMA files

        Parameters
        ----------
        uids : list or str
            A list of valid UIDs or a single UID.
            UIDs should have the form: 'uid://A002/X391d0b/X7b'
        expand_tarfiles : bool
            Expand the tarfiles to obtain lists of all contained files.  If
            this is specified, the parent tarfile will *not* be included
        return_json : bool
            Return a list of the JSON data sets returned from the query.  This
            is primarily intended as a debug routine, but may be useful if there
            are unusual scheduling block layouts.

        Returns
        -------
        data_file_table : Table
            A table containing 3 columns: the UID, the file URL (for future
            downloading), and the file size
        """

        dataarchive_url = self._get_dataarchive_url()

        # allow for the uid to be specified as single entry
        if isinstance(uids, str):
            uids = [uids]

        tables = []
        for uu in uids:
            log.debug("Retrieving metadata for {0}".format(uu))
            uid = clean_uid(uu)
            req = self._request('GET', '{dataarchive_url}/rh/data/expand/{uid}'
                                .format(dataarchive_url=dataarchive_url,
                                        uid=uid),
                                cache=False)
            req.raise_for_status()
            try:
                jdata = req.json()
            # Note this exception does not work in Python 2.7
            except json.JSONDecodeError:
                if 'Central Authentication Service' in req.text or 'recentRequests' in req.url:
                    # this indicates a wrong server is being used;
                    # the "pre-feb2020" stager will be phased out
                    # when the new services are deployed
                    raise RemoteServiceError("Failed query!  This shouldn't happen - please "
                                             "report the issue as it may indicate a change in "
                                             "the ALMA servers.")
                else:
                    raise

            if return_json:
                tables.append(jdata)
            else:
                if jdata['type'] != 'PROJECT':
                    log.error("Skipped uid {uu} because it is not a project and"
                              "lacks the appropriate metadata; it is a "
                              "{jdata}".format(uu=uu, jdata=jdata['type']))
                    continue
                if expand_tarfiles:
                    table = uid_json_to_table(jdata, productlist=['ASDM',
                                                                  'PIPELINE_PRODUCT'])
                else:
                    table = uid_json_to_table(jdata,
                                              productlist=['ASDM',
                                                           'PIPELINE_PRODUCT',
                                                           'PIPELINE_PRODUCT_TARFILE',
                                                           'PIPELINE_AUXILIARY_TARFILE'])
                table['sizeInBytes'].unit = u.B
                table.rename_column('sizeInBytes', 'size')
                table.add_column(Column(data=['{dataarchive_url}/dataPortal/{name}'
                                              .format(dataarchive_url=dataarchive_url,
                                                      name=name)
                                              for name in table['name']],
                                        name='URL'))

                isp = self.is_proprietary(uid)
                table.add_column(Column(data=[isp for row in table],
                                        name='isProprietary'))

                tables.append(table)
                log.debug("Completed metadata retrieval for {0}".format(uu))

        if len(tables) == 0:
            raise ValueError("No valid UIDs supplied.")

        if return_json:
            return tables

        table = table_vstack(tables)

        return table
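This method is from astroquery's ALMA module; a usage sketch, assuming the released astroquery API (network access required, column names per the code above):

from astroquery.alma import Alma

alma = Alma()
files = alma.stage_data('uid://A002/X391d0b/X7b')
print(files['URL', 'size'])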
Example #13
 def events(self):
     """All events"""
     return table_vstack([_.events for _ in self.target.obs])