Example #1
def npy_npz_reader(filename, format='auto', auto_merge=False, **kwargs):
    """
    Read in a Numpy structured array saved to a .npy or .npz file.

    Parameters
    ----------
    filename : str
        The pathname to the Numpy save file.
    """

    import numpy as np
    data = np.load(filename)

    if isinstance(data, np.ndarray):
        data = {None: data}

    groups = []
    for groupname in sorted(data):

        d = Data(label=groupname)
        arr = data[groupname]

        if arr.dtype.names is None:
            comp = Component.autotyped(arr)
            d.add_component(comp, label='array')
        else:
            for name in arr.dtype.names:
                comp = Component.autotyped(arr[name])
                d.add_component(comp, label=name)

        groups.append(d)

    return groups
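A minimal usage sketch for the reader above (hypothetical file name; assumes Data and Component come from glue.core.data, the glue-core classes these snippets rely on):

import numpy as np
from glue.core.data import Data, Component  # assumed import context

arr = np.array([(1, 2.5), (2, 3.5)], dtype=[('x', 'i4'), ('y', 'f8')])
np.savez('example.npz', catalog=arr)  # hypothetical input file

datasets = npy_npz_reader('example.npz')
print([d.label for d in datasets])                # ['catalog']
print([c.label for c in datasets[0].components])  # coordinate components plus 'x' and 'y'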
Example #2
def astropy_tabular_data(*args, **kwargs):
    """
    Build a data set from a table. We restrict ourselves to tables
    with 1D columns.

    All arguments are passed to
        astropy.table.Table.read(...).
    """

    import numpy as np

    result = Data()

    table = astropy_table_read(*args, **kwargs)

    result.meta = table.meta

    # Loop through columns and make component list
    for column_name in table.columns:
        c = table[column_name]
        u = c.unit if hasattr(c, 'unit') else c.units

        if table.masked:
            # fill array for now
            try:
                c = c.filled(fill_value=np.nan)
            except (ValueError, TypeError):  # assigning nan to integer dtype
                c = c.filled(fill_value=-1)

        nc = Component.autotyped(c, units=u)
        result.add_component(nc, column_name)

    return result
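A standalone sketch of the masked-column handling above, using only astropy and numpy: float columns fill with NaN, while integer columns fall back to the -1 sentinel when NaN cannot be cast:

import numpy as np
from astropy.table import Table, MaskedColumn

t = Table([MaskedColumn([1, 2, 3], name='i', mask=[False, True, False]),
           MaskedColumn([1.0, 2.0, 3.0], name='f', mask=[True, False, False])],
          masked=True)

for name in t.columns:
    c = t[name]
    try:
        c = c.filled(fill_value=np.nan)
    except (ValueError, TypeError):  # assigning nan to integer dtype
        c = c.filled(fill_value=-1)
    print(name, np.asarray(c))  # i -> [ 1 -1  3], f -> [nan  2.  3.]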
Example #3
def _nddata_to_glue_data(ndd, data_label):
    if ndd.data.ndim != 2:
        raise ValueError(f'Imviz cannot load this NDData with ndim={ndd.data.ndim}')

    for attrib in ['data', 'mask', 'uncertainty']:
        arr = getattr(ndd, attrib)
        if arr is None:
            continue
        comp_label = attrib.upper()
        cur_label = f'{data_label}[{comp_label}]'
        cur_data = Data(label=cur_label)
        cur_data.meta.update(ndd.meta)
        if ndd.wcs is not None:
            cur_data.coords = ndd.wcs
        raw_arr = arr
        if attrib == 'data':
            bunit = ndd.unit or ''
        elif attrib == 'uncertainty':
            raw_arr = arr.array
            bunit = arr.unit or ''
        else:
            bunit = ''
        component = Component.autotyped(raw_arr, units=bunit)
        cur_data.add_component(component=component, label=comp_label)
        yield cur_data, cur_label
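A hedged usage sketch for the generator above, feeding it an astropy NDData with an attached uncertainty ('obs1' is an arbitrary label):

import numpy as np
from astropy.nddata import NDData, StdDevUncertainty
from glue.core.data import Data, Component  # assumed import context

ndd = NDData(np.ones((2, 3)), unit='Jy',
             uncertainty=StdDevUncertainty(np.full((2, 3), 0.1)))

for data, label in _nddata_to_glue_data(ndd, 'obs1'):
    print(label)  # obs1[DATA], then obs1[UNCERTAINTY]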
Example #4
def npz_reader(filename, format='auto', auto_merge=False, **kwargs):
    """
    Read in a Numpy structured array saved to a .npz file.

    Parameters
    ----------
    filename : str
        The pathname to the Numpy save file.
    """

    import numpy as np
    npy_data = np.load(filename)

    groups = []
    for groupname in sorted(npy_data.files):
        d = Data(label=groupname)
        arr = npy_data[groupname]

        if arr.dtype.names is None:
            raise ValueError("Numpy save file loading currently only supports structured"
                             " arrays, e.g., with specified names.")

        for name in arr.dtype.names:
            comp = Component.autotyped(arr[name])
            d.add_component(comp, label=name)

        groups.append(d)

    return groups
Example #5
def _ndarray_to_glue_data(arr, data_label):
    if arr.ndim != 2:
        raise ValueError(f'Imviz cannot load this array with ndim={arr.ndim}')

    data = Data(label=data_label)
    component = Component.autotyped(arr)
    data.add_component(component=component, label='DATA')
    yield data, data_label
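The bare-array generator can be exercised the same way; a minimal sketch assuming the glue-core imports:

import numpy as np
from glue.core.data import Data, Component  # assumed import context

for data, label in _ndarray_to_glue_data(np.zeros((4, 5)), 'my_image'):
    print(label, data.shape)  # my_image (4, 5)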
Example #6
def hdf5_reader(filename, format='auto', auto_merge=False, **kwargs):
    """
    Read in all datasets from an HDF5 file

    Parameters
    ----------
    filename : str
        The pathname to the HDF5 file.
    """

    import h5py
    from astropy.table import Table

    # Open file
    file_handle = h5py.File(filename, 'r')

    # Read in all datasets
    datasets = extract_hdf5_datasets(file_handle)

    label_base = os.path.basename(filename).rpartition('.')[0]

    if not label_base:
        label_base = os.path.basename(filename)

    data_by_shape = {}

    groups = OrderedDict()

    for key in datasets:
        label = '{0}[{1}]'.format(label_base, key)
        if datasets[key].dtype.kind in ('f', 'i'):
            arr = datasets[key][()]  # Dataset.value was removed in h5py 3.0
            if auto_merge and arr.shape in data_by_shape:
                data = data_by_shape[arr.shape]
            else:
                data = Data(label=label)
                data_by_shape[arr.shape] = data
                groups[label] = data
            data.add_component(component=arr, label=key)
        else:
            table = Table.read(datasets[key], format='hdf5')
            data = Data(label=label)
            groups[label] = data
            for column_name in table.columns:
                column = table[column_name]
                if column.ndim == 1:
                    component = Component(column, units=column.unit)
                    data.add_component(component=component, label=column_name)
                else:
                    warnings.warn(
                        "HDF5: Ignoring vector column {0}".format(column_name))

    # Close HDF5 file
    file_handle.close()

    return [groups[idx] for idx in groups]
Example #7
def hdf5_reader(filename, auto_merge=False, memmap=True, **kwargs):
    """
    Read in all datasets from an HDF5 file

    Parameters
    ----------
    filename : str
        The filename of the HDF5 file
    memmap : bool, optional
        Whether to use memory mapping
    """

    from astropy.table import Table

    # Read in all datasets
    datasets = extract_hdf5_datasets(filename, memmap=memmap)

    label_base = os.path.basename(filename).rpartition('.')[0]

    if not label_base:
        label_base = os.path.basename(filename)

    data_by_shape = {}

    groups = OrderedDict()

    for key in datasets:
        label = '{0}[{1}]'.format(label_base, key)
        array = datasets[key]
        if isinstance(array, Table):
            data = Data(label=label)
            groups[label] = data
            for column_name in array.columns:
                column = array[column_name]
                if column.ndim == 1:
                    component = Component.autotyped(column, units=column.unit)
                    data.add_component(component=component, label=column_name)
                else:
                    warnings.warn(
                        "HDF5: Ignoring vector column {0}".format(column_name))
        else:
            if auto_merge and array.shape in data_by_shape:
                data = data_by_shape[array.shape]
            else:
                data = Data(label=label)
                data_by_shape[array.shape] = data
                groups[label] = data
            data.add_component(component=array, label=key[1:])

    return [groups[idx] for idx in groups]
Example #8
def _hdu2data(hdu, data_label, hdulist, include_wcs=True):
    if 'BUNIT' in hdu.header and _validate_bunit(hdu.header['BUNIT'], raise_error=False):
        bunit = hdu.header['BUNIT']
    else:
        bunit = ''

    comp_label = f'{hdu.name.upper()},{hdu.ver}'
    new_data_label = f'{data_label}[{comp_label}]'

    data = Data(label=new_data_label)
    if include_wcs:
        data.coords = WCS(hdu.header, hdulist)
    component = Component.autotyped(hdu.data, units=bunit)
    data.add_component(component=component, label=comp_label)

    return data, new_data_label
Example #9
def _hdu2data(hdu, data_label, hdulist, include_wcs=True):
    if 'BUNIT' in hdu.header:
        bunit = _validate_bunit(hdu.header['BUNIT'], raise_error=False)
    else:
        bunit = ''

    comp_label = f'{hdu.name.upper()},{hdu.ver}'
    new_data_label = f'{data_label}[{comp_label}]'

    data = Data(label=new_data_label)
    if hdulist is not None and hdu.name != 'PRIMARY' and 'PRIMARY' in hdulist:
        data.meta.update(dict(hdulist['PRIMARY'].header))
    data.meta.update(dict(hdu.header))
    if include_wcs:
        data.coords = WCS(hdu.header, hdulist)
    component = Component.autotyped(hdu.data, units=bunit)
    data.add_component(component=component, label=comp_label)

    return data, new_data_label
Example #10
def _jwst2data(file_obj, ext, data_label):
    comp_label = ext.upper()
    new_data_label = f'{data_label}[{comp_label}]'
    data = Data(label=new_data_label)
    unit_attr = f'bunit_{ext}'

    try:
        # This is very specific to JWST pipeline image output.
        with AsdfInFits.open(file_obj) as af:
            dm = af.tree
            dm_meta = af.tree["meta"]

            if (unit_attr in dm_meta and
                    _validate_bunit(dm_meta[unit_attr], raise_error=False)):
                bunit = dm_meta[unit_attr]
            else:
                bunit = ''

            # This is instance of gwcs.WCS, not astropy.wcs.WCS
            if 'wcs' in dm_meta:
                data.coords = dm_meta['wcs']

            imdata = dm[ext]
            component = Component.autotyped(imdata, units=bunit)

            # Might have bad GWCS. If so, we exclude it.
            try:
                data.add_component(component=component, label=comp_label)
            except Exception:  # pragma: no cover
                data.coords = None
                data.add_component(component=component, label=comp_label)

    # TODO: Do not need this when jwst.datamodels finally becomes its own package.
    # This might happen for grism image; fall back to FITS loader without WCS.
    except Exception:
        if ext == 'data':
            ext = 'sci'
        hdu = file_obj[ext]
        return _hdu2data(hdu, data_label, file_obj, include_wcs=False)

    return data, new_data_label
Example #11
def casalike_cube(filename, **kwargs):
    """
    This provides special support for 4D CASA FITS-like cubes,
    which have 2 spatial axes, a spectral axis, and a Stokes axis,
    in that order.

    Each Stokes cube is split out as a separate component.
    """
    from astropy.io import fits

    result = Data()

    if 'ignore_missing_end' not in kwargs:
        kwargs['ignore_missing_end'] = True

    with fits.open(filename, mode='denywrite', **kwargs) as hdulist:
        array = hdulist[0].data
        header = hdulist[0].header
    result.coords = coordinates_from_header(header)
    for i in range(array.shape[0]):
        units = header.get('BUNIT')
        component = Component.autotyped(array[[i]], units=units)
        result.add_component(component, label='STOKES %i' % i)
    return result
Example #12
def gridded_data(filename, format='auto', **kwargs):

    result = Data()

    # Try and automatically find the format if not specified
    if format == 'auto':
        format = file_format(filename)

    # Read in the data
    if is_fits(filename):
        from astropy.io import fits
        arrays = extract_data_fits(filename, **kwargs)
        header = fits.getheader(filename)
        result.coords = coordinates_from_header(header)
    elif is_hdf5(filename):
        arrays = extract_data_hdf5(filename, **kwargs)
    else:
        raise Exception("Unkonwn format: %s" % format)

    for component_name in arrays:
        comp = Component.autotyped(arrays[component_name])
        result.add_component(comp, component_name)

    return result
Example #13
def casalike_cube(filename, **kwargs):
    """
    This provides special support for 4D CASA FITS-like cubes,
    which have 2 spatial axes, a spectral axis, and a Stokes axis,
    in that order.

    Each Stokes cube is split out as a separate component.
    """
    from astropy.io import fits

    result = Data()

    if 'ignore_missing_end' not in kwargs:
        kwargs['ignore_missing_end'] = True

    with fits.open(filename, **kwargs) as hdulist:
        array = hdulist[0].data
        header = hdulist[0].header
    result.coords = coordinates_from_header(header)
    for i in range(array.shape[0]):
        units = header.get('BUNIT')
        component = Component.autotyped(array[[i]], units=units)
        result.add_component(component, label='STOKES %i' % i)
    return result
Example #14
def fits_reader(source, auto_merge=False, exclude_exts=None, label=None):
    """
    Read in all extensions from a FITS file.

    Parameters
    ----------
    source: str or HDUList
        The pathname to the FITS file.
        If an HDUList is passed in, simply use that.

    auto_merge: bool
        Merge extensions that have the same shape
        and only one has a defined WCS.

    exclude_exts: [hdu, ] or [index, ]
        List of HDUs to exclude from reading.
        This can be a list of HDUs or a list
        of HDU indexes.
    """

    from astropy.io import fits
    from astropy.table import Table

    exclude_exts = exclude_exts or []
    if not isinstance(source, fits.hdu.hdulist.HDUList):
        hdulist = fits.open(source)
        hdulist.verify('fix')
    else:
        hdulist = source
    groups = OrderedDict()
    extension_by_shape = OrderedDict()

    if label is not None:

        label_base = label

    else:

        hdulist_name = hdulist.filename()
        if hdulist_name is None:
            hdulist_name = "HDUList"

        label_base = basename(hdulist_name).rpartition('.')[0]

        if not label_base:
            label_base = basename(hdulist_name)

    # Create a new image Data.
    def new_data():
        label = '{0}[{1}]'.format(label_base, hdu_name)
        data = Data(label=label)
        data.coords = coords
        groups[hdu_name] = data
        extension_by_shape[shape] = hdu_name
        return data

    for extnum, hdu in enumerate(hdulist):
        hdu_name = hdu.name if hdu.name else "HDU{0}".format(extnum)
        if (hdu.data is not None and hdu.data.size > 0
                and hdu_name not in exclude_exts
                and extnum not in exclude_exts):
            if is_image_hdu(hdu):
                shape = hdu.data.shape
                coords = coordinates_from_header(hdu.header)
                if not auto_merge or has_wcs(coords):
                    data = new_data()
                else:
                    try:
                        data = groups[extension_by_shape[shape]]
                    except KeyError:
                        data = new_data()
                data.add_component(component=hdu.data, label=hdu_name)
            elif is_table_hdu(hdu):
                # Loop through columns and make component list
                table = Table(hdu.data)
                label = '{0}[{1}]'.format(label_base, hdu_name)
                data = Data(label=label)
                groups[hdu_name] = data
                for column_name in table.columns:
                    column = table[column_name]
                    component = Component(column, units=column.unit)
                    data.add_component(component=component, label=column_name)
    return [groups[idx] for idx in groups]
Example #15
def hdf5_reader(filename, auto_merge=True, memmap=True, **kwargs):
    """
    Read in all datasets from an HDF5 file

    Parameters
    ----------
    filename : str or file-like
        The path or file handle to the HDF5 file
    auto_merge : bool
        If all datasets have the same shape, and are at the base of the file,
        assume they are a column-based table and merge them into a single dataset.
    memmap : bool, optional
        Whether to use memory mapping
    """

    from astropy.table import Table

    # Read in all datasets
    datasets = extract_hdf5_datasets(filename, memmap=memmap)

    label_base = os.path.basename(filename).rpartition('.')[0]

    if not label_base:
        label_base = os.path.basename(filename)

    if len(datasets) == 0:
        return

    if not auto_merge or len(datasets) == 1 or any(
        [isinstance(data, Table) for data in datasets.values()]):
        merge_data = False
    else:
        reference_shape = list(datasets.values())[0].shape
        merge_data = all([
            data.shape == reference_shape and key.count('/') == 1
            for key, data in datasets.items()
        ])

    groups = OrderedDict()

    data = None

    for key in datasets:
        label = '{0}[{1}]'.format(label_base, key)
        array = datasets[key]
        if isinstance(array, Table):
            data = Data(label=label)
            groups[label] = data
            for column_name in array.columns:
                column = array[column_name]
                if column.ndim == 1:
                    component = Component.autotyped(column, units=column.unit)
                    data.add_component(component=component, label=column_name)
                else:
                    warnings.warn(
                        "HDF5: Ignoring vector column {0}".format(column_name))
        else:
            if data is None and merge_data:
                data = Data(label=label_base)
                groups[label_base] = data
            elif not merge_data:
                data = Data(label=label)
                groups[label] = data
            data.add_component(component=array, label=key[1:])

    return [groups[idx] for idx in sorted(groups)]
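A standalone sketch of the auto_merge heuristic described in the docstring above: root-level datasets sharing a single shape are treated as columns of one table. This uses plain h5py (hypothetical file name) and does not call the reader itself:

import h5py
import numpy as np

with h5py.File('columns.h5', 'w') as f:  # hypothetical file
    f['a'] = np.arange(3)
    f['b'] = np.arange(3) * 2.0

with h5py.File('columns.h5', 'r') as f:
    shapes = {'/' + name: f[name].shape for name in f}

# Mirror the merge test: one shared shape, and every key at the root ('/name').
merge_data = (len(set(shapes.values())) == 1 and
              all(key.count('/') == 1 for key in shapes))
print(shapes, merge_data)  # {'/a': (3,), '/b': (3,)} True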
Example #16
def fits_reader(source, auto_merge=False, exclude_exts=None, label=None):
    """
    Read in all extensions from a FITS file.

    Parameters
    ----------
    source: str or HDUList
        The pathname to the FITS file.
        If an HDUList is passed in, simply use that.

    auto_merge: bool
        Merge extensions that have the same shape
        and only one has a defined WCS.

    exclude_exts: [hdu, ] or [index, ]
        List of HDUs to exclude from reading.
        This can be a list of HDUs or a list
        of HDU indexes.
    """

    from astropy.io import fits
    from astropy.table import Table

    exclude_exts = exclude_exts or []

    if isinstance(source, fits.hdu.hdulist.HDUList):
        hdulist = source
        close_hdulist = False
    else:
        hdulist = fits.open(source, ignore_missing_end=True)
        hdulist.verify('fix')
        close_hdulist = True

    groups = OrderedDict()
    extension_by_shape = OrderedDict()

    if label is not None:

        label_base = label

    else:

        hdulist_name = hdulist.filename()
        if hdulist_name is None:
            hdulist_name = "HDUList"

        label_base = basename(hdulist_name).rpartition('.')[0]

        if not label_base:
            label_base = basename(hdulist_name)

    # Create a new image Data.
    def new_data(suffix=True):
        if suffix:
            label = '{0}[{1}]'.format(label_base, hdu_name)
        else:
            label = label_base
        data = Data(label=label)
        data.coords = coords

        # We need to be careful here because some header values are special
        # objects that we should convert to strings
        for key, value in hdu.header.items():
            if (key == 'COMMENT' or key == 'HISTORY'):
                if key not in data.meta:
                    data.meta[key] = [str(value)]
                else:
                    data.meta[key].append(str(value))
            elif isinstance(value, string_types) or isinstance(value, (int, float, bool)):
                data.meta[key] = value
            else:
                data.meta[key] = str(value)

        groups[hdu_name] = data
        extension_by_shape[shape] = hdu_name
        return data

    for extnum, hdu in enumerate(hdulist):
        hdu_name = hdu.name if hdu.name else "HDU{0}".format(extnum)
        if (hdu.data is not None and
                hdu.data.size > 0 and
                hdu_name not in exclude_exts and
                extnum not in exclude_exts):
            if is_image_hdu(hdu):
                shape = hdu.data.shape
                coords = coordinates_from_header(hdu.header)
                if not auto_merge or has_wcs(coords):
                    data = new_data(suffix=len(hdulist) > 1)
                else:
                    try:
                        data = groups[extension_by_shape[shape]]
                    except KeyError:
                        data = new_data(suffix=len(hdulist) > 1)
                data.add_component(component=hdu.data,
                                   label=hdu_name)
            elif is_table_hdu(hdu):
                # Loop through columns and make component list
                table = Table.read(hdu, format='fits')
                label = '{0}[{1}]'.format(label_base, hdu_name)
                data = Data(label=label)
                groups[hdu_name] = data
                for column_name in table.columns:
                    column = table[column_name]
                    if column.ndim != 1:
                        warnings.warn("Dropping column '{0}' since it is not 1-dimensional".format(column_name))
                        continue
                    component = Component.autotyped(column, units=column.unit)
                    data.add_component(component=component,
                                       label=column_name)

    if close_hdulist:
        hdulist.close()

    return [groups[idx] for idx in groups]
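A standalone sketch of the header-to-meta conversion inside new_data() above: COMMENT and HISTORY cards accumulate as lists of strings, plain scalar values pass through, and anything else is stringified:

from astropy.io import fits

header = fits.Header()
header['EXPTIME'] = 30.0
header.add_comment('first comment')
header.add_comment('second comment')

meta = {}
for key, value in header.items():
    if key in ('COMMENT', 'HISTORY'):
        meta.setdefault(key, []).append(str(value))
    elif isinstance(value, (str, int, float, bool)):
        meta[key] = value
    else:
        meta[key] = str(value)

print(meta)  # {'EXPTIME': 30.0, 'COMMENT': ['first comment', 'second comment']}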
Example #17
def _load_component(rec, context):
    if 'log' in rec:
        return context.object(rec['log']).component(rec['log_item'])

    return Component(data=context.object(rec['data']),
                     units=rec['units'])
Example #18
def fits_reader(source, auto_merge=False, exclude_exts=None, label=None):
    """
    Read in all extensions from a FITS file.

    Parameters
    ----------
    source: str or HDUList
        The pathname to the FITS file.
        If an HDUList is passed in, simply use that.

    auto_merge: bool
        Merge extensions that have the same shape
        and only one has a defined WCS.

    exclude_exts: [hdu, ] or [index, ]
        List of HDUs to exclude from reading.
        This can be a list of HDUs or a list
        of HDU indexes.
    """

    from astropy.io import fits
    from astropy.table import Table

    exclude_exts = exclude_exts or []
    if not isinstance(source, fits.hdu.hdulist.HDUList):
        hdulist = fits.open(source, ignore_missing_end=True)
        hdulist.verify('fix')
    else:
        hdulist = source
    groups = OrderedDict()
    extension_by_shape = OrderedDict()

    if label is not None:

        label_base = label

    else:

        hdulist_name = hdulist.filename()
        if hdulist_name is None:
            hdulist_name = "HDUList"

        label_base = basename(hdulist_name).rpartition('.')[0]

        if not label_base:
            label_base = basename(hdulist_name)

    # Create a new image Data.
    def new_data():
        label = '{0}[{1}]'.format(
            label_base,
            hdu_name
        )
        data = Data(label=label)
        data.coords = coords
        groups[hdu_name] = data
        extension_by_shape[shape] = hdu_name
        return data

    for extnum, hdu in enumerate(hdulist):
        hdu_name = hdu.name if hdu.name else "HDU{0}".format(extnum)
        if (hdu.data is not None and
                hdu.data.size > 0 and
                hdu_name not in exclude_exts and
                extnum not in exclude_exts):
            if is_image_hdu(hdu):
                shape = hdu.data.shape
                coords = coordinates_from_header(hdu.header)
                if not auto_merge or has_wcs(coords):
                    data = new_data()
                else:
                    try:
                        data = groups[extension_by_shape[shape]]
                    except KeyError:
                        data = new_data()
                data.add_component(component=hdu.data,
                                   label=hdu_name)
            elif is_table_hdu(hdu):
                # Loop through columns and make component list
                table = Table.read(hdu, format='fits')
                label = '{0}[{1}]'.format(
                    label_base,
                    hdu_name
                )
                data = Data(label=label)
                groups[hdu_name] = data
                for column_name in table.columns:
                    column = table[column_name]
                    if column.ndim != 1:
                        warnings.warn("Dropping column '{0}' since it is not 1-dimensional".format(column_name))
                        continue
                    component = Component.autotyped(column, units=column.unit)
                    data.add_component(component=component,
                                       label=column_name)
    return [groups[idx] for idx in groups]
Example #19
def fits_reader(source, auto_merge=False, exclude_exts=None, label=None):
    """
    Read in all extensions from a FITS file.

    Parameters
    ----------
    source: str or HDUList
        The pathname to the FITS file.
        If an HDUList is passed in, simply use that.

    auto_merge: bool
        Merge extensions that have the same shape
        and only one has a defined WCS.

    exclude_exts: [hdu, ] or [index, ]
        List of HDUs to exclude from reading.
        This can be a list of HDUs or a list
        of HDU indexes.
    """

    from astropy.io import fits
    from astropy.table import Table

    exclude_exts = exclude_exts or []

    if isinstance(source, fits.hdu.hdulist.HDUList):
        hdulist = source
        close_hdulist = False
    else:
        hdulist = fits.open(source, ignore_missing_end=True)
        hdulist.verify('fix')
        close_hdulist = True

    groups = OrderedDict()
    extension_by_shape = OrderedDict()

    if label is not None:

        label_base = label

    else:

        hdulist_name = hdulist.filename()
        if hdulist_name is None:
            hdulist_name = "HDUList"

        label_base = basename(hdulist_name).rpartition('.')[0]

        if not label_base:
            label_base = basename(hdulist_name)

    # Create a new image Data.
    def new_data(suffix=True):
        if suffix:
            label = '{0}[{1}]'.format(label_base, hdu_name)
        else:
            label = label_base
        data = Data(label=label)
        data.coords = coords

        # We need to be careful here because some header values are special
        # objects that we should convert to strings
        for key, value in hdu.header.items():
            if (key == 'COMMENT' or key == 'HISTORY'):
                if key not in data.meta:
                    data.meta[key] = [str(value)]
                else:
                    data.meta[key].append(str(value))
            elif isinstance(value, string_types) or isinstance(value, (int, float, bool)):
                data.meta[key] = value
            else:
                data.meta[key] = str(value)

        groups[hdu_name] = data
        extension_by_shape[shape] = hdu_name
        return data

    for extnum, hdu in enumerate(hdulist):
        hdu_name = hdu.name if hdu.name else "HDU{0}".format(extnum)
        if (hdu.data is not None and
                hdu.data.size > 0 and
                hdu_name not in exclude_exts and
                extnum not in exclude_exts):
            if is_image_hdu(hdu):
                shape = hdu.data.shape
                coords = coordinates_from_header(hdu.header)
                units = hdu.header.get('BUNIT')
                if not auto_merge or has_wcs(coords):
                    data = new_data(suffix=len(hdulist) > 1)
                else:
                    try:
                        data = groups[extension_by_shape[shape]]
                    except KeyError:
                        data = new_data(suffix=len(hdulist) > 1)
                component = Component.autotyped(hdu.data, units=units)
                data.add_component(component=component,
                                   label=hdu_name)
            elif is_table_hdu(hdu):
                # Loop through columns and make component list
                table = Table.read(hdu, format='fits')
                label = '{0}[{1}]'.format(label_base, hdu_name)
                data = Data(label=label)
                groups[hdu_name] = data
                for column_name in table.columns:
                    column = table[column_name]
                    if column.ndim != 1:
                        warnings.warn("Dropping column '{0}' since it is not 1-dimensional".format(column_name))
                        continue
                    component = Component.autotyped(column, units=column.unit)
                    data.add_component(component=component,
                                       label=column_name)

    if close_hdulist:
        hdulist.close()

    return [groups[idx] for idx in groups]