Code Example #1

import warnings

from glue.core import Data
from glue.core import data_factories as df
from glue.config import data_factory

def test_ambiguous_format(tmpdir):

    @data_factory('b', identifier=df.has_extension('spam'), priority=34)
    def reader1(filename):
        return Data()

    @data_factory('a', identifier=df.has_extension('spam'), priority=34)
    def reader2(filename):
        return Data()

    @data_factory('c', identifier=df.has_extension('spam'), priority=22)
    def reader3(filename):
        return Data()

    filename = tmpdir.join('test.spam').strpath
    with open(filename, 'w') as f:
        f.write('Camelot!')

    # Among the highest-priority matches, find_factory should emit a
    # warning and pick the first factory label in alphabetical order

    with warnings.catch_warnings(record=True) as w:
        factory = df.find_factory(filename)

    assert len(w) == 1
    assert str(w[0].message) == "Multiple data factories matched the input: 'a', 'b'. Choosing 'a'."

    assert factory is reader2
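
For reference, the selection rule this test encodes can be written as a
one-line sort: take the highest priority, breaking ties alphabetically by
factory label. This is an illustrative sketch only, not glue's actual
implementation; matches, priority, and label are assumed names.

    # Illustrative only: among matching factories, prefer the highest
    # priority, then the alphabetically first label.
    best = sorted(matches, key=lambda m: (-m.priority, m.label))[0]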
Code Example #2
File: test_data_factories.py Project: dhomeier/glue

from glue.core import data_factories as df
from glue.core.data import BaseCartesianData
from glue.config import data_factory

def test_basedata(tmpdir):

    # Regression test for a bug that caused load_data to fail if a data
    # factory returned a BaseData (but not Data) subclass, due to the
    # LoadLog expecting a Data object

    class BigData(BaseCartesianData):
        def get_kind(self):
            pass

        def compute_histogram(self):
            pass

        def compute_statistic(self):
            pass

        def get_mask(self):
            pass

        @property
        def shape(self):
            pass

        @property
        def main_components(self):
            return []

    @data_factory('bigdata',
                  identifier=df.has_extension('bigdata'),
                  priority=34)
    def reader(filename):
        return BigData()

    filename = tmpdir.join('test.bigdata').strpath
    with open(filename, 'w') as f:
        f.write('Camelot!')

    d = df.load_data(filename)

    assert isinstance(d, BigData)
Code Example #3

import os

import numpy as np
import regions

from glue.core import Data
from glue.core.component import Component
from glue.config import data_factory, layer_action
from glue.core.data_factories import has_extension

class RegionData(Data):

    def to_subset(self, wcs):
        print("thing with to_subset: ",self)
        preg = [reg.to_pixel(wcs)
                if hasattr(reg, 'to_pixel')
                else reg
                for reg in self['regions']]

        subsets = [reg_to_roi(reg) for reg in preg]
        return subsets



@data_factory('DS9 Region File', has_extension('reg'), default='reg')
def ds9_region(filename):
    reg = regions.read_ds9(filename)

    comp = Component(np.ones(len(reg), dtype='bool'))
    data = RegionData(label='Regions: {0}'.format(os.path.split(filename)[-1]),
                      regions=reg)

    return data

@layer_action(label='Convert regions to subset')
def layer_to_subset(selected_layers, data_collection):

    # loop over selected  layers
    for layer in selected_layers:
        if isinstance(layer, RegionData):
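
The snippet ends mid-loop. A hedged sketch of how the conversion might
continue; the WCS lookup and subset wiring below are assumptions for
illustration, not the original project's code.

    # Hypothetical continuation of layer_to_subset:
    for layer in selected_layers:
        if isinstance(layer, RegionData):
            # Assumption: take the target WCS from the first dataset
            # in the collection.
            target_wcs = data_collection[0].coords.wcs
            for roi in layer.to_subset(target_wcs):
                # Each ROI could seed a new subset; the exact component
                # wiring depends on the target data.
                ...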
Code Example #4

import os

import numpy as np
from PIL import Image
from astropy.wcs import WCS

from glue.core import Data
from glue.core.coordinates import coordinates_from_wcs
from glue.config import data_factory
from glue.core.data_factories import has_extension


def tfw_to_coords(filename, shp):
    # Signature reconstructed from the call site below:
    # tfw_to_coords(base + '.tfw', data.shape)
    with open(filename) as hfile:
        hdr = hfile.read().splitlines()
    hdr = list(map(float, hdr))  # map() is lazy in Python 3; indexing below needs a list
    hdr = dict(CD1_1=hdr[0],
               CD1_2=hdr[1] * (-1),
               CD2_1=hdr[2],
               CD2_2=hdr[3] * (-1),
               CRVAL1=hdr[4],
               CRVAL2=hdr[5],
               CRPIX1=0,
               CRPIX2=shp[0])
    wcs = WCS(hdr)
    return coordinates_from_wcs(wcs)
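
For context, the .tfw world file read above is six ASCII lines of numbers,
consumed in the order CD1_1, CD1_2, CD2_1, CD2_2, CRVAL1, CRVAL2. A hedged
usage sketch with made-up values:

    # Hypothetical test.tfw contents, one value per line:
    #   0.0002777  (CD1_1)
    #   0.0        (CD1_2, negated on read)
    #   0.0        (CD2_1)
    #   0.0002777  (CD2_2, negated on read)
    #   -120.5     (CRVAL1)
    #   38.7       (CRVAL2)
    coords = tfw_to_coords('test.tfw', shp=(1024, 1024))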


@data_factory('GIS TIFF', has_extension('tiff tif'), default='tif')
def read_tiff_metadata(filename):
    """ Read a TIFF image, looking for .tfw metadata """
    base, ext = os.path.splitext(filename)
    data = np.flipud(np.array(Image.open(filename).convert('L')))

    result = Data()

    if os.path.exists(base + '.tfw'):
        result.coords = tfw_to_coords(base + '.tfw', data.shape)

    result.add_component(data, 'map')
    return result


@data_factory('GIS Shapefile', has_extension('shx shp dbf'),
Code Example #5
File: config.py Project: andreas-h/glue-data-loaders
from glue.core import Data
from glue.core.coordinates import coordinates_from_wcs
from glue.config import data_factory
from glue.core.data_factories import has_extension

import pyfits as fits
from stwcs.wcsutil import HSTWCS


@data_factory('Hubble Image', has_extension('fits fit'), default='fits fit')
def hubble_data(filename):
    """
    Data loader customized for 'typical' hubble fits files

    This function extracts groups of (SCI, ERR, and DQ) extensions
    from a file. Each group is returned as a glue Data object.

    HSTWCS objects are used to parse wcs.

    Any other extensions are ignored
    """
    # Assumption: relevant SCI/ERR/DQ arrays are grouped together,
    # with the SCI component first

    result = []

    hdulist = fits.open(filename, memmap=True)

    label = filename.split('.')[0]
    label = label.split('/')[-1].split('\\')[-1]
    index = 0
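
The excerpt is cut off before the extraction loop. A hedged sketch of how
the SCI/ERR/DQ grouping described in the docstring might continue inside
hubble_data; the loop body, including the HSTWCS call, is an assumption
rather than the project's code.

    # Hypothetical continuation: one glue Data object per SCI extension,
    # with following ERR/DQ extensions attached to the same group.
    for i, hdu in enumerate(hdulist):
        if hdu.name == 'SCI':
            index += 1
            d = Data(label='{0}[{1}]'.format(label, index))
            # Assumption: HSTWCS accepts the open HDUList plus an
            # extension index.
            d.coords = coordinates_from_wcs(HSTWCS(hdulist, i))
            d.add_component(hdu.data, 'SCI')
            result.append(d)
        elif hdu.name in ('ERR', 'DQ') and result:
            # Assumes arrays in a group share a shape
            result[-1].add_component(hdu.data, hdu.name)
    return result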
Code Example #6
File: loaders.py Project: LLi1996/galfaglue
from __future__ import print_function

from glue.core import Data
from glue.core.coordinates import coordinates_from_header
from glue.config import data_factory
from glue.core.data_factories import has_extension
from glue.external.astro import fits
import numpy as np

@data_factory('GALFA HI cube', has_extension('fits fit'))
def _load_GALFAHI_data(filename, **kwargs):
    def _get_cube_center(header, cubeshape):
        # Pixel centers along each axis, in degrees
        ra = header['CRVAL1'] + header['CDELT1'] * (np.arange(cubeshape[2]) + 0.5 - header['CRPIX1'])
        dec = header['CRVAL2'] + header['CDELT2'] * (np.arange(cubeshape[1]) + 0.5 - header['CRPIX2'])
        return np.mean(ra), np.mean(dec)

    # add the primary components
    cube = fits.getdata(filename)
    header = fits.getheader(filename)
    header['CDELT3'] = header['CDELT3'] * 1e-3  # m/s -> km/s
    cen_ra, cen_dec = _get_cube_center(header, cube.shape)
    nn = filename.split('/')[-1]
    # cube_name = '%s_RA%dDEC%d' % (nn[0:3], cen_ra, cen_dec)
    cube_name = 'G_%d%+.2fradec_%.1fkm/s_%.1fa' % (cen_ra, cen_dec, header['CDELT3'], header['CDELT2']*60.)

    data = Data()
    data.coords = coordinates_from_header(header)
    data.add_component(cube, cube_name)
    data.label = cube_name

    data_list = []
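
The excerpt stops right after data_list is created; presumably the loader
appends the assembled Data object and returns the list. A minimal hedged
completion:

    # Hypothetical completion: return the loaded Data objects.
    data_list.append(data)
    return data_list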
Code Example #7
File: loaders.py Project: jegpeek/cube-tools
from __future__ import print_function

from os.path import basename

from glue.core import Data
from glue.core.coordinates import coordinates_from_header
from glue.config import data_factory
from glue.core.data_factories import has_extension
from glue.external.astro import fits


@data_factory('Generic FITS', has_extension('fits fit'))
def _load_fits_generic(filename, **kwargs):
    hdulist = fits.open(filename)
    groups = {}  # maps array shape -> Data; membership is checked explicitly below
    for extnum, hdu in enumerate(hdulist):
        if not isinstance(hdu, fits.TableHDU) and hdu.data is not None:
            shape = hdu.data.shape
            if shape not in groups:
                label = '{}[{}]'.format(
                    basename(filename).split('.', 1)[0],
                    'x'.join((str(x) for x in shape))
                )
                data = Data(label=label)
                data.coords = coordinates_from_header(hdu.header)
                groups[shape] = data
            else:
                data = groups[shape]
            data.add_component(component=hdu.data,
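
The final call is truncated mid-statement; it presumably passes a label for
the new component. A hedged completion (the label choice is an assumption):

    # Hypothetical completion of the truncated call:
    data.add_component(component=hdu.data,
                       label='EXT{0}'.format(extnum))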
Code Example #8
from glue.core import Data
from glue.core.coordinates import coordinates_from_wcs
from glue.config import data_factory
from glue.core.data_factories import has_extension

import astropy.io.fits as fits
from astropy.wcs import WCS
import numpy as np


@data_factory('Herschel Image',
              has_extension('fits fit fits.gz'),
              default='fits fit fits.gz')
def herschel_data(filename):
    """
    Data loader customized for Herschel fits files

    This function extracts extensions named 'image',
    'error', 'coverage', etc. from a file. Each is returned
    as a glue Data object. To handle PACS cubes, if an
    ImageIndex extension is present it is used to provide
    wavelengths.

    astropy.wcs.WCS objects are used to parse wcs.

    Any other extensions are ignored
    """

    hdulist = fits.open(filename, memmap=True, ignore_missing_end=True)

    d = Data("data")
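
The excerpt ends right after the container Data object is created. A hedged
sketch of the extension handling the docstring describes; the extension
names and WCS wiring are assumptions for illustration.

    # Hypothetical continuation: pull the named image-like extensions
    # into the Data object, taking coordinates from the 'image' HDU.
    d.coords = coordinates_from_wcs(WCS(hdulist['image'].header))
    for name in ('image', 'error', 'coverage'):
        try:
            hdu = hdulist[name]   # look up extension by EXTNAME
        except KeyError:
            continue
        d.add_component(hdu.data, name)
    return [d]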