def test_inherited_registration():
    """Check that multi-generation inheritance works properly.

    A child class must inherit registrations from parents before
    grandparents (see astropy/astropy#7156).
    """
    class Child1(Table):
        pass

    class Child2(Child1):
        pass

    def read_table():
        return Table()

    def read_child1():
        return Child1()

    # a reader registered on the base class is inherited by Child2
    io_registry.register_reader('test', Table, read_table)
    assert io_registry.get_reader('test', Child2) is read_table

    # the nearest ancestor wins: once Child1 has its own reader,
    # Child2 resolves to it rather than to the Table registration
    io_registry.register_reader('test', Child1, read_child1)
    assert io_registry.get_reader('test', Child2) is read_child1
def __init__(self):
    """Initialise an empty polyco container and register the built-in
    'tempo' polyco format with the astropy I/O registry.
    """
    # Polyco metadata; all populated later by a read method.
    self.mjdMid = None
    self.mjdSpan = None
    self.tStart = None
    self.tStop = None
    self.ncoeff = None
    self.coeffs = None
    self.obs = None
    self.fileName = None
    self.fileFormat = None
    self.newFileName = None
    self.polycoTable = None
    self.polycoFormat = [{'format': 'tempo',
                          'read_method': tempo_polyco_table_reader,
                          'write_method': tempo_polyco_table_writer}, ]

    # Register the table built-in reading and writing format
    for fmt in self.polycoFormat:
        if fmt['format'] not in registry.get_formats()['Format']:
            # ``is not None`` (identity) is the correct comparison against
            # None, not ``!= None`` (PEP 8).
            if fmt['read_method'] is not None:
                registry.register_reader(fmt['format'], table.Table,
                                         fmt['read_method'])
            if fmt['write_method'] is not None:
                registry.register_writer(fmt['format'], table.Table,
                                         fmt['write_method'])
def inherit_io_registrations(cls):
    """Copy the immediate parent class's I/O registrations onto *cls*.

    For every format registered for the parent (``cls.__mro__[1]``),
    re-register the same reader, writer, and identifier for *cls* so
    the subclass shares the parent's formats.  ``force=False`` means a
    pre-existing registration on *cls* raises rather than being clobbered.
    Returns *cls* so this can be used as a class decorator.
    """
    parent = cls.__mro__[1]
    for row in registry.get_formats(data_class=parent):
        name = row["Format"]
        # read
        if row["Read"].lower() == "yes":
            registry.register_reader(
                name,
                cls,
                registry.get_reader(name, parent),
                force=False,
            )
        # write
        if row["Write"].lower() == "yes":
            registry.register_writer(
                name,
                cls,
                registry.get_writer(name, parent),
                force=False,
            )
        # identify
        # NOTE(review): reaches into the private ``registry._identifiers``
        # mapping -- there is no public getter for identifiers, but this
        # may break with future astropy versions; confirm on upgrade.
        if row["Auto-identify"].lower() == "yes":
            registry.register_identifier(
                name,
                cls,
                registry._identifiers[(name, parent)],
                force=False,
            )
    return cls
def _test_frame_read_format(self, format):
    """Test GWF reading via *format*, then via auto-identification.

    Temporarily replaces the registered 'gwf' reader with the one for
    *format* so that the no-format read resolves to it, restoring the
    original reader afterwards.
    """
    # test with specific format
    try:
        self.frame_read(format=format)
    except ImportError as e:
        # required frame library not installed -- skip, don't fail
        self.skipTest(str(e))
    else:
        # test again with no format argument
        # but we need to move other readers out of the way first
        try:
            read_ = get_reader('gwf', TimeSeries)
        except Exception:
            # no default 'gwf' reader registered; nothing to swap/restore
            pass
        else:
            register_reader('gwf', TimeSeries,
                            get_reader(format, TimeSeries), force=True)
            try:
                self.frame_read()
            finally:
                # always restore the original 'gwf' reader
                register_reader('gwf', TimeSeries, read_, force=True)
    # test errors
    self.assertRaises(
        ValueError, self.TEST_CLASS.read, Cache(), self.channel,
        format=format)
def __init__(self, name, start, end, channel=None, etg=None,
             table=None, cache=None, url=None, **kwargs):
    """Create a new `EventTriggerTab`
    """
    super(EventTriggerTab, self).__init__(name, start, end, **kwargs)
    # ``channel and f(channel) or None`` keeps None inputs as None
    self.channel = channel and get_channel(channel) or None
    self.cache = cache
    self.url = url
    self.error = dict()

    # parse ETG and LIGO_LW table class
    if etg is None:
        # default the event-trigger-generator name to the tab name
        etg = self.name
    self.etg = etg
    if table is None or isinstance(table, str):
        tablename = isinstance(table, str) and table or self.etg
        try:
            table = get_etg_table(tablename)
        except KeyError as e:
            # re-raise with a more actionable message, preserving the type
            e.args = ("Cannot automatically determine LIGO_LW table for "
                      "etg %r, please specify in configuration file or "
                      "when creating EventTriggerTab" % tablename,)
            raise

    # register custom readers for this type
    try:
        register_etg_table(self.etg.lower(), table)
    except KeyError:
        # already registered -- fine
        pass
    try:
        register_reader(self.etg.lower(), table,
                        get_reader('ligolw', table))
    except Exception as e:
        # tolerate duplicate registrations only; re-raise anything else
        if 'already defined' in str(e):
            pass
        else:
            raise
def decorator(func):
    """Register *func* as a reader (and identifier) for ``label``/``dtype``.

    If no explicit ``identifier`` was supplied, one is synthesised from
    ``extensions`` (or a match-everything fallback).  Readers are then
    re-ordered by their ``priority`` attribute.
    """
    io_registry.register_reader(label, dtype, func)
    if identifier is None:
        # If the identifier is not defined, but the extensions are, create
        # a simple identifier based off file extension.
        if extensions is not None:
            logging.info("'{}' data loader provided for {} without "
                         "explicit identifier. Creating identifier using "
                         "list of compatible extensions".format(
                             label, dtype.__name__))
            # NOTE: args[1] is the file path by registry convention
            id_func = lambda *args, **kwargs: any([args[1].endswith(x)
                                                   for x in extensions])
        # Otherwise, create a dummy identifier
        else:
            logging.warning("'{}' data loader provided for {} without "
                            "explicit identifier or list of compatible "
                            "extensions".format(label, dtype.__name__))
            id_func = lambda *args, **kwargs: True
    else:
        id_func = identifier_wrapper(identifier)

    io_registry.register_identifier(label, dtype, id_func)

    # Include the file extensions as attributes on the function object
    func.extensions = extensions

    # Include priority on the loader function attribute
    func.priority = priority

    # Sort the io_registry based on priority
    # NOTE(review): mutates the private ``io_registry._readers`` dict in
    # place -- relies on dict insertion order and on astropy internals.
    sorted_loaders = sorted(io_registry._readers.items(),
                            key=lambda item: getattr(item[1], 'priority', 0))

    # Update the registry with the sorted dictionary
    io_registry._readers.clear()
    io_registry._readers.update(sorted_loaders)

    logging.debug("Successfully loaded reader \"{}\".".format(label))

    # Automatically register a SpectrumList reader for any data_loader that
    # reads Spectrum1D objects. TODO: it's possible that this
    # functionality should be opt-in rather than automatic.
    if dtype is Spectrum1D:
        def load_spectrum_list(*args, **kwargs):
            # wrap the single-spectrum loader's result in a list
            return SpectrumList([func(*args, **kwargs)])

        # Add these attributes to the SpectrumList reader as well
        load_spectrum_list.extensions = extensions
        load_spectrum_list.priority = priority

        io_registry.register_reader(label, SpectrumList, load_spectrum_list)
        io_registry.register_identifier(label, SpectrumList, id_func)
        logging.debug("Created SpectrumList reader for \"{}\".".format(label))

    @wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)

    return wrapper
def astropy_tabular_data(*args, **kwargs):
    """
    Build a data set from a table. We restrict ourselves to tables
    with 1D columns.

    All arguments are passed to
        astropy.table.Table.read(...).
    """
    from distutils.version import LooseVersion
    from astropy import __version__
    if LooseVersion(__version__) < LooseVersion("0.2"):
        raise RuntimeError("Glue requires astropy >= v0.2. Please update")

    result = Data()

    # Read the table
    from astropy.table import Table

    # Add identifiers for ASCII data
    from astropy.io import registry
    if LooseVersion(__version__) < LooseVersion("0.3"):
        registry.register_identifier('ascii', Table, _ascii_identifier_v02,
                                     force=True)
    else:
        # Basically, we always want the plain ascii reader for now.
        # But astropy will complain about ambiguous formats (or use another reader)
        # unless we remove other registry identifiers and set up our own reader
        nope = lambda *a, **k: False
        registry.register_identifier('ascii.glue', Table, _ascii_identifier_v03,
                                     force=True)
        registry.register_identifier('ascii.csv', Table, nope, force=True)
        registry.register_identifier('ascii.fast_csv', Table, nope, force=True)
        registry.register_identifier('ascii', Table, nope, force=True)
        registry.register_reader('ascii.glue', Table,
                                 lambda path: Table.read(path, format='ascii'),
                                 force=True)

    # Import FITS compatibility (for Astropy 0.2.x)
    from ..external import fits_io

    table = Table.read(*args, **kwargs)

    # Loop through columns and make component list
    for column_name in table.columns:
        c = table[column_name]
        # older astropy used ``units`` (plural) for the column unit attribute
        u = c.unit if hasattr(c, 'unit') else c.units

        if table.masked:
            # fill array for now
            try:
                c = c.filled(fill_value=np.nan)
            except ValueError:
                # assigning nan to integer dtype
                c = c.filled(fill_value=-1)

        nc = Component.autotyped(c, units=u)
        result.add_component(nc, column_name)

    return result
def add_polyco_file_format(self, formatName, methodMood,
                           readMethod=None, writeMethod=None):
    """
    Add a polyco file format and its reading/writing method to the class.
    Then register it to the table reading.

    Parameters
    ----------
    formatName : str
        The name for the format.
    methodMood : str
        ['r','w','rw']. 'r' represent as reading
        'w' represent as writting
        'rw' represent as reading and writting
    readMethod : method
        The method for reading the file format.
    writeMethod : method
        The method for writting the file to disk.

    Raises
    ------
    ValueError
        If the format name is already registered, or a required
        read/write method is missing for the requested mode.
    """
    # Check if the format already exists.
    if (formatName in [f['format'] for f in self.polycoFormat]
            or formatName in registry.get_formats()['Format']):
        errorMssg = 'Format name \'' + formatName + '\' is already exist. '
        # ValueError (not bare Exception) so callers can catch precisely;
        # matches the corrected variant of this method elsewhere in the
        # project.
        raise ValueError(errorMssg)

    pFormat = {'format': formatName}

    if methodMood == 'r':
        if readMethod is None:
            # ValueError instead of BaseException: raising BaseException
            # escapes ``except Exception`` handlers and should never be
            # used for argument validation.
            raise ValueError('Argument readMethod should not be \'None\'.')
        pFormat['read_method'] = readMethod
        pFormat['write_method'] = writeMethod
        registry.register_reader(pFormat['format'], table.Table,
                                 pFormat['read_method'])
    elif methodMood == 'w':
        if writeMethod is None:
            raise ValueError('Argument writeMethod should not be \'None\'.')
        pFormat['read_method'] = readMethod
        pFormat['write_method'] = writeMethod
        registry.register_writer(pFormat['format'], table.Table,
                                 pFormat['write_method'])
    elif methodMood == 'rw':
        if readMethod is None or writeMethod is None:
            raise ValueError('Argument readMethod and writeMethod '
                             'should not be \'None\'.')
        pFormat['read_method'] = readMethod
        pFormat['write_method'] = writeMethod
        registry.register_reader(pFormat['format'], table.Table,
                                 pFormat['read_method'])
        registry.register_writer(pFormat['format'], table.Table,
                                 pFormat['write_method'])

    self.polycoFormat.append(pFormat)
def test_read_basic_table():
    """A trivially-registered reader round-trips a structured array."""
    records = np.array(list(zip([1, 2, 3], ['a', 'b', 'c'])),
                       dtype=[(str('A'), int), (str('B'), '|U1')])
    io_registry.register_reader('test', Table, lambda x: Table(x))
    result = Table.read(records, format='test')
    assert result.keys() == ['A', 'B']
    for row in range(3):
        assert result['A'][row] == records['A'][row]
        assert result['B'][row] == records['B'][row]
def test_register_readers_with_same_name_on_different_classes():
    """The same format name may be registered for unrelated classes.

    No errors should be generated; this used to fail under Python 3.
    """
    io_registry.register_reader('test', TestData, lambda: TestData())
    io_registry.register_reader('test', Table, lambda: Table())

    data_obj = TestData.read(format='test')
    assert isinstance(data_obj, TestData)

    table_obj = Table.read(format='test')
    assert isinstance(table_obj, Table)
def decorator(func):
    """Register *func* as reader and *identifier* for ``label``/``dtype``,
    then return a transparent wrapper around it."""
    io_registry.register_reader(label, dtype, func)
    io_registry.register_identifier(label, dtype, identifier)
    logging.info('Successfully loaded reader "{}".'.format(label))

    @wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)

    return wrapper
def register_hdf5():
    """
    Register HDF5 with Unified I/O.
    """
    from astropy.io import registry as io_registry
    from astropy.table import Table

    # pair each registry hook with its HDF5 handler
    hooks = (
        (io_registry.register_reader, read_table_hdf5),
        (io_registry.register_writer, write_table_hdf5),
        (io_registry.register_identifier, is_hdf5),
    )
    for register, handler in hooks:
        register('hdf5', Table, handler)
def register_ascii(obj):
    """Register ASCII I/O methods for given type obj

    This factory method registers 'txt' and 'csv' I/O formats with
    a reader, writer, and auto-identifier for *obj*.
    """
    # ``dict.iteritems`` exists only on Python 2; ``items`` works on
    # both Python 2 and 3.
    for form, delim in formats.items():
        read_, write_ = ascii_io_factory(obj, delim)
        # identify plain and gzipped files for this extension
        register_identifier(form, obj, identify_factory(form, '%s.gz' % form))
        register_writer(form, obj, write_)
        register_reader(form, obj, read_)
def register_hdf5_array_io(array_type, format='hdf5', identify=True):
    """Registry read() and write() methods for the HDF5 format
    """
    def read_hdf5(*args, **kwargs):
        # bind the target type so the generic reader builds the right class
        kwargs.setdefault('array_type', array_type)
        return array_from_hdf5(*args, **kwargs)

    def write_hdf5(*args, **kwargs):
        kwargs.setdefault('array_type', array_type)
        return array_to_hdf5(*args, **kwargs)

    registry.register_reader(format, array_type, read_hdf5)
    registry.register_writer(format, array_type, write_hdf5)
    if identify:
        registry.register_identifier(format, array_type,
                                     hdf5io.identify_hdf5)
def decorator(func):
    """Register *func* as the ``label`` reader for Spectrum1DRef and
    return a transparent wrapper."""
    logging.info("Added {} to loader registry.".format(label))
    # flag the function so other code can recognise wrapped loaders
    func.loader_wrapper = True

    io_registry.register_reader(label, Spectrum1DRef, func)
    io_registry.register_identifier(label, Spectrum1DRef, identifier)

    @wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)

    return wrapper
def decorate_registered_reader(
        name,
        data_class=EventTable,
        columns=True,
        selection=True,
):
    """Wrap an existing registered reader to use GWpy's input decorators

    Parameters
    ----------
    name : `str`
        the name of the registered format

    data_class : `type`, optional
        the class for whom the format is registered

    columns : `bool`, optional
        use the `read_with_columns` decorator

    selection : `bool`, optional
        use the `read_with_selection` decorator
    """
    reader = registry.get_reader(name, data_class)
    # BUGFIX: the ``columns`` and ``selection`` flags were documented
    # but previously ignored (both decorators were always applied).
    # Apply ``selection`` first so ``columns`` wraps it, preserving the
    # original nesting order for the default (True, True) case.
    if selection:
        reader = read_with_selection(reader)
    if columns:
        reader = read_with_columns(reader)
    return registry.register_reader(name, data_class, reader, force=True)
def test_register_reader():
    """Readers can be registered, retrieved, and unregistered."""
    io_registry.register_reader('test1', TestData, empty_reader)
    io_registry.register_reader('test2', TestData, empty_reader)

    # both registrations resolve to the same reader
    for fmt in ('test1', 'test2'):
        assert io_registry.get_reader(fmt, TestData) == empty_reader

    # removing 'test1' makes it unavailable but leaves 'test2' intact
    io_registry.unregister_reader('test1', TestData)
    with pytest.raises(io_registry.IORegistryError):
        io_registry.get_reader('test1', TestData)
    assert io_registry.get_reader('test2', TestData) == empty_reader

    # removing 'test2' empties the registry for TestData
    io_registry.unregister_reader('test2', TestData)
    with pytest.raises(io_registry.IORegistryError):
        io_registry.get_reader('test2', TestData)
def test_identifier_origin():
    """Identifiers see the I/O origin and gate read/write format lookup."""
    io_registry.register_identifier(
        'test1', TestData, lambda o, *x, **y: o == 'read')
    io_registry.register_identifier(
        'test2', TestData, lambda o, *x, **y: o == 'write')
    io_registry.register_reader('test1', TestData, empty_reader)
    io_registry.register_writer('test2', TestData, empty_writer)

    # There should not be too many formats defined
    TestData.read()
    TestData().write()

    # 'test2' only identifies on write, so an explicit read must fail
    with pytest.raises(io_registry.IORegistryError) as excinfo:
        TestData.read(format='test2')
    assert str(excinfo.value).startswith(
        "No reader defined for format 'test2' and class 'TestData'")

    # conversely, 'test1' only identifies on read
    with pytest.raises(io_registry.IORegistryError) as excinfo:
        TestData().write(format='test1')
    assert str(excinfo.value).startswith(
        "No writer defined for format 'test1' and class 'TestData'")
def _load_yaml(self):
    """
    Loads yaml files as custom loaders.
    """
    cur_path = os.path.join(os.path.dirname(__file__), '..', 'io',
                            'yaml_loaders')
    usr_path = os.path.join(os.path.expanduser('~'), '.specviz')
    lines_path = os.path.join(os.path.dirname(__file__),
                              '../data/linelists')

    # This order determines priority in case of duplicates; paths higher
    # in this list take precedence
    check_paths = [usr_path, cur_path, lines_path]

    if not os.path.exists(usr_path):
        os.mkdir(usr_path)

    for path in check_paths:
        for file_name in [x for x in os.listdir(path)
                          if x.endswith('yaml')]:
            f_path = os.path.join(path, file_name)
            # BUGFIX: use a context manager so the file handle is closed
            # deterministically (was ``yaml.load(open(f_path, 'r'))``).
            # SECURITY NOTE: ``yaml.load`` can construct arbitrary Python
            # objects; that is required here (the yaml files describe
            # loader objects with a ``set_filter`` method), so only load
            # files from the trusted paths above.
            with open(f_path, 'r') as f:
                custom_loader = yaml.load(f)
            custom_loader.set_filter()

            # Figure out which of the two generic loaders to associate
            # this yaml file with
            if any(ext in custom_loader.extension for ext in ['fits']):
                loader = FitsYamlRegister(custom_loader)
            elif any(ext in custom_loader.extension
                     for ext in ['txt', 'data']):
                loader = AsciiYamlRegister(custom_loader)

            try:
                io_registry.register_reader(custom_loader.name,
                                            Spectrum1DRef,
                                            loader.reader)
                io_registry.register_identifier(custom_loader.name,
                                                Spectrum1DRef,
                                                loader.identify)
            except io_registry.IORegistryError as e:
                logging.error(e)
the file or document to write into attrs : `dict`, optional extra attributes to write into segment tables **kwargs keyword arguments to use when writing See also -------- gwpy.io.ligolw.write_ligolw_tables for details of acceptable keyword arguments """ if isinstance(flags, DataQualityFlag): flags = DataQualityDict({flags.name: flags}) return write_tables(target, flags.to_ligolw_tables(**attrs or dict()), **kwargs) # -- register ----------------------------------------------------------------- # register methods for DataQualityDict io_registry.register_reader('ligolw', DataQualityFlag, read_ligolw_flag) io_registry.register_writer('ligolw', DataQualityFlag, write_ligolw) io_registry.register_identifier('ligolw', DataQualityFlag, is_xml) # register methods for DataQualityDict io_registry.register_reader('ligolw', DataQualityDict, read_ligolw_dict) io_registry.register_writer('ligolw', DataQualityDict, write_ligolw) io_registry.register_identifier('ligolw', DataQualityDict, is_xml)
:lal:`LALCache`. """ cacheobj = args[3] if isinstance(cacheobj, Cache): return True try: from lal import Cache as LALCache except ImportError: pass else: if isinstance(cacheobj, LALCache): return True return False registry.register_reader('lcf', TimeSeries, read_cache) registry.register_reader('cache', TimeSeries, read_cache) registry.register_identifier('lcf', TimeSeries, identify_cache_file) registry.register_identifier('cache', TimeSeries, identify_cache) # duplicate for state-vector registry.register_reader('lcf', StateVector, read_state_cache) registry.register_reader('cache', StateVector, read_state_cache) registry.register_identifier('lcf', StateVector, identify_cache_file) registry.register_identifier('cache', StateVector, identify_cache) # TimeSeriesDict registry.register_reader('lcf', TimeSeriesDict, read_dict_cache) registry.register_reader('cache', TimeSeriesDict, read_dict_cache) registry.register_reader('lcfmp', TimeSeriesDict, read_dict_cache) registry.register_identifier('lcf', TimeSeriesDict, identify_cache_file)
# GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GWpy. If not, see <http://www.gnu.org/licenses/>. """This module attaches the HDF5 input output methods to the TimeSeries. While these methods are avialable as methods of the class itself, this module attaches them to the unified I/O registry, making it a bit cleaner. """ from astropy.io.registry import (register_reader, register_writer, register_identifier) from ... import version from ...io.hdf5 import identify_hdf5 from ..core import TimeSeries from ..statevector import StateVector __author__ = 'Duncan Macleod <*****@*****.**>' __version__ = version.version register_reader('hdf', TimeSeries, TimeSeries.from_hdf5) register_writer('hdf', TimeSeries, TimeSeries.to_hdf5) register_identifier('hdf', TimeSeries, identify_hdf5) register_reader('hdf', StateVector, StateVector.from_hdf5) register_writer('hdf', StateVector, StateVector.to_hdf5) register_identifier('hdf', StateVector, identify_hdf5)
data quality flag to print fobj : `file`, `str` open file object, or file path, to write to header : `bool`, optional print header into the file, default: `True` coltype : `type`, optional numerical type in which to cast times before printing Notes ----- In this format, only the :attr:`~gwpy.segments.flag.DataQualityFlag.active` segments are printed See Also -------- :mod:`glue.segmentsUtils` for definition of the segwizard format, and the to/from functions used in this GWpy module """ to_segwizard(flag.active, fobj, header=header, coltype=coltype) registry.register_reader('segwizard', DataQualityFlag, flag_from_segwizard) registry.register_writer('segwizard', DataQualityFlag, flag_to_segwizard) registry.register_identifier('segwizard', DataQualityFlag, identify_segwizard) registry.register_reader('segwizard', SegmentList, from_segwizard) registry.register_writer('segwizard', SegmentList, to_segwizard) registry.register_identifier('segwizard', SegmentList, identify_segwizard)
nevents = tree.GetEntries() for i in range(nevents): tree.GetEntry() burst = sngl_burst_from_root(tree, columns=columns) if filt is None or filt(burst): append(burst) return out def identify_omicron(*args, **kwargs): """Determine an input object as an Omicron-format ROOT file. """ fp = args[3] if isinstance(fp, file): fp = fp.name elif isinstance(fp, CacheEntry): fp = fp.path # identify string if (isinstance(fp, (unicode, str)) and fp.endswith('root') and 'omicron' in fp.lower()): return True # identify cache object else: return False registry.register_reader('omicron', lsctables.SnglBurstTable, table_from_root) registry.register_identifier('omicron', lsctables.SnglBurstTable, identify_omicron)
#integer(kind=4) :: column_pointer(code_uvt_last) = code_null ! Back pointer to the columns... #integer(kind=4) :: column_size(code_uvt_last) = 0 ! Number of columns for each #! In the data, we instead have the codes for each column #! integer(kind=4) :: column_codes(nlead+ntrail) ! Start column for each ... #! integer(kind=4) :: column_types(nlead+ntrail) /0,1,2/ ! Number of columns for each: 1 real*4, 2 real*8 #! Leading / Trailing information codes #! #integer(kind=4) :: order = 0 ! 13 Stoke/Channel ordering #integer(kind=4) :: nfreq = 0 ! 14 ! 0 or = nchan*nstokes #integer(kind=4) :: atoms(4) ! 15-18 Atom description #! #real(kind=8), pointer :: freqs(:) => null() ! (nchan*nstokes) = 0d0 #integer(kind=4), pointer :: stokes(:) => null() ! (nchan*nstokes) or (nstokes) = code_stoke #! #real(kind=8), pointer :: ref(:) => null() #real(kind=8), pointer :: val(:) => null() #real(kind=8), pointer :: inc(:) => null() lf.seek(1024) real_dims = dims[:ndim] data = np.fromfile(lf, count=np.product(real_dims), dtype='float32').reshape(real_dims[::-1]) data[data == bval] = np.nan return data, header io_registry.register_reader('lmv', BaseSpectralCube, load_lmv_cube) io_registry.register_reader('class_lmv', BaseSpectralCube, load_lmv_cube) io_registry.register_identifier('lmv', BaseSpectralCube, is_lmv)
tab = Table.read(hdu, format='fits') for colname in tab.colnames: # Fix units if tab[colname].unit == 'e-/s': tab[colname].unit = 'electron/s' # Rename columns to lowercase tab.rename_column(colname, colname.lower()) # Filter out NaN rows nans = np.isnan(tab['time'].data) if np.any(nans): warnings.warn('Ignoring {0} rows with NaN times'.format(np.sum(nans))) tab = tab[~nans] # Compute Time object time = reference_date + TimeDelta(tab['time'].data) # Remove original time column tab.remove_column('time') # Create time series ts = TimeSeries(time=time, data=tab) return ts registry.register_reader('tess.fits', TimeSeries, tess_fits_reader)
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GWpy. If not, see <http://www.gnu.org/licenses/>. """This module attaches the HDF5 input output methods to the TimeSeries. While these methods are avialable as methods of the class itself, this module attaches them to the unified I/O registry, making it a bit cleaner. """ from astropy.io.registry import (register_reader, register_writer, register_identifier) from ... import version from ...io.hdf5 import identify_hdf5 from ..core import TimeSeries from ..statevector import StateVector __author__ = 'Duncan Macleod <*****@*****.**>' __version__ = version.version register_reader('hdf', TimeSeries, TimeSeries.from_hdf5) register_writer('hdf', TimeSeries, TimeSeries.to_hdf5) register_identifier('hdf', TimeSeries, identify_hdf5) register_reader('hdf', StateVector, StateVector.from_hdf5) register_writer('hdf', StateVector, StateVector.to_hdf5) register_identifier('hdf', StateVector, identify_hdf5)
append = out.append # iterate over files for f in files: trigs = loadtxt(f, dtype=float) for t, f, snr in trigs: b = lsctables.SnglBurst() b.set_peak(LIGOTimeGPS(float(t))) b.peak_frequency = f b.snr = snr if filt is None or filt(b): append(b) return out register_reader('hveto', lsctables.SnglBurstTable, read_hveto_triggers) class HvetoSegmentSummaryPlot(SegmentPlot): """Custom SegmentSummaryPlot to handle unkown numbers of hveto rounds """ type = 'hveto-segments' defaults = SegmentPlot.defaults.copy() defaults.update({ 'on_is_bad': True, 'valid': None, 'insetlabels': False, }) def find_flags(self): # work out flags on-the-fly
def decorator(func):
    """Register *func* as a reader (and identifier) for ``label``/``dtype``,
    synthesising an identifier from ``extensions`` when none was given,
    then re-order all readers by their ``priority`` attribute."""
    io_registry.register_reader(label, dtype, func)
    if identifier is None:
        # If the identifier is not defined, but the extensions are, create
        # a simple identifier based off file extension.
        if extensions is not None:
            logging.info("'{}' data loader provided for {} without "
                         "explicit identifier. Creating identifier using "
                         "list of compatible extensions".format(
                             label, dtype.__name__))
            # NOTE: args[1] is the file path by registry convention
            id_func = lambda *args, **kwargs: any(
                [args[1].endswith(x) for x in extensions])
        # Otherwise, create a dummy identifier
        else:
            logging.warning("'{}' data loader provided for {} without "
                            "explicit identifier or list of compatible "
                            "extensions".format(label, dtype.__name__))
            id_func = lambda *args, **kwargs: True
    else:
        id_func = identifier_wrapper(identifier)

    io_registry.register_identifier(label, dtype, id_func)

    # Include the file extensions as attributes on the function object
    func.extensions = extensions

    # Include priority on the loader function attribute
    func.priority = priority

    # Sort the io_registry based on priority
    # NOTE(review): mutates the private ``io_registry._readers`` mapping
    # in place; depends on astropy internals.
    sorted_loaders = sorted(
        io_registry._readers.items(),
        key=lambda item: getattr(item[1], 'priority', 0))

    # Update the registry with the sorted dictionary
    io_registry._readers.clear()
    io_registry._readers.update(sorted_loaders)

    logging.debug("Successfully loaded reader \"{}\".".format(label))

    # Automatically register a SpectrumList reader for any data_loader that
    # reads Spectrum1D objects. TODO: it's possible that this
    # functionality should be opt-in rather than automatic.
    if dtype is Spectrum1D:
        def load_spectrum_list(*args, **kwargs):
            # wrap the single-spectrum loader's result in a list
            return SpectrumList([func(*args, **kwargs)])

        # Add these attributes to the SpectrumList reader as well
        load_spectrum_list.extensions = extensions
        load_spectrum_list.priority = priority

        io_registry.register_reader(label, SpectrumList, load_spectrum_list)
        io_registry.register_identifier(label, SpectrumList, id_func)
        logging.debug(
            "Created SpectrumList reader for \"{}\".".format(label))

    @wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)

    return wrapper
if channel.sample_rate: params.setdefault('sampleFrequency', channel.sample_rate.to('Hz').value) if channel.frequency_range: low, hi = channel.frequency_range.to('Hz').value params.setdefault('searchFrequencyRange', (low, hi)) if 'qlow' in params or 'qhigh' in params: qlow = params.pop('qlow', 'sqrt(11)') qhigh = params.pop('qhigh', 64) params.setdefault('searchQRange', (qlow, qhigh)) # write params for key in ['channelName', 'frameType']: if key not in params: raise KeyError("No %r defined for %s" % (key, str(channel))) for key, value in params.iteritems(): key = '%s:' % str(key) if isinstance(value, tuple): value = '[%s]' % ' '.join(map(str, value)) elif isinstance(value, float) and value.is_integer(): value = int(value) elif isinstance(value, str): value = repr(value) print(' {0: <30} {1}'.format(key, value), file=file) print('}', file=file) # -- registry ----------------------------------------------------------------- registry.register_reader('omega-scan', ChannelList, read_omega_scan_config) registry.register_writer('omega-scan', ChannelList, write_omega_scan_config)
(`quality_bitmask=1664431`). This is known to remove good data. * "hardest": removes all data that has been flagged (`quality_bitmask=2096639`). This mask is not recommended. See the :class:`KeplerQualityFlags` class for details on the bitmasks. """ lc = _read_lightcurve_fits_file(filename, flux_column=flux_column, time_format='btjd') # Filter out poor-quality data # NOTE: Unfortunately Astropy Table masking does not yet work for columns # that are Quantity objects, so for now we remove poor-quality data instead # of masking. Details: https://github.com/astropy/astropy/issues/10119 quality_mask = TessQualityFlags.create_quality_mask( quality_array=lc['quality'], bitmask=quality_bitmask) lc = lc[quality_mask] lc.meta['targetid'] = lc.meta.get('ticid') lc.meta['quality_bitmask'] = quality_bitmask lc.meta['quality_mask'] = quality_mask return TessLightCurve(data=lc) """ADD READERS TO THE REGISTRY""" try: registry.register_reader('kepler', LightCurve, read_kepler_lightcurve) registry.register_reader('tess', LightCurve, read_tess_lightcurve) except registry.IORegistryError: pass # necessary to enable autoreload during debugging
def setup_module(module):
    """Register the JSON cosmology I/O handlers before tests run."""
    handlers = (
        (io_registry.register_reader, read_json),
        (io_registry.register_writer, write_json),
        (io_registry.register_identifier, json_identify),
    )
    for register, handler in handlers:
        register("json", Cosmology, handler)
def read_dat(filepath, fcol=0, ampcol=1, **kwargs): """Read a `Spectrum` from a txt file """ frequency, amplitude = numpy.loadtxt(filepath, usecols=[fcol, ampcol], unpack=True) return Spectrum(amplitude, frequencies=frequency, **kwargs) def identify_dat(*args, **kwargs): """Identify the given file as a dat file, rather than anything else Returns ------- True if the filename endswith .txt or .dat False otherwise """ filename = args[1][0] if not isinstance(filename, basestring): filename = filename.name if filename.endswith('txt') or filename.endswith('dat'): return True return False # register this file-reader with the Spectrum class registry.register_reader('dat', Spectrum, read_dat, force=True) registry.register_identifier('dat', Spectrum, identify_dat)
flags = tab.QUALITY.flatten() finally: hdus.close() dispersion_unit = tab.columns[_find_col_index(tab.columns, 'WAVELENGTH')].unit.lower() flux_unit = _get_unit(tab.columns[_find_col_index(tab.columns, 'FLUX')].unit) wcs = WCSTable(wave, dispersion_unit) spec = Spectrum1D(data=flux, uncertainty=StdDevUncertainty(sigma, flux_unit), meta=meta, unit=flux_unit, mask=(flags != 0), wcs=wcs) #flags=flags) return spec registry.register_reader('IUE-mxlo', Spectrum1D, read_IUE_mxlo) def identify_IUE_mxlo(origin, *args, **kwargs): return (isinstance(args[0], six.string_types) and os.path.splitext(args[0].lower())[1] == '.mxlo') registry.register_identifier('IUE-mxlo', Spectrum1D, identify_IUE_mxlo)
def test_register_reader_invalid():
    """Re-registering a format without ``force`` raises IORegistryError."""
    io_registry.register_reader('test', TestData, empty_reader)
    with pytest.raises(io_registry.IORegistryError) as excinfo:
        io_registry.register_reader('test', TestData, empty_reader)
    expected = ("Reader for format 'test' and class 'TestData' "
                "is already defined")
    assert str(excinfo.value) == expected
The format of table data to write. Must be one of ``tabledata`` (text representation), ``binary`` or ``binary2``. Default is ``tabledata``. See :ref:`votable-serialization`. """ # Only those columns which are instances of BaseColumn or Quantity can be written unsupported_cols = input.columns.not_isinstance((BaseColumn, Quantity)) if unsupported_cols: unsupported_names = [col.info.name for col in unsupported_cols] raise ValueError( 'cannot write table with mixin column(s) {0} to VOTable'.format( unsupported_names)) # Check if output file already exists if isinstance(output, str) and os.path.exists(output): if overwrite: os.remove(output) else: raise OSError("File exists: {0}".format(output)) # Create a new VOTable file table_file = from_table(input, table_id=table_id) # Write out file table_file.to_xml(output, tabledata_format=tabledata_format) io_registry.register_reader('votable', Table, read_table_votable) io_registry.register_writer('votable', Table, write_table_votable) io_registry.register_identifier('votable', Table, is_votable)
# the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # GWpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GWpy. If not, see <http://www.gnu.org/licenses/>. """This module attaches the HDF5 input output methods to the Spectrogram. While these methods are avialable as methods of the class itself, this module attaches them to the unified I/O registry, making it a bit cleaner. """ from astropy.io.registry import (register_reader, register_writer, register_identifier) from ... import version from ...io.hdf5 import identify_hdf5 from ..core import Spectrogram __author__ = 'Duncan Macleod <*****@*****.**>' __version__ = version.version register_reader('hdf', Spectrogram, Spectrogram.from_hdf5) register_writer('hdf', Spectrogram, Spectrogram.to_hdf5) register_identifier('hdf', Spectrogram, identify_hdf5)
#!/usr/bin/env python
# encoding: utf-8
"""
Reader for XVISTA .prof tables.
"""

import numpy as np
from astropy.table import Table
from astropy.io import registry


def xvista_table_reader(filename):
    """Read an XVISTA ``.prof`` profile table into an astropy `Table`.

    The file is expected to have a 15-line header; missing values are
    marked with ``*`` and are filled with NaN.
    """
    # ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``float`` is the exact equivalent (float64).
    dt = [('R', float), ('SB', float), ('SB_err', float),
          ('ELL', float), ('PA', float), ('EMAG', float),
          ('ELLMAG', float), ('ELLMAG_err', float), ('XC', float),
          ('YC', float), ('FRACONT', float), ('A1', float),
          ('A2', float), ('A4', float), ('CIRCMAG', float)]
    data = np.genfromtxt(filename, dtype=np.dtype(dt), skip_header=15,
                         missing_values='*', filling_values=np.nan)
    return Table(data)


registry.register_reader('xvistaprof', Table, xvista_table_reader)
output : str The filename to write the table to. overwrite : bool Whether to overwrite any existing file without warning. append : bool Whether to append the table to an existing file """ # Encode any mixin columns into standard Columns. input = _encode_mixins(input) table_hdu = table_to_hdu(input, character_as_bytes=True) # Check if output file already exists if isinstance(output, str) and os.path.exists(output): if overwrite: os.remove(output) elif not append: raise OSError(f"File exists: {output}") if append: # verify=False stops it reading and checking the existing file. fits_append(output, table_hdu.data, table_hdu.header, verify=False) else: table_hdu.writeto(output) io_registry.register_reader('fits', Table, read_table_fits) io_registry.register_writer('fits', Table, write_table_fits) io_registry.register_identifier('fits', Table, is_fits)
def add_polyco_file_format(self, formatName, methodMood, readMethod=None,
                           writeMethod=None):
    """Add a polyco file format and register its I/O methods.

    Parameters
    ----------
    formatName : str
        The name for the format.
    methodMood : str
        One of ``['r', 'w', 'rw']`` selecting read-only, write-only, or
        read/write registration.
    readMethod : method
        The method for reading the file format.
    writeMethod : method
        The method for writing the file to disk.

    Raises
    ------
    ValueError
        If the format name is already known, or a required read/write
        method is missing for the requested mood.
    """
    # A format name may only be registered once, either on this object
    # or in the global astropy I/O registry.
    localNames = [entry["format"] for entry in self.polycoFormat]
    if (formatName in localNames
            or formatName in registry.get_formats()["Format"]):
        raise ValueError(
            "Format name '" + formatName + "' is already exist. ")

    pFormat = {"format": formatName}

    if methodMood == "r":
        if readMethod is None:
            raise ValueError("Argument readMethod should not be 'None'.")
        pFormat["read_method"] = readMethod
        pFormat["write_method"] = writeMethod
        registry.register_reader(formatName, table.Table, readMethod)
    elif methodMood == "w":
        if writeMethod is None:
            raise ValueError("Argument writeMethod should not be 'None'.")
        pFormat["read_method"] = readMethod
        pFormat["write_method"] = writeMethod
        registry.register_writer(formatName, table.Table, writeMethod)
    elif methodMood == "rw":
        if readMethod is None or writeMethod is None:
            raise ValueError("Argument readMethod and writeMethod "
                             "should not be 'None'.")
        pFormat["read_method"] = readMethod
        pFormat["write_method"] = writeMethod
        registry.register_reader(formatName, table.Table, readMethod)
        registry.register_writer(formatName, table.Table, writeMethod)

    # Any other methodMood is appended without registering I/O methods.
    self.polycoFormat.append(pFormat)
def kepler_fits_reader(filename):
    """Read a Kepler FITS file into a `SampledTimeSeries`.

    The file is loaded through the generic FITS table reader, units and
    column names are normalised, and the raw ``time`` column is converted
    into a `Time` object.
    """
    data = Table.read(filename, format='fits')

    for name in data.colnames:
        # normalise the electron-flux unit spelling
        if data[name].unit == 'e-/s':
            data[name].unit = 'electron/s'
        # all column names become lowercase
        data.rename_column(name, name.lower())

    # build the time axis: raw values + 2454833, TCB scale, JD format
    times = Time(data['time'].data + 2454833, scale='tcb', format='jd')
    # drop the raw time column now that it lives in ``times``
    data.remove_column('time')

    series = SampledTimeSeries(time=times, data=data)
    series.time.format = 'isot'
    return series


registry.register_reader('kepler.fits', SampledTimeSeries, kepler_fits_reader)
# append row by row names = table.dtype.names for row in table: rowd = dict((n, row[n]) for n in names) gps = LIGOTimeGPS(rowd.pop('time', 0)) frame.AppendFrEvent( FrEvent( str(name), str(rowd.pop('comment', '')), str(rowd.pop('inputs', '')), GPSTime(gps.gpsSeconds, gps.gpsNanoSeconds), float(rowd.pop('timeBefore', 0)), float(rowd.pop('timeAfter', 0)), int(rowd.pop('eventStatus', 0)), float(rowd.pop('amplitude', 0)), float(rowd.pop('probability', -1)), str(rowd.pop('statistics', '')), list(rowd.items()), # remaining params as tuple )) # write frame to file io_gwf.write_frames(filename, [frame], **write_kw) # -- registration ------------------------------------------------------------- for table_class in (Table, EventTable): io_registry.register_reader('gwf', table_class, table_from_gwf) io_registry.register_writer('gwf', table_class, table_to_gwf) io_registry.register_identifier('gwf', table_class, io_gwf.identify_gwf)
def test_read_valid_return():
    """A registered reader's return value is handed back by ``read``."""
    # reader producing a fresh TestData instance
    def _make_test_data():
        return TestData()

    io_registry.register_reader('test', TestData, _make_test_data)
    result = TestData.read(format='test')
    assert isinstance(result, TestData)
def add_polyco_file_format(self, formatName, methodMood, readMethod=None,
                           writeMethod=None):
    """Add a polyco file format and its reading/writing method to the
    class. Then register it to the table reading.

    Parameters
    ----------
    formatName : str
        The name for the format.
    methodMood : str
        ['r','w','rw']. 'r' represent as reading
                        'w' represent as writing
                        'rw' represent as reading and writing
    readMethod : method
        The method for reading the file format.
    writeMethod : method
        The method for writing the file to disk.

    Raises
    ------
    ValueError
        If the format name already exists, or a required read/write
        method is missing for the requested mood.
    """
    # Check if the format already exist (locally or in the astropy
    # I/O registry).
    if (formatName in [f['format'] for f in self.polycoFormat]
            or formatName in registry.get_formats()['Format']):
        errorMssg = 'Format name \'' + formatName + '\' is already exist. '
        # BUGFIX: raise ValueError (was a generic Exception) so callers can
        # catch a specific, conventional exception type.
        raise ValueError(errorMssg)

    pFormat = {'format': formatName}

    if methodMood == 'r':
        # BUGFIX: use ``is None`` (was ``== None``) and raise ValueError
        # (was BaseException, which even ``except Exception`` misses).
        if readMethod is None:
            raise ValueError('Argument readMethod should not be \'None\'.')
        pFormat['read_method'] = readMethod
        pFormat['write_method'] = writeMethod
        registry.register_reader(pFormat['format'], table.Table,
                                 pFormat['read_method'])
    elif methodMood == 'w':
        if writeMethod is None:
            raise ValueError('Argument writeMethod should not be \'None\'.')
        pFormat['read_method'] = readMethod
        pFormat['write_method'] = writeMethod
        registry.register_writer(pFormat['format'], table.Table,
                                 pFormat['write_method'])
    elif methodMood == 'rw':
        if readMethod is None or writeMethod is None:
            # BUGFIX: the original concatenated message was missing a space
            # ("...writeMethodshould not...").
            raise ValueError('Argument readMethod and writeMethod '
                             'should not be \'None\'.')
        pFormat['read_method'] = readMethod
        pFormat['write_method'] = writeMethod
        registry.register_reader(pFormat['format'], table.Table,
                                 pFormat['read_method'])
        registry.register_writer(pFormat['format'], table.Table,
                                 pFormat['write_method'])

    self.polycoFormat.append(pFormat)
    group = channel.group
    if not out.has_section(group):
        out.add_section(group)
    # copy all channel parameters into the section
    for param, value in channel.params.iteritems():
        out.set(group, param, value)
    # the channel entry is "<name> <rate>" when a sample rate is known
    if channel.sample_rate:
        entry = '%s %s' % (str(channel),
                           str(channel.sample_rate.to('Hz').value))
    else:
        entry = str(channel)
    # append to the section's 'channels' option, creating it on first use
    try:
        cl = out.get(group, 'channels')
    except configparser.NoOptionError:
        out.set(group, 'channels', '\n%s' % entry)
    else:
        out.set(group, 'channels', cl + '\n%s' % entry)

    # NOTE(review): the ``file`` builtin and dict.iteritems() exist only on
    # Python 2 -- this writer presumably targets Python 2; confirm before
    # porting.
    if isinstance(fobj, file):
        close = False
    else:
        fobj = open(fobj, 'w')
        close = True
    out.write(fobj)
    # only close handles we opened ourselves
    if close:
        fobj.close()


registry.register_reader('ini', ChannelList, read_channel_list_file)
registry.register_identifier('ini', ChannelList,
                             identify_factory('.ini', '.clf'))
registry.register_writer('ini', ChannelList, write_channel_list_file)
    'PathlossModel', 'PersistenceSatModel',
    'PixelAreaModel', 'NirspecSlitAreaModel', 'NirspecMosAreaModel',
    'NirspecIfuAreaModel',
    'FgsImgPhotomModel', 'MirImgPhotomModel', 'MirLrsPhotomModel',
    'MirMrsPhotomModel', 'NrcImgPhotomModel', 'NrcWfssPhotomModel',
    'NisImgPhotomModel', 'NisSossPhotomModel', 'NisWfssPhotomModel',
    'NrsFsPhotomModel', 'NrsMosPhotomModel',
    'PsfMaskModel',
    'QuadModel', 'RampModel', 'MIRIRampModel',
    'RampFitOutputModel', 'ReadnoiseModel',
    'ReferenceFileModel', 'ReferenceCubeModel', 'ReferenceImageModel',
    'ReferenceQuadModel',
    'RegionsModel', 'ResetModel',
    'ResolutionModel', 'MiriResolutionModel',
    'RSCDModel', 'SaturationModel', 'SlitDataModel', 'SlitModel', 'SpecModel',
    'SourceModelContainer', 'StepParsModel',
    'StrayLightModel', 'SuperBiasModel',
    'SpecwcsModel', 'ThroughputModel',
    'TrapDensityModel', 'TrapParsModel', 'TrapsFilledModel',
    'TsoPhotModel', 'WavelengthrangeModel', 'WaveCorrModel', 'WfssBkgModel'
]

# Initialize the astropy.io registry,
# but only the first time this module is called
try:
    # sentinel defined on first successful run; NameError means first call
    _defined_models
except NameError:
    with registry.delay_doc_updates(DataModel):
        registry.register_reader('datamodel', DataModel, ndmodel.read)
        registry.register_writer('datamodel', DataModel, ndmodel.write)
        registry.register_identifier('datamodel', DataModel, ndmodel.identify)
    # map every public model name (skipping __all__[0]) to its class object
    _all_models = __all__[1:]
    _local_dict = locals()
    _defined_models = {k: _local_dict[k] for k in _all_models}
def test_register_reader_force():
    """Re-registering a reader for the same (format, class) pair succeeds
    when ``force=True`` is passed."""
    io_registry.register_reader('test', TestData, empty_reader)
    # a second registration would normally conflict; force=True overrides
    io_registry.register_reader('test', TestData, empty_reader, force=True)
    raise NotImplementedError()


def write_fits_ldo(data, filename, overwrite=False):
    # Spectra may have HDUList objects instead of HDUs because they
    # have a beam table attached, so we want to try that first
    # (a more elegant way to write this might be to do "self._hdu_general.write"
    # and create a property `self._hdu_general` that selects the right one...)
    if hasattr(data, 'hdulist'):
        try:
            data.hdulist.writeto(filename, overwrite=overwrite)
        except TypeError:
            # presumably an older astropy whose writeto() only accepts the
            # 'clobber' keyword -- TODO confirm
            data.hdulist.writeto(filename, clobber=overwrite)
    elif hasattr(data, 'hdu'):
        try:
            data.hdu.writeto(filename, overwrite=overwrite)
        except TypeError:
            data.hdu.writeto(filename, clobber=overwrite)


# register FITS read/write/identify for each cube flavour; the
# lower-dimensional objects get write/identify only
io_registry.register_reader('fits', BaseSpectralCube, load_fits_cube)
io_registry.register_writer('fits', BaseSpectralCube, write_fits_cube)
io_registry.register_identifier('fits', BaseSpectralCube, is_fits)

io_registry.register_reader('fits', StokesSpectralCube, load_fits_cube)
io_registry.register_writer('fits', StokesSpectralCube, write_fits_cube)
io_registry.register_identifier('fits', StokesSpectralCube, is_fits)

io_registry.register_writer('fits', LowerDimensionalObject, write_fits_ldo)
io_registry.register_identifier('fits', LowerDimensionalObject, is_fits)
    Raises
    -------
    ValueError
        - If ``self.mask`` is set but not a `numpy.ndarray`.
        - If ``self.uncertainty`` is set but not a
          `~astropy.nddata.StdDevUncertainty`.
        - If ``self.uncertainty`` is set but has another unit than
          ``self.data``.

    NotImplementedError
        Saving flags is not supported.
    """
    hdu = ccd_data.to_hdu(hdu_mask=hdu_mask, hdu_uncertainty=hdu_uncertainty,
                          hdu_flags=hdu_flags)
    hdu.writeto(filename, **kwd)


registry.register_reader('fits', CCDData, fits_ccddata_reader)
registry.register_writer('fits', CCDData, fits_ccddata_writer)
registry.register_identifier('fits', CCDData, fits.connect.is_fits)

# Copy the reader/writer docstrings onto CCDData.read/.write; the
# AttributeError fallback presumably handles bound methods whose __doc__
# is only writable on __func__ (e.g. Python 2) -- TODO confirm.
try:
    CCDData.read.__doc__ = fits_ccddata_reader.__doc__
except AttributeError:
    CCDData.read.__func__.__doc__ = fits_ccddata_reader.__doc__

try:
    CCDData.write.__doc__ = fits_ccddata_writer.__doc__
except AttributeError:
    CCDData.write.__func__.__doc__ = fits_ccddata_writer.__doc__
        the default `~astropy.io.fits.PrimaryHDU`.

    kwd :
        All additional keywords are passed to :py:mod:`astropy.io.fits`

    Raises
    ------
    ValueError
        - If ``self.mask`` is set but not a `numpy.ndarray`.
        - If ``self.uncertainty`` is set but not a
          `~astropy.nddata.StdDevUncertainty`.
        - If ``self.uncertainty`` is set but has another unit than
          ``self.data``.

    NotImplementedError
        Saving flags is not supported.
    """
    hdu = ccd_data.to_hdu(
        hdu_mask=hdu_mask, hdu_uncertainty=hdu_uncertainty,
        key_uncertainty_type=key_uncertainty_type, hdu_flags=hdu_flags,
        as_image_hdu=as_image_hdu)
    if as_image_hdu:
        # prepend a minimal primary HDU so the image HDU is a valid
        # extension rather than the primary HDU
        hdu.insert(0, fits.PrimaryHDU())
    hdu.writeto(filename, **kwd)


# delay_doc_updates avoids rebuilding the read/write docstrings after
# each individual registration
with registry.delay_doc_updates(CCDData):
    registry.register_reader('fits', CCDData, fits_ccddata_reader)
    registry.register_writer('fits', CCDData, fits_ccddata_writer)
    registry.register_identifier('fits', CCDData, fits.connect.is_fits)
def astropy_tabular_data(*args, **kwargs):
    """
    Build a data set from a table. We restrict ourselves to tables
    with 1D columns.

    All arguments are passed to
        astropy.table.Table.read(...).
    """
    from distutils.version import LooseVersion
    from astropy import __version__
    if LooseVersion(__version__) < LooseVersion("0.2"):
        raise RuntimeError("Glue requires astropy >= v0.2. Please update")

    result = Data()

    # Read the table
    from astropy.table import Table

    # Add identifiers for ASCII data
    from astropy.io import registry
    if LooseVersion(__version__) < LooseVersion("0.3"):
        registry.register_identifier('ascii', Table, _ascii_identifier_v02,
                                     force=True)
    else:
        # Basically, we always want the plain ascii reader for now.
        # But astropy will complain about ambiguous formats (or use another
        # reader) unless we remove other registry identifiers and set up our
        # own reader
        nope = lambda *a, **k: False
        registry.register_identifier('ascii.glue', Table,
                                     _ascii_identifier_v03, force=True)
        registry.register_identifier('ascii.csv', Table, nope, force=True)
        registry.register_identifier('ascii.fast_csv', Table, nope, force=True)
        registry.register_identifier('ascii', Table, nope, force=True)
        registry.register_reader('ascii.glue', Table,
                                 lambda path: Table.read(path, format='ascii'),
                                 force=True)

    # Import FITS compatibility (for Astropy 0.2.x)
    from ..external import fits_io

    # BUGFIX: this was a bare ``except:``, which also swallows SystemExit
    # and KeyboardInterrupt; Exception is sufficient for read failures.
    try:
        table = Table.read(*args, **kwargs)
    except Exception:
        # In Python 3, as of Astropy 0.4, if the format is not specified, the
        # automatic format identification will fail (astropy/astropy#3013).
        # This is only a problem for ASCII formats however, because it is due
        # to the fact that the file object in io.ascii does not rewind to the
        # start between guesses (due to a bug), so here we can explicitly try
        # the ASCII format if the format keyword was not already present.
        if 'format' not in kwargs:
            table = Table.read(*args, format='ascii.glue', **kwargs)
        else:
            raise

    # Loop through columns and make component list
    for column_name in table.columns:
        c = table[column_name]
        # older astropy spells the attribute 'units'
        u = c.unit if hasattr(c, 'unit') else c.units

        if table.masked:
            # fill array for now
            try:
                c = c.filled(fill_value=np.nan)
            except ValueError:  # assigning nan to integer dtype
                c = c.filled(fill_value=-1)

        nc = Component.autotyped(c, units=u)
        result.add_component(nc, column_name)

    return result
        # normalise unit spellings used by the source files
        if tab[colname].unit == 'e-/s':
            tab[colname].unit = 'electron/s'
        if tab[colname].unit == 'pixels':
            tab[colname].unit = 'pixel'
        # Rename columns to lowercase
        tab.rename_column(colname, colname.lower())

    # Filter out NaN rows
    nans = np.isnan(tab['time'].data)
    if np.any(nans):
        warnings.warn('Ignoring {0} rows with NaN times'.format(np.sum(nans)))
    tab = tab[~nans]

    # Time column is dependent on source and we correct it here:
    # the reference epoch and time scale come from the FITS header
    reference_date = Time(hdu.header['BJDREFI'], hdu.header['BJDREFF'],
                          scale=hdu.header['TIMESYS'].lower(),
                          format='jd')
    time = reference_date + TimeDelta(tab['time'].data)
    time.format = 'isot'

    # Remove original time column
    tab.remove_column('time')

    return TimeSeries(time=time, data=tab)


# Kepler and TESS files share the same reader
registry.register_reader('kepler.fits', TimeSeries, kepler_fits_reader)
registry.register_reader('tess.fits', TimeSeries, kepler_fits_reader)
    # write segment-summary table (known segments)
    for vseg in flag.known:
        segsum = lsctables.SegmentSum()
        segsum.segment_def_id = segdef.segment_def_id
        segsum.set(map(LIGOTimeGPS, map(float, vseg)))
        segsum.comment = None
        segsum.segment_sum_id = lsctables.SegmentSumTable.get_next_id()
        segsum.process_id = process_id
        segsumtab.append(segsum)

    # write segment table (active segments)
    for aseg in flag.active:
        seg = lsctables.Segment()
        seg.segment_def_id = segdef.segment_def_id
        seg.set(map(LIGOTimeGPS, map(float, aseg)))
        seg.segment_id = lsctables.SegmentTable.get_next_id()
        seg.process_id = process_id
        segtab.append(seg)

    return xmldoc


# register methods for DataQualityFlag
registry.register_reader('ligolw', DataQualityFlag, read_flag)
registry.register_writer('ligolw', DataQualityFlag, write_ligolw)
registry.register_identifier('ligolw', DataQualityFlag, identify_ligolw)

# register methods for DataQualityDict
registry.register_reader('ligolw', DataQualityDict, read_flag_dict)
registry.register_writer('ligolw', DataQualityDict, write_ligolw)
registry.register_identifier('ligolw', DataQualityDict, identify_ligolw)
def astropy_tabular_data(*args, **kwargs):
    """
    Build a data set from a table. We restrict ourselves to tables
    with 1D columns.

    All arguments are passed to
        astropy.table.Table.read(...).
    """
    from distutils.version import LooseVersion
    from astropy import __version__
    if LooseVersion(__version__) < LooseVersion("0.2"):
        raise RuntimeError("Glue requires astropy >= v0.2. Please update")

    result = Data()

    # Read the table
    from astropy.table import Table

    # Add identifiers for ASCII data
    from astropy.io import registry
    if LooseVersion(__version__) < LooseVersion("0.3"):
        registry.register_identifier('ascii', Table, _ascii_identifier_v02,
                                     force=True)
    else:
        # Basically, we always want the plain ascii reader for now.
        # But astropy will complain about ambiguous formats (or use another
        # reader) unless we remove other registry identifiers and set up our
        # own reader
        nope = lambda *a, **k: False
        registry.register_identifier('ascii.glue', Table,
                                     _ascii_identifier_v03, force=True)
        registry.register_identifier('ascii.csv', Table, nope, force=True)
        registry.register_identifier('ascii.fast_csv', Table, nope, force=True)
        registry.register_identifier('ascii', Table, nope, force=True)
        registry.register_reader('ascii.glue', Table,
                                 lambda path: Table.read(path, format='ascii'),
                                 force=True)

    # BUGFIX: this was a bare ``except:``, which also swallows SystemExit
    # and KeyboardInterrupt; Exception is sufficient for read failures.
    try:
        table = Table.read(*args, **kwargs)
    except Exception:
        # In Python 3, as of Astropy 0.4, if the format is not specified, the
        # automatic format identification will fail (astropy/astropy#3013).
        # This is only a problem for ASCII formats however, because it is due
        # to the fact that the file object in io.ascii does not rewind to the
        # start between guesses (due to a bug), so here we can explicitly try
        # the ASCII format if the format keyword was not already present.
        if 'format' not in kwargs:
            table = Table.read(*args, format='ascii.glue', **kwargs)
        else:
            raise

    # Loop through columns and make component list
    for column_name in table.columns:
        c = table[column_name]
        # older astropy spells the attribute 'units'
        u = c.unit if hasattr(c, 'unit') else c.units

        if table.masked:
            # fill array for now
            try:
                c = c.filled(fill_value=np.nan)
            except ValueError:  # assigning nan to integer dtype
                c = c.filled(fill_value=-1)

        nc = Component.autotyped(c, units=u)
        result.add_component(nc, column_name)

    return result
    return outFile


def makeEmptyTable():
    """
    Returns an empty sky model table.
    """
    # a single dummy row is parsed to establish the column schema,
    # then removed below
    outlines = ['Z, Z, 0.0, 0.0, 0.0\n']
    colNames = ['Name', 'Type', 'Ra', 'Dec', 'I']
    converters = {}
    # force the Name and Type columns to a 100-character dtype built from
    # ``numpy_type`` (presumably a string dtype code -- TODO confirm)
    nameCol = 'col{0}'.format(colNames.index('Name')+1)
    converters[nameCol] = [ascii.convert_numpy('{}100'.format(numpy_type))]
    typeCol = 'col{0}'.format(colNames.index('Type')+1)
    converters[typeCol] = [ascii.convert_numpy('{}100'.format(numpy_type))]
    table = Table.read(outlines, guess=False, format='ascii.no_header',
                       delimiter=',', names=colNames, comment='#',
                       data_start=0, converters=converters)
    # drop the dummy row, leaving an empty table with the right columns
    table.remove_rows(0)
    return table


# Register the file reader, identifier, and writer functions with astropy.io
registry.register_reader('makesourcedb', Table, skyModelReader)
registry.register_identifier('makesourcedb', Table, skyModelIdentify)
registry.register_writer('makesourcedb', Table, skyModelWriter)
registry.register_writer('ds9', Table, ds9RegionWriter)
registry.register_writer('kvis', Table, kvisAnnWriter)
registry.register_writer('casa', Table, casaRegionWriter)
registry.register_writer('factor', Table, factorDirectionsWriter)
from astropy.table import Table
from astropy.io.registry import (register_identifier, register_reader,
                                 register_writer)
from pydl.pydlutils.yanny import (is_yanny, read_table_yanny,
                                  write_table_yanny, yanny)
from pathlib import Path
import numpy as np
import ppv.config

# Hook yanny-format I/O into the astropy Table registry.
register_identifier('yanny', Table, is_yanny)
register_reader('yanny', Table, read_table_yanny)
register_writer('yanny', Table, write_table_yanny)

platePlans = Table.read('../data/raw/platePlans.par', format='yanny',
                        tablename='PLATEPLANS')
print('platePlans.par is read')

# Select SDSS-V plates: any plate run tagged either 'mwm' or 'bhm'.
is_mwm_plate = np.array(['mwm' in prun for prun in platePlans['platerun']])
is_bhm_plate = np.array(['bhm' in prun for prun in platePlans['platerun']])
is_sdss5_plate = np.bitwise_or(is_mwm_plate, is_bhm_plate)
sdss5_plates = platePlans[is_sdss5_plate]

# parent in root directory of repository
dir_ = (Path.cwd().parent / ppv.config._src_dir) / 'data'
out_filename = (dir_ / 'platePlans_sdss5.fits').as_posix()
# BUGFIX: ``overwrite`` expects a bool; the string 'True' only "worked"
# because any non-empty string is truthy.
sdss5_plates.write(out_filename, overwrite=True, format='fits')
print(f'SDSS-V platePlans table written to {out_filename}')
                line['equivalent_width'])
        else:
            EW = space
        f.write(
            fmt.format(line['wavelength'], line['species'], line['expot'],
                       line['loggf'], C6, D0, EW, line['comments']) + "\n")

    def write_latex(self, filename, sortby=['species', 'wavelength'],
                    write_cols=['wavelength', 'element', 'expot', 'loggf']):
        # Write selected columns as an AASTeX table, sorted for publication.
        # NOTE(review): mutable default arguments are shared across calls;
        # harmless here since they are only read, but worth confirming.
        new_table = self.copy()
        new_table.sort(sortby)
        new_table = new_table[write_cols]
        new_table.write(filename, format='ascii.aastex')


## Add to astropy.io registry
def _moog_identifier(*args, **kwargs):
    # identify .moog files by file extension; ``basestring`` only exists on
    # Python 2, so a NameError falls through to the str-based check
    try:  # python 2
        return isinstance(args[0], basestring) and \
            args[0].lower().endswith(".moog")
    except:  # python 3
        return isinstance(args[0], str) and args[0].lower().endswith(".moog")


registry.register_writer("moog", LineList, LineList.write_moog)
registry.register_reader("moog", LineList, LineList.read_moog)
registry.register_identifier("moog", LineList, _moog_identifier)
    # pull Measurement-specific keywords out before the CCDData read
    _id = kwd.pop('identifier', 'unknown')
    _title = kwd.pop('title', None)
    _squeeze = kwd.pop('squeeze', True)

    # suppress INFO messages about units in FITS file. e.g. useless ones like:
    # "INFO: using the unit erg / (cm2 s sr) passed to the FITS reader instead of the unit erg s-1 cm-2 sr-1 in the FITS file."
    log.setLevel('WARNING')
    z = CCDData.read(
        filename,
        unit=unit)  # ,hdu,uu,hdu_uncertainty,hdu_mask,hdu_flags,key_uncertainty_type, **kwd)
    if _squeeze:
        z = utils.squeeze(z)

    # @TODO if uncertainty plane not present, look for RMS keyword
    # @TODO header values get stuffed into WCS, others may be dropped by
    # CCDData._generate_wcs_and_update_header
    try:
        z = Measurement(z, unit=z._unit, title=_title)
    except Exception:
        raise TypeError(
            'could not convert fits_measurement_reader output to Measurement')
    z.identifier(_id)
    # astropy.io.registry.read creates a FileIO object before calling the registered
    # reader (this method), so the filename is FileIO.name.
    z._filename = filename.name
    log.setLevel('INFO')  # set back to default
    return z


with registry.delay_doc_updates(Measurement):
    registry.register_reader('fits', Measurement, fits_measurement_reader)