def test_set_converters(numpy):
    """Check that explicit converters control the parsed column types.

    'zabs1.nh' is given both int32 and float32 converters; the reader is
    expected to settle on float32.  'p1.gamma' is forced to string, so its
    first value must come back as the literal text, not a float.
    """
    column_converters = {
        'zabs1.nh': [asciitable.convert_numpy('int32'),
                     asciitable.convert_numpy('float32')],
        'p1.gamma': [asciitable.convert_numpy('str')],
    }
    table = asciitable.read('t/test4.dat',
                            converters=column_converters,
                            numpy=numpy)
    assert_equal(str(table['zabs1.nh'].dtype), 'float32')
    assert_equal(table['p1.gamma'][0], '1.26764544642')
def get_ifot(event_type, start=None, stop=None, props=None, columns=None,
             timeout=TIMEOUT, types=None):
    """Query the iFOT server for an event table and parse it.

    :param event_type: iFOT event type (e.g. 'CAP'); joined with ``props``
        to form the ``e=`` query parameter.
    :param start: start time (anything ``DateTime`` accepts); defaults to
        '1998:001'.
    :param stop: stop time; ``DateTime(None)`` gives the current time.
    :param props: extra event properties appended to ``event_type``.
    :param columns: explicit column list for the query (optional).
    :param timeout: ``requests`` timeout in seconds.
    :param types: mapping of column name -> numpy dtype name used to build
        asciitable converters.
    :returns: table parsed by ``asciitable.read`` (Tab reader, no guessing).
    """
    # Use None sentinels instead of mutable default arguments: a literal
    # [] / {} default is created once and shared across every call, so any
    # mutation would leak between calls.
    if props is None:
        props = []
    if columns is None:
        columns = []
    if types is None:
        types = {}
    start = DateTime('1998:001' if start is None else start)
    stop = DateTime(stop)
    event_props = '.'.join([event_type] + props)
    params = odict(r='home',
                   t='qserver',
                   format='tsv',
                   tstart=start.date,
                   tstop=stop.date,
                   e=event_props,
                   ul='7',
                   )
    if columns:
        params['columns'] = ','.join(columns)
    # Get the TSV data for the iFOT event table
    url = ROOTURL + URLS['ifot']
    response = requests.get(url, auth=get_auth(), params=params,
                            timeout=timeout)
    # Strip non-ASCII, then flatten CRLF; records are separated by '\t\n'.
    text = response.text.encode('ascii', 'ignore')
    text = re.sub(r'\r\n', ' ', text)
    lines = [x for x in text.split('\t\n') if x.strip()]
    converters = {key: [asciitable.convert_numpy(getattr(np, type_))]
                  for key, type_ in types.items()}
    dat = asciitable.read(lines, Reader=asciitable.Tab, guess=False,
                          converters=converters)
    return dat
def run_ascii(): """ Load records with asciitable. * PyPy: OK. Development stopped (was moved into the Astropy project as astropy.io.ascii). * Source: https://github.com/taldcroft/asciitable * Docs: Decent * Independent: NumPy not required but recommended. * Small: no * Can specify column data types: yes. If trying to guess, will parse zips as numbers. * Can read in chunks: no * Can skip columns: yes * Can stream: no * Return type: wrapper around file or iterable, each row is a tuple * Memory usage: ~ 60 Mb * Timing: around 0.7 sec """ import asciitable import numpy reader = asciitable.get_reader( Reader=asciitable.FixedWidth, header_start=None, data_start=2, col_starts=(0, 5, 7, 35, 36, 41, 48, 56, 59, 65, 66, 67, 71, 75, 78), col_ends=(4, 6, 34, 35, 40, 47, 55, 58, 64, 65, 66, 70, 74, 77, 80), names=( 'zip_code', 'state_code', 'city_name', 'zip_type', 'county_code', 'latitude', 'longitude', 'area_code', 'finance_code', 'city_official', 'facility', 'msa_code', 'pmsa_code', 'filler' ), converters={ 'zip_code': [asciitable.convert_numpy(numpy.str)] }, include_names=( 'zip_code', 'state_code', 'city_name', 'county_code', 'latitude', 'longitude', 'area_code', 'msa_code', 'pmsa_code' ), ) data = reader.read( 'data/ZIP.DAT' ) records = 0 for row in data: records += 1 print 'Records:', records
def test_types_from_dat(numpy):
    """Verify column types inferred when re-reading a parsed table.

    Reads a tiny inline table with explicit converters for columns 'a'
    (float) and 'e' (str), feeds the result through the Memory reader, and
    checks each resulting column's type class.
    """
    if numpy:
        # Use the builtins directly: np.float / np.str were plain aliases
        # of the builtin float / str, deprecated in NumPy 1.20 and removed
        # in 1.24, so this is behavior-identical and works on modern NumPy.
        converters = {'a': [asciitable.convert_numpy(float)],
                      'e': [asciitable.convert_numpy(str)]}
    else:
        converters = {'a': [asciitable.convert_list(float)],
                      'e': [asciitable.convert_list(str)]}
    dat = asciitable.read(['a b c d e', '1 1 cat 2.1 4.2'],
                          Reader=asciitable.Basic,
                          converters=converters,
                          numpy=numpy)
    reader = asciitable.get_reader(Reader=asciitable.Memory, numpy=numpy)
    reader.read(dat)
    print('numpy=%s' % numpy)
    print('dat=%s' % repr(dat))
    print('reader.table=%s' % repr(reader.table))
    print('types=%s' % repr([x.type for x in reader.cols]))
    assert_true(issubclass(reader.cols[0].type, asciitable.FloatType))
    assert_true(issubclass(reader.cols[1].type, asciitable.IntType))
    assert_true(issubclass(reader.cols[2].type, asciitable.StrType))
    assert_true(issubclass(reader.cols[3].type, asciitable.FloatType))
    assert_true(issubclass(reader.cols[4].type, asciitable.StrType))
def run_ascii(): """ Load records with asciitable. * PyPy: OK. Development stopped (was moved into the Astropy project as astropy.io.ascii). * Source: https://github.com/taldcroft/asciitable * Docs: Decent * Independent: NumPy not required but recommended. * Small: no * Can specify column data types: yes. If trying to guess, will parse zips as numbers. * Can read in chunks: no * Can skip columns: yes * Can stream: no * Return type: wrapper around file or iterable, each row is a tuple * Memory usage: ~ 60 Mb * Timing: around 0.7 sec """ import asciitable import numpy reader = asciitable.get_reader( Reader=asciitable.FixedWidth, header_start=None, data_start=2, col_starts=(0, 5, 7, 35, 36, 41, 48, 56, 59, 65, 66, 67, 71, 75, 78), col_ends=(4, 6, 34, 35, 40, 47, 55, 58, 64, 65, 66, 70, 74, 77, 80), names=('zip_code', 'state_code', 'city_name', 'zip_type', 'county_code', 'latitude', 'longitude', 'area_code', 'finance_code', 'city_official', 'facility', 'msa_code', 'pmsa_code', 'filler'), converters={'zip_code': [asciitable.convert_numpy(numpy.str)]}, include_names=('zip_code', 'state_code', 'city_name', 'county_code', 'latitude', 'longitude', 'area_code', 'msa_code', 'pmsa_code'), ) data = reader.read('data/ZIP.DAT') records = 0 for row in data: records += 1 print 'Records:', records
#calculate outermag
#
# Reads quasar photometry from qso2.csv and, for each object, converts the
# total (model) and inner (fiber) u-band AB magnitudes to fluxes, subtracts
# to get the flux outside the fiber, and converts that back to a magnitude.
import asciitable, numpy as np, matplotlib.pyplot as plt, math

converters = {'specobjID': [asciitable.convert_numpy(np.int64)],
              'objID': [asciitable.convert_numpy(np.int64)]}
total = asciitable.read('qso2.csv', converters=converters)
totalm = total['modelmag_u']
innerm = total['fibermag_u']

totalflux = []
innerflux = []
outerflux = []
outerm = []
for i in range(len(total)):
    # AB magnitude -> flux: f = 10 ** ((m + 48.6) / -2.5)
    totalf = 10 ** ((totalm[i] + 48.6) / -2.5)
    innerf = 10 ** ((innerm[i] + 48.6) / -2.5)
    totalflux.append(totalf)
    innerflux.append(innerf)
    outerf = totalf - innerf
    outerflux.append(outerf)
    # Sentinel magnitude 100 for non-positive outer flux.  The original
    # guard was `< 0`, which let outerf == 0 reach math.log10(0) and raise
    # "math domain error"; `<= 0` covers that boundary with the same
    # sentinel semantics.
    if outerf <= 0:
        outer_mag = 100
    else:
        outer_mag = -2.5 * math.log10(outerf) - 48.6
    outerm.append(outer_mag)