def test_memory_from_table(numpy):
    """Round-trip a Daophot table through the Memory reader.

    The Memory reader is fed twice: once from the data object produced by
    the Daophot reader, and once from itself (re-reading its own table).
    Both round trips must preserve column names and values.
    """
    daophot_reader = asciitable.get_reader(numpy=numpy, Reader=asciitable.Daophot)
    original = daophot_reader.read('t/daophot.dat')
    memory_reader = asciitable.get_reader(Reader=asciitable.Memory, numpy=numpy)
    # First pass reads the data object, second pass reads the reader itself.
    for source in (original, memory_reader):
        copied = memory_reader.read(source)
        assert(original.dtype.names == copied.dtype.names)
        _test_values_equal(original, copied, numpy)
def test_write_table():
    """Yield write checks for every test definition.

    Each definition is exercised twice: once against the reader object and
    once against the data object it produced.
    """
    reader = asciitable.get_reader(Reader=asciitable.Daophot)
    table_data = reader.read('t/daophot.dat')
    for test_def in test_defs:
        for target in (reader, table_data):
            yield check_write_table, test_def, target
def test_write_table_no_numpy():
    """Yield write checks with numpy support disabled (generator variant).

    NOTE(review): a second function of the same name appears later in this
    file and shadows this one at import time -- confirm which is intended.
    """
    reader = asciitable.get_reader(Reader=asciitable.Daophot, numpy=False)
    table_data = reader.read("t/daophot.dat")
    for test_def in test_defs:
        for target in (reader, table_data):
            yield check_write_table, test_def, target
def test_write_table_no_numpy():
    """Run write checks with numpy support disabled, invoking each directly.

    NOTE(review): this redefines the generator variant of the same name
    earlier in the file, shadowing it -- confirm the duplication is wanted.
    """
    reader = asciitable.get_reader(Reader=asciitable.Daophot, numpy=False)
    table_data = reader.read('t/daophot.dat')
    for test_def in test_defs:
        for target in (reader, table_data):
            check_write_table(test_def, target)
def test_custom_process_lines(numpy):
    """A custom inputter ``process_lines`` hook can strip '|' decorations."""
    edge_bars = re.compile(r'^\| | \|$', re.VERBOSE)

    def strip_bars(lines):
        # Trim whitespace, drop blank lines, and remove the bar at each end.
        trimmed = [x.strip() for x in lines]
        return [edge_bars.sub('', x) for x in trimmed if x]

    reader = asciitable.get_reader(delimiter='|', numpy=numpy)
    reader.inputter.process_lines = strip_bars
    data = reader.read('t/bars_at_ends.txt')
    assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'))
    assert_equal(len(data), 3)
def test_custom_process_line(numpy):
    """A custom splitter ``process_line`` hook can remove a leading '|'."""
    def drop_leading_bar(line):
        # Trim surrounding whitespace, then strip a leading pipe plus any
        # whitespace that follows it.
        return re.sub(r'^\|\s*', '', line.strip())

    reader = asciitable.get_reader(data_start=2, delimiter='|', numpy=numpy)
    reader.header.splitter.process_line = drop_leading_bar
    reader.data.splitter.process_line = drop_leading_bar
    data = reader.read('t/nls1_stackinfo.dbout')
    expected_cols = get_testfiles('t/nls1_stackinfo.dbout')['cols']
    # The first expected column is the row index, which this read drops.
    assert_equal(data.dtype.names, expected_cols[1:])
def __init__(self):
    """Parse CLI args, load errata/job state, and prepare result tables.

    Side effects: reads CLI options via ``__parseArgs``, reads or creates
    files under ``./result``, and -- when known-issue parsing is enabled --
    reads config files under ``./known_issues``.
    """
    self.autoRerun = ""
    self.parseKnownIssues = ""
    self.errataName = ""
    self.errataLname = ""
    self.rerunedRSId = []
    self.force = False
    self.__parseArgs()  # populates the attributes above from the command line
    self.errataInfo = ErrataInfo(self.errataName, self.errataLname, False)
    self.resultPath = "./result"
    self.jobStatePath = "%s/%s.%s" % (self.resultPath, self.errataInfo.errataId, "jobstate")
    genLogger.info("jobStatePath : %s", self.jobStatePath)
    self.jobState = ConfigObj(self.jobStatePath, encoding="utf8")
    if self.parseKnownIssues == "y":
        self.knownIssuesPath = []
        self.knownIssues = []
        self.knownIssuesRPath = "./known_issues"
        # Version-specific file first, then the major-release fallback.
        self.knownIssuesPath.append(
            "%s/%s.%s" % (self.knownIssuesRPath, self.errataInfo.rhel_version, "known_issues")
        )
        self.knownIssuesPath.append(
            "%s/RHEL-%s.%s" % (self.knownIssuesRPath, self.errataInfo.major, "known_issues")
        )
        # Fix: the original loop bound the name 'str', shadowing the builtin
        # for the rest of this method; use enumerate instead.  The log output
        # is byte-identical to before.
        for i, issuesPath in enumerate(self.knownIssuesPath):
            genLogger.info("knownIssuesPath%d : %s" % (i, issuesPath))
            self.knownIssues.append(ConfigObj(issuesPath, encoding="utf8"))
    self.knownIssuesResult = "%s/%s.%s" % (self.resultPath, self.errataInfo.errataId, "knownIssues")
    self.unknownIssuesResult = "%s/%s.%s" % (self.resultPath, self.errataInfo.errataId, "unknownIssues")
    # Placeholder row used to seed each result table the first time it is
    # created; columns must match self.columns below.
    self.tableTemple = {
        "Path": ["---"],
        "TaskName": ["---"],
        "TaskResult": ["---"],
        "TaskStatus": ["---"],
        "ResultPath": ["---"],
        "PathResult": ["---"],
        "Checked": ["---"],
    }
    self.columns = ["Path", "TaskName", "TaskResult", "TaskStatus", "ResultPath", "PathResult", "Checked"]
    # Seed both result files identically if they do not exist yet
    # (deduplicates the two copy-pasted write blocks of the original).
    for resultFile in (self.knownIssuesResult, self.unknownIssuesResult):
        if not os.path.exists(resultFile):
            asciitable.write(
                self.tableTemple, resultFile, names=self.columns, Writer=asciitable.FixedWidth
            )
    reader = asciitable.get_reader(Reader=asciitable.FixedWidth)
    self.knownIssuesTable = reader.read(self.knownIssuesResult)
    self.unknownIssuesTable = reader.read(self.unknownIssuesResult)
def run_ascii():
    """ Load records with asciitable.

    * PyPy: OK. Development stopped (was moved into the Astropy project as
      astropy.io.ascii).
    * Source: https://github.com/taldcroft/asciitable
    * Docs: Decent
    * Independent: NumPy not required but recommended.
    * Small: no
    * Can specify column data types: yes. If trying to guess, will parse
      zips as numbers.
    * Can read in chunks: no
    * Can skip columns: yes
    * Can stream: no
    * Return type: wrapper around file or iterable, each row is a tuple
    * Memory usage: ~ 60 Mb
    * Timing: around 0.7 sec
    """
    import asciitable
    import numpy

    # Fixed-width layout: no header row (header_start=None), data starts on
    # the third line, and column extents are given explicitly.
    # NOTE(review): 15 col_starts/col_ends entries but only 14 names --
    # verify this matches the actual ZIP.DAT layout.
    reader = asciitable.get_reader(
        Reader=asciitable.FixedWidth,
        header_start=None,
        data_start=2,
        col_starts=(0, 5, 7, 35, 36, 41, 48, 56, 59, 65, 66, 67, 71, 75, 78),
        col_ends=(4, 6, 34, 35, 40, 47, 55, 58, 64, 65, 66, 70, 74, 77, 80),
        names=(
            'zip_code', 'state_code', 'city_name', 'zip_type',
            'county_code', 'latitude', 'longitude', 'area_code',
            'finance_code', 'city_official', 'facility', 'msa_code',
            'pmsa_code', 'filler'
        ),
        # Force zip_code to parse as a string so leading zeros survive.
        # NOTE(review): numpy.str was removed in NumPy >= 1.20 -- confirm
        # the pinned NumPy version before running on a modern stack.
        converters={
            'zip_code': [asciitable.convert_numpy(numpy.str)]
        },
        include_names=(
            'zip_code', 'state_code', 'city_name', 'county_code',
            'latitude', 'longitude', 'area_code', 'msa_code', 'pmsa_code'
        ),
    )
    data = reader.read(
        'data/ZIP.DAT'
    )
    # Count rows by iterating; asciitable cannot stream, so the whole file
    # is already in memory at this point.
    records = 0
    for row in data:
        records += 1
    # Python 2 print statement -- this benchmark predates Python 3.
    print 'Records:', records
def test_read_normal_names(numpy):
    """Nice, typical fixed format table with col names provided"""
    table = """
# comment (with blank line above)
|  Col1  |  Col2   |
|  1.2   | "hello" |
|  2.4   |'s worlds|
"""
    reader = asciitable.get_reader(Reader=asciitable.FixedWidth,
                                   names=('name1', 'name2'))
    result = reader.read(table)
    # Caller-supplied names take precedence over the header row.
    assert_equal(reader.header.colnames, ('name1', 'name2'))
    assert_almost_equal(result[1][0], 2.4)
def test_read_normal_exclude(numpy):
    """Nice, typical fixed format table with col name excluded"""
    table = """
# comment (with blank line above)
|  Col1  |  Col2   |
|  1.2   | "hello" |
|  2.4   |'s worlds|
"""
    reader = asciitable.get_reader(Reader=asciitable.FixedWidth,
                                   exclude_names=('Col1',))
    result = reader.read(table)
    # Only the non-excluded column survives; row 1 keeps its string value.
    assert_equal(reader.header.colnames, ('Col2',))
    assert_equal(result[1][0], "'s worlds")
def test_read_weird(numpy):
    """Weird input table with data values chopped by col extent """
    table = """
  Col1  |  Col2 |
  1.2       "hello"
  2.4   sdf's worlds
"""
    reader = asciitable.get_reader(Reader=asciitable.FixedWidth)
    result = reader.read(table)
    assert_equal(reader.header.colnames, ('Col1', 'Col2'))
    assert_almost_equal(result[1][0], 2.4)
    # Values wider than the column extent are truncated at both edges.
    assert_equal(result[0][1], '"hel')
    assert_equal(result[1][1], "df's wo")
def test_custom_splitters(numpy):
    """Substituting BaseSplitter for header and data still parses test4.dat."""
    reader = asciitable.get_reader(numpy=numpy)
    reader.header.splitter = asciitable.BaseSplitter()
    reader.data.splitter = asciitable.BaseSplitter()
    fname = 't/test4.dat'
    data = reader.read(fname)
    expected = get_testfiles(fname)
    assert_equal(data.dtype.names, expected['cols'])
    assert_equal(len(data), expected['nrows'])
    # Spot-check several fields in row 2 against known values.
    row = 2
    assert_almost_equal(data.field('zabs1.nh')[row], 0.0839710433091)
    assert_almost_equal(data.field('p1.gamma')[row], 1.25997502704)
    assert_almost_equal(data.field('p1.ampl')[row], 0.000696444029148)
    assert_equal(data.field('statname')[row], 'chi2modvar')
    assert_almost_equal(data.field('statval')[row], 497.56468441)
def test_read_normal(numpy):
    """Nice, typical fixed format table"""
    table = """
# comment (with blank line above)
|  Col1  |  Col2   |
|  1.2   | "hello" |
|  2.4   |'s worlds|
"""
    reader = asciitable.get_reader(Reader=asciitable.FixedWidth)
    result = reader.read(table)
    # Header names come from the bar-delimited header row.
    assert_equal(reader.header.colnames, ('Col1', 'Col2'))
    assert_almost_equal(result[1][0], 2.4)
    assert_equal(result[0][1], '"hello"')
    assert_equal(result[1][1], "'s worlds")
def test_daophot_header_keywords(numpy):
    """Daophot header keywords expose name, value, units, and format."""
    reader = asciitable.get_reader(Reader=asciitable.DaophotReader, numpy=numpy)
    reader.read('t/daophot.dat')
    expected_keywords = (('NSTARFILE', 'test.nst.1', 'filename', '%-23s'),
                         ('REJFILE', 'hello world', 'filename', '%-23s'),
                         ('SCALE', '1.', 'units/pix', '%-23.7g'),)
    for name, value, units, format_ in expected_keywords:
        # Locate the first keyword with a matching name; its absence is an
        # error, mirroring the original for/else construct.
        matches = [kw for kw in reader.keywords if kw.name == name]
        if not matches:
            raise ValueError('Keyword not found')
        keyword = matches[0]
        assert_equal(keyword.value, value)
        assert_equal(keyword.units, units)
        assert_equal(keyword.format, format_)
def test_memory_from_LOL2(numpy):
    """Memory reader on a list-of-lists input with explicit column names."""
    rows = [[1, 2, 3], [4, 5.2, 6.1], [8, 9, 'hello']]
    reader = asciitable.get_reader(Reader=asciitable.Memory, numpy=numpy,
                                   names=('c1','c2','c3'))
    out = reader.read(rows)
    print(out.dtype.names)
    assert(out.dtype.names == ('c1', 'c2', 'c3'))
    if numpy:
        # With numpy output the mixed column 'c3' is coerced to strings.
        assert(out[0][0] == 1)
        assert(out[0][1] == 2)
        assert(out[0][2] == '3')
        assert((out['c2'] == np.array([2, 5.2, 9])).all())
        assert((out['c3'] == np.array([3, 6.1, 'hello'])).all())
    else:
        # Pure-Python output keeps the original row values.
        assert(out[0] == [1, 2, 3])
        assert(out['c2'] == [2, 5.2, 9])
        assert(out['c3'] == [3, 6.1, 'hello'])
def test_ipac_read_types(numpy):
    """IPAC header type row maps onto asciitable column type classes."""
    table = r"""\
|     ra   |    dec   |   sai   |-----v2---|    sptype        |
|    real  |   float  |   l     |    real  |     char         |
|    unit  |   unit   |   unit  |    unit  |     ergs         |
|    null  |   null   |   null  |    null  |     -999         |
   2.09708   2956        73765    2.06000   B8IVpMnHg
"""
    reader = asciitable.get_reader(Reader=asciitable.Ipac, numpy=numpy)
    reader.read(table)
    # real/float -> Float, l -> Int, char -> Str.
    expected_types = [asciitable.FloatType, asciitable.FloatType,
                      asciitable.IntType, asciitable.FloatType,
                      asciitable.StrType]
    for col, expected in zip(reader.cols, expected_types):
        assert_equal(col.type, expected)
def run_ascii():
    """ Load records with asciitable.

    * PyPy: OK. Development stopped (was moved into the Astropy project as
      astropy.io.ascii).
    * Source: https://github.com/taldcroft/asciitable
    * Docs: Decent
    * Independent: NumPy not required but recommended.
    * Small: no
    * Can specify column data types: yes. If trying to guess, will parse
      zips as numbers.
    * Can read in chunks: no
    * Can skip columns: yes
    * Can stream: no
    * Return type: wrapper around file or iterable, each row is a tuple
    * Memory usage: ~ 60 Mb
    * Timing: around 0.7 sec
    """
    import asciitable
    import numpy

    # Fixed-width layout: no header row (header_start=None), data starts on
    # the third line, and column extents are given explicitly.
    # NOTE(review): 15 col_starts/col_ends entries but only 14 names --
    # verify this matches the actual ZIP.DAT layout.
    reader = asciitable.get_reader(
        Reader=asciitable.FixedWidth,
        header_start=None,
        data_start=2,
        col_starts=(0, 5, 7, 35, 36, 41, 48, 56, 59, 65, 66, 67, 71, 75, 78),
        col_ends=(4, 6, 34, 35, 40, 47, 55, 58, 64, 65, 66, 70, 74, 77, 80),
        names=('zip_code', 'state_code', 'city_name', 'zip_type',
               'county_code', 'latitude', 'longitude', 'area_code',
               'finance_code', 'city_official', 'facility', 'msa_code',
               'pmsa_code', 'filler'),
        # Force zip_code to parse as a string so leading zeros survive.
        # NOTE(review): numpy.str was removed in NumPy >= 1.20 -- confirm
        # the pinned NumPy version before running on a modern stack.
        converters={'zip_code': [asciitable.convert_numpy(numpy.str)]},
        include_names=('zip_code', 'state_code', 'city_name', 'county_code',
                       'latitude', 'longitude', 'area_code', 'msa_code',
                       'pmsa_code'),
    )
    data = reader.read('data/ZIP.DAT')
    # Count rows by iterating; asciitable cannot stream, so the whole file
    # is already in memory at this point.
    records = 0
    for row in data:
        records += 1
    # Python 2 print statement -- this benchmark predates Python 3.
    print 'Records:', records
def test_memory_from_DOL(numpy):
    """Memory reader on a dict-of-lists input, names taken from sorted keys."""
    source = {'c1': [1, 2, 3],
              'c2': [4, 5.2, 6.1],
              'c3': [8, 9, 'hello']}
    reader = asciitable.get_reader(Reader=asciitable.Memory, numpy=numpy,
                                   names=sorted(source.keys()))
    out = reader.read(source)
    assert(out.dtype.names == ('c1', 'c2', 'c3'))
    if numpy:
        # With numpy output the mixed column 'c3' is coerced to strings.
        assert(out[0][0] == 1)
        assert(out[0][1] == 4)
        assert(out[0][2] == '8')
        assert((out['c2'] == np.array([4, 5.2, 6.1])).all())
        assert((out['c3'] == np.array([8, 9, 'hello'])).all())
    else:
        # Pure-Python output keeps the original column values.
        assert(out[0] == [1, 4, 8])
        assert(out['c2'] == [4, 5.2, 6.1])
        assert(out['c3'] == [8, 9, 'hello'])
def test_types_from_dat(numpy):
    """Column types survive a round trip through the Memory reader."""
    if numpy:
        converters = {'a': [asciitable.convert_numpy(np.float)],
                      'e': [asciitable.convert_numpy(np.str)]}
    else:
        converters = {'a': [asciitable.convert_list(float)],
                      'e': [asciitable.convert_list(str)]}
    dat = asciitable.read(['a b c d e', '1 1 cat 2.1 4.2'],
                          Reader=asciitable.Basic,
                          converters=converters, numpy=numpy)
    reader = asciitable.get_reader(Reader=asciitable.Memory, numpy=numpy)
    reader.read(dat)
    print('numpy=%s' % numpy)
    print('dat=%s' % repr(dat))
    print('reader.table=%s' % repr(reader.table))
    print('types=%s' % repr([x.type for x in reader.cols]))
    # a,e forced by converters; b,c,d inferred from their values.
    expected_types = (asciitable.FloatType, asciitable.IntType,
                      asciitable.StrType, asciitable.FloatType,
                      asciitable.StrType)
    for col, col_type in zip(reader.cols, expected_types):
        assert_true(issubclass(col.type, col_type))
def read_table2(readme, data):
    """Read a CDS table given its ReadMe, using numpy output when available."""
    cds_reader = asciitable.get_reader(Reader=asciitable.Cds, readme=readme)
    if asciitable.has_numpy:
        cds_reader.outputter = asciitable.NumpyOutputter()
    return cds_reader.read(data)
def test_comment_lines(numpy):
    """RDB reader collects comment lines verbatim, preserving leading space."""
    rdb_reader = asciitable.get_reader(Reader=asciitable.RdbReader, numpy=numpy)
    rdb_reader.read('t/apostrophe.rdb')
    expected = ['# first comment', ' # second comment']
    assert_equal(rdb_reader.comment_lines, expected)
"""
Test case for CDS table writing
Reads in an IPAC table and writes it out as a CDS table

Unfortunately, as of 7/7/11, IPAC tables do not have a 'descr' attribute
"""
import asciitable

# Generate an IPAC reader object
ipac_reader = asciitable.get_reader(Reader=asciitable.Ipac)
# Fill the IPAC reader from a table...
ipac_table = ipac_reader.read('t/ipac2.dat')
# create a CDS object
# NOTE(review): cds_writer is created but never used below; the writes go
# through asciitable.write instead -- confirm whether it can be removed.
cds_writer = asciitable.get_writer(Writer=asciitable.Cds)
# write the ipac_reader Table using the CDS writer
# (I think there should be a wrapper for this...)
asciitable.write(ipac_reader, 't/ipac_convert_to_cds.cds', Writer=asciitable.Cds)
# Read an existing CDS table and write it back out in CDS format as well.
cds_reader = asciitable.get_reader(Reader=asciitable.Cds)
cds_table = cds_reader.read('t/cds2.dat')
asciitable.write(cds_reader, 't/cds_convert_to_cds.cds', Writer=asciitable.Cds)
# Read both converted files back to confirm they parse as CDS tables.
# Python 2 print statements -- this script predates Python 3.
test_read_ipaccds = cds_reader.read('t/ipac_convert_to_cds.cds')
print test_read_ipaccds
test_read_cdscds = cds_reader.read('t/cds_convert_to_cds.cds')
print test_read_cdscds[:5]