def test_read_pickles_0_11_0(self):
    """Round-trip every 0.11.0 legacy pickle file against current pandas."""
    # These legacy pickles are known to fail on big-endian platforms.
    if not is_little_endian():
        raise nose.SkipTest("known failure of test_read_pickles_0_11_0 on non-little endian")
    data_dir = tm.get_data_path('legacy_pickle/0.11.0')
    for fname in os.listdir(data_dir):
        self.compare(os.path.join(data_dir, fname))
def read_pickles(self, version):
    """Compare each legacy pickle stored for *version* with current data."""
    # Legacy pickle fixtures are little-endian only; skip elsewhere.
    if not is_little_endian():
        raise nose.SkipTest("known failure on non-little endian")
    base = tm.get_data_path('legacy_pickle/{0}'.format(str(version)))
    for entry in os.listdir(base):
        self.compare(os.path.join(base, entry))
def read_pickles(self, version):
    """Run self.compare over every legacy pickle file for *version*."""
    if not is_little_endian():
        # Known failure on big-endian hardware.
        raise nose.SkipTest("known failure on non-little endian")
    pickle_dir = tm.get_data_path('legacy_pickle/{0}'.format(str(version)))
    paths = [os.path.join(pickle_dir, name) for name in os.listdir(pickle_dir)]
    for full_path in paths:
        self.compare(full_path)
def test_read_pickles_0_11_0(self):
    """Validate the stored 0.11.0 pickle fixtures one by one."""
    if not is_little_endian():
        # Fixtures were written little-endian; this is a known failure otherwise.
        raise nose.SkipTest(
            "known failure of test_read_pickles_0_11_0 on non-little endian"
        )
    fixture_dir = tm.get_data_path('legacy_pickle/0.11.0')
    for name in os.listdir(fixture_dir):
        self.compare(os.path.join(fixture_dir, name))
def test_write_dta6(self):
    """Round-trip a CSV-sourced frame through Stata format and compare."""
    # Stata writing is a known failure on big-endian platforms.
    if not is_little_endian():
        raise nose.SkipTest("known failure of test_write_dta6 on non-little endian")
    expected = self.read_csv(self.csv3)
    expected.index.name = 'index'
    with ensure_clean(self.dta6) as path:
        expected.to_stata(path, None, False)
        roundtripped = self.read_dta(path)
        tm.assert_frame_equal(roundtripped.set_index('index'), expected)
def test_write_dta5(self):
    """Round-trip a frame of all-missing values through Stata format."""
    if not is_little_endian():
        # Known failure on big-endian platforms.
        raise nose.SkipTest("known failure of test_write_dta5 on non-little endian")
    missing_cols = ['float_miss', 'double_miss', 'byte_miss', 'int_miss',
                    'long_miss']
    expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
                         columns=missing_cols)
    expected.index.name = 'index'
    with ensure_clean(self.dta5) as path:
        expected.to_stata(path, None, False)
        roundtripped = self.read_dta(path)
        tm.assert_frame_equal(roundtripped.set_index('index'), expected)
def test_write_dta6(self):
    """Write csv3 data to Stata and confirm it reads back unchanged."""
    if not is_little_endian():
        # Known big-endian failure; skip rather than report spuriously.
        raise nose.SkipTest("known failure of test_write_dta6 on non-little endian")
    frame = self.read_csv(self.csv3)
    frame.index.name = 'index'
    with tm.ensure_clean() as tmp_path:
        frame.to_stata(tmp_path, None, False)
        reread = self.read_dta(tmp_path)
        tm.assert_frame_equal(reread.set_index('index'), frame)
def test_read_write_dta5(self):
    """Round-trip a single row of NaNs across five missing-value columns."""
    if not is_little_endian():
        # Big-endian platforms are a known failure here.
        raise nose.SkipTest("known failure of test_write_dta5 on non-little endian")
    cols = ['float_miss', 'double_miss', 'byte_miss', 'int_miss', 'long_miss']
    row = (np.nan,) * 5
    frame = DataFrame([row], columns=cols)
    frame.index.name = 'index'
    with tm.ensure_clean() as tmp_path:
        frame.to_stata(tmp_path, None, False)
        reread = self.read_dta(tmp_path)
        tm.assert_frame_equal(reread.set_index('index'), frame)
def read_pickles(self, version):
    """Compare legacy pickles for *version*, validating any time series found."""
    if not is_little_endian():
        # Known failure on big-endian hardware.
        raise nose.SkipTest("known failure on non-little endian")
    base = tm.get_data_path('legacy_pickle/{0}'.format(str(version)))
    for entry in os.listdir(base):
        result = self.compare(os.path.join(base, entry))
        if result is None:
            continue
        # Only 'series'/'ts' payloads get the extra time-series validation.
        if 'series' in result and 'ts' in result['series']:
            self._validate_timeseries(result['series']['ts'],
                                      self.data['series']['ts'])
def test_read_write_dta10(self):
    """Round-trip mixed dtypes (string/object/int/float/datetime) via Stata."""
    if not is_little_endian():
        # Known failure on big-endian platforms.
        raise nose.SkipTest("known failure of test_write_dta10 on non-little endian")
    col_names = ['string', 'object', 'integer', 'float', 'datetime']
    row = ["string", "object", 1, 1.1, np.datetime64('2003-12-25')]
    frame = DataFrame(data=[row], columns=col_names)
    # Force object dtype so round-trip comparison is apples-to-apples.
    frame["object"] = Series(frame["object"], dtype=object)
    frame.index.name = 'index'
    with tm.ensure_clean() as tmp_path:
        frame.to_stata(tmp_path, {'datetime': 'tc'}, False)
        reread = self.read_dta(tmp_path)
        tm.assert_frame_equal(reread.set_index('index'), frame)
def read_pickles(self, version):
    """Compare all legacy pickles for *version*; fail if none were tested."""
    if not is_little_endian():
        # Known failure on big-endian hardware.
        raise nose.SkipTest("known failure on non-little endian")
    base = tm.get_data_path('legacy_pickle/{0}'.format(str(version)))
    tested = 0
    for entry in os.listdir(base):
        result = self.compare(os.path.join(base, entry), version)
        if result is None:
            continue
        # Extra validation only applies when the payload carries these keys.
        if 'series' in result and 'ts' in result['series']:
            self._validate_timeseries(result['series']['ts'],
                                      self.data['series']['ts'])
            self._validate_frequency(result['series']['ts'])
        if 'index' in result and 'period' in result['index']:
            self._validate_periodindex(result['index']['period'],
                                       self.data['index']['period'])
        tested += 1
    # Guard against silently passing when no fixture file was compared.
    assert tested > 0, 'Pickle files are not tested'
def read_pickles(self, version):
    """Compare every legacy pickle for *version*; require at least one hit."""
    if not is_little_endian():
        # Legacy pickles are little-endian; known failure elsewhere.
        raise nose.SkipTest("known failure on non-little endian")
    pickle_dir = tm.get_data_path('legacy_pickle/{0}'.format(str(version)))
    compared = 0
    for name in os.listdir(pickle_dir):
        payload = self.compare(os.path.join(pickle_dir, name))
        if payload is None:
            continue
        if 'series' in payload and 'ts' in payload['series']:
            self._validate_timeseries(payload['series']['ts'],
                                      self.data['series']['ts'])
            self._validate_frequency(payload['series']['ts'])
        if 'index' in payload and 'period' in payload['index']:
            self._validate_periodindex(payload['index']['period'],
                                       self.data['index']['period'])
        compared += 1
    # Catch the degenerate case of an empty/missing fixture directory.
    assert compared > 0, 'Pickle files are not tested'
def skip_if_not_little_endian(test_name='test'):
    """Skip the current test on big-endian platforms.

    Parameters
    ----------
    test_name : str, default 'test'
        Name to embed in the skip message. The default reproduces the
        original generic message, so existing zero-argument callers are
        unaffected; callers may now pass their own test name for a more
        informative skip reason.

    Raises
    ------
    nose.SkipTest
        When the running platform is not little-endian.
    """
    if not is_little_endian():
        raise nose.SkipTest(
            "known failure of {0} on non-little endian".format(test_name))
import nose import sys from distutils.version import LooseVersion import numpy as np import pandas as pd from pandas.core.frame import DataFrame, Series from pandas.io.parsers import read_csv from pandas.io.stata import (read_stata, StataReader, InvalidColumnName, PossiblePrecisionLoss) import pandas.util.testing as tm from pandas.util.misc import is_little_endian from pandas import compat if not is_little_endian(): raise nose.SkipTest("known failure of test_stata on non-little endian") class TestStata(tm.TestCase): def setUp(self): # Unit test datasets for dta7 - dta9 (old stata formats 104, 105 and 107) can be downloaded from: # http://stata-press.com/data/glmext.html self.dirpath = tm.get_data_path() self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta') self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta') self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta') self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta') self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta') self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta')