Example #1
    def test_read_pickles_0_11_0(self):
        if not is_little_endian():
            raise nose.SkipTest("known failure of test_read_pickles_0_11_0 on non-little endian")

        pth = tm.get_data_path('legacy_pickle/0.11.0')
        for f in os.listdir(pth):
            vf = os.path.join(pth,f)
            self.compare(vf)
Example #2
    def read_pickles(self, version):
        if not is_little_endian():
            raise nose.SkipTest("known failure on non-little endian")

        pth = tm.get_data_path('legacy_pickle/{0}'.format(str(version)))
        for f in os.listdir(pth):
            vf = os.path.join(pth, f)
            self.compare(vf)
Example #3
    def read_pickles(self, version):
        if not is_little_endian():
            raise nose.SkipTest("known failure on non-little endian")

        pth = tm.get_data_path('legacy_pickle/{0}'.format(str(version)))
        for f in os.listdir(pth):
            vf = os.path.join(pth,f)
            self.compare(vf)
Example #4
    def test_read_pickles_0_11_0(self):
        if not is_little_endian():
            raise nose.SkipTest(
                "known failure of test_read_pickles_0_11_0 on non-little endian"
            )

        pth = tm.get_data_path('legacy_pickle/0.11.0')
        for f in os.listdir(pth):
            vf = os.path.join(pth, f)
            self.compare(vf)
Example #5
    def test_write_dta6(self):
        if not is_little_endian():
            raise nose.SkipTest("known failure of test_write_dta6 on non-little endian")
        
        original = self.read_csv(self.csv3)
        original.index.name = 'index'

        with ensure_clean(self.dta6) as path:
            original.to_stata(path, None, False)
            written_and_read_again = self.read_dta(path)
            tm.assert_frame_equal(written_and_read_again.set_index('index'), original)
Example #6
    def test_write_dta5(self):
        if not is_little_endian():
            raise nose.SkipTest("known failure of test_write_dta5 on non-little endian")
        
        original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
                             columns=['float_miss', 'double_miss', 'byte_miss', 'int_miss', 'long_miss'])
        original.index.name = 'index'

        with ensure_clean(self.dta5) as path:
            original.to_stata(path, None, False)
            written_and_read_again = self.read_dta(path)
            tm.assert_frame_equal(written_and_read_again.set_index('index'), original)
Example #7
    def test_write_dta6(self):
        if not is_little_endian():
            raise nose.SkipTest("known failure of test_write_dta6 on "
                                "non-little endian")

        original = self.read_csv(self.csv3)
        original.index.name = 'index'

        with tm.ensure_clean() as path:
            original.to_stata(path, None, False)
            written_and_read_again = self.read_dta(path)
            tm.assert_frame_equal(written_and_read_again.set_index('index'),
                                  original)
Example #8
    def test_read_write_dta5(self):
        if not is_little_endian():
            raise nose.SkipTest("known failure of test_write_dta5 on "
                                "non-little endian")

        original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
                             columns=['float_miss', 'double_miss', 'byte_miss',
                                      'int_miss', 'long_miss'])
        original.index.name = 'index'

        with tm.ensure_clean() as path:
            original.to_stata(path, None, False)
            written_and_read_again = self.read_dta(path)
            tm.assert_frame_equal(written_and_read_again.set_index('index'),
                                  original)
Example #9
    def read_pickles(self, version):
        if not is_little_endian():
            raise nose.SkipTest("known failure on non-little endian")

        pth = tm.get_data_path('legacy_pickle/{0}'.format(str(version)))
        for f in os.listdir(pth):
            vf = os.path.join(pth,f)
            data = self.compare(vf)

            if data is None:
                continue

            if 'series' in data:
                if 'ts' in data['series']:
                    self._validate_timeseries(data['series']['ts'], self.data['series']['ts'])
Example #10
    def test_read_write_dta10(self):
        if not is_little_endian():
            raise nose.SkipTest("known failure of test_write_dta10 on "
                                "non-little endian")

        original = DataFrame(data=[["string", "object", 1, 1.1,
                                    np.datetime64('2003-12-25')]],
                             columns=['string', 'object', 'integer', 'float',
                                      'datetime'])
        original["object"] = Series(original["object"], dtype=object)
        original.index.name = 'index'

        with tm.ensure_clean() as path:
            original.to_stata(path, {'datetime': 'tc'}, False)
            written_and_read_again = self.read_dta(path)
            tm.assert_frame_equal(written_and_read_again.set_index('index'),
                                  original)
Example #11
    def read_pickles(self, version):
        if not is_little_endian():
            raise nose.SkipTest("known failure on non-little endian")

        pth = tm.get_data_path('legacy_pickle/{0}'.format(str(version)))
        n = 0
        for f in os.listdir(pth):
            vf = os.path.join(pth, f)
            data = self.compare(vf, version)

            if data is None:
                continue

            if 'series' in data:
                if 'ts' in data['series']:
                    self._validate_timeseries(data['series']['ts'], self.data['series']['ts'])
                    self._validate_frequency(data['series']['ts'])
            if 'index' in data:
                if 'period' in data['index']:
                    self._validate_periodindex(data['index']['period'],
                                               self.data['index']['period'])
            n += 1
        assert n > 0, 'Pickle files are not tested'
Example #12
    def read_pickles(self, version):
        if not is_little_endian():
            raise nose.SkipTest("known failure on non-little endian")

        pth = tm.get_data_path('legacy_pickle/{0}'.format(str(version)))
        n = 0
        for f in os.listdir(pth):
            vf = os.path.join(pth, f)
            data = self.compare(vf)

            if data is None:
                continue

            if 'series' in data:
                if 'ts' in data['series']:
                    self._validate_timeseries(data['series']['ts'],
                                              self.data['series']['ts'])
                    self._validate_frequency(data['series']['ts'])
            if 'index' in data:
                if 'period' in data['index']:
                    self._validate_periodindex(data['index']['period'],
                                               self.data['index']['period'])
            n += 1
        assert n > 0, 'Pickle files are not tested'
Example #13
def skip_if_not_little_endian():
    if not is_little_endian():
        raise nose.SkipTest("known failure of test on non-little endian")
Example #14
def skip_if_not_little_endian():
    if not is_little_endian():
        raise nose.SkipTest("known failure of test on non-little endian")
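
The two helpers above factor out the endianness guard that the earlier examples repeat inline at the top of each test. Below is a rough, self-contained sketch of how such a helper might be defined and called; TestSomething and test_roundtrip are hypothetical names, and the sys.byteorder check is only assumed to mirror what pandas.util.misc.is_little_endian does.

import sys

import nose


def is_little_endian():
    # assumption: mirrors pandas.util.misc.is_little_endian by reporting
    # whether the running platform stores integers little-endian
    return sys.byteorder == 'little'


def skip_if_not_little_endian():
    # raise nose's skip exception so the test is reported as skipped,
    # not failed, when run on a big-endian machine
    if not is_little_endian():
        raise nose.SkipTest("known failure of test on non-little endian")


class TestSomething(object):
    def test_roundtrip(self):
        # guard the whole test; everything below only runs on little endian
        skip_if_not_little_endian()
        # ... actual round-trip assertions would go here ...
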
Example #15
import nose
import os
import sys
from distutils.version import LooseVersion

import numpy as np

import pandas as pd
from pandas.core.frame import DataFrame, Series
from pandas.io.parsers import read_csv
from pandas.io.stata import (read_stata, StataReader, InvalidColumnName,
    PossiblePrecisionLoss)
import pandas.util.testing as tm
from pandas.util.misc import is_little_endian
from pandas import compat

if not is_little_endian():
    raise nose.SkipTest("known failure of test_stata on non-little endian")

class TestStata(tm.TestCase):

    def setUp(self):
        # Unit test datasets for dta7 - dta9 (old stata formats 104, 105 and 107) can be downloaded from:
        # http://stata-press.com/data/glmext.html
        self.dirpath = tm.get_data_path()
        self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta')
        self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta')

        self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta')
        self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta')
        self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta')
        self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta')