def test_data_method(self):
    # Minimal testing of legacy data method
    reader_114 = StataReader(self.dta1_114)
    with warnings.catch_warnings(record=True) as w:
        parsed_114_data = reader_114.data()

    reader_114 = StataReader(self.dta1_114)
    parsed_114_read = reader_114.read()
    tm.assert_frame_equal(parsed_114_data, parsed_114_read)
def test_data_method(self):
    # Minimal testing of legacy data method
    with StataReader(self.dta1_114) as rdr:
        with warnings.catch_warnings(record=True) as w:  # noqa
            parsed_114_data = rdr.data()

    with StataReader(self.dta1_114) as rdr:
        parsed_114_read = rdr.read()
    tm.assert_frame_equal(parsed_114_data, parsed_114_read)
def test_variable_labels(self):
    sr_115 = StataReader(self.dta16_115).variable_labels()
    sr_117 = StataReader(self.dta16_117).variable_labels()
    keys = ('var1', 'var2', 'var3')
    labels = ('label1', 'label2', 'label3')
    for k, v in compat.iteritems(sr_115):
        self.assertTrue(k in sr_117)
        self.assertTrue(v == sr_117[k])
        self.assertTrue(k in keys)
        self.assertTrue(v in labels)
def test_read_dta18(self):
    parsed_118 = self.read_dta(self.dta22_118)
    parsed_118["Bytes"] = parsed_118["Bytes"].astype('O')
    expected = DataFrame.from_records(
        [['Cat', 'Bogota', u'Bogotá', 1, 1.0, u'option b Ünicode', 1.0],
         ['Dog', 'Boston', u'Uzunköprü', np.nan, np.nan, np.nan, np.nan],
         ['Plane', 'Rome', u'Tromsø', 0, 0.0, 'option a', 0.0],
         ['Potato', 'Tokyo', u'Elâzığ', -4, 4.0, 4, 4],
         ['', '', '', 0, 0.3332999, 'option a', 1 / 3.]],
        columns=['Things', 'Cities', 'Unicode_Cities_Strl',
                 'Ints', 'Floats', 'Bytes', 'Longs'])
    expected["Floats"] = expected["Floats"].astype(np.float32)
    for col in parsed_118.columns:
        tm.assert_almost_equal(parsed_118[col], expected[col])

    with StataReader(self.dta22_118) as rdr:
        vl = rdr.variable_labels()
        vl_expected = {u'Unicode_Cities_Strl':
                       u'Here are some strls with Ünicode chars',
                       u'Longs': u'long data',
                       u'Things': u'Here are some things',
                       u'Bytes': u'byte data',
                       u'Ints': u'int data',
                       u'Cities': u'Here are some cities',
                       u'Floats': u'float data'}
        tm.assert_dict_equal(vl, vl_expected)
        self.assertEqual(rdr.data_label, u'This is a Ünicode data label')
def convert_to_df():
    """
    Extract the important features from each Stata file and merge
    them into a single DataFrame.
    :return:
    """
    # Output file names for each year's merged DataFrame
    data_merge_file_name = ['read_json_output_file/2010.csv',
                            'read_json_output_file/2012.csv',
                            'read_json_output_file/2014.csv',
                            'read_json_output_file/2016.csv']
    data_path = read_json()  # read the locations of the Stata files
    for i in range(len(data_path)):
        temp = []
        for j in range(len(data_path[i])):
            for key in data_path[i][j].keys():
                stata_data_path = key  # path of one of that year's tables
                columns_name = data_path[i][j][key]  # the important features of that table
                print(columns_name)
                stata_data = StataReader(stata_data_path,
                                         convert_categoricals=False)  # read the Stata file
                # convert to a DataFrame and keep only the important features
                pd_important_feature = pd.DataFrame(stata_data.read())[columns_name]
                temp.append(pd_important_feature)
        data_merge(temp, data_merge_file_name[i])  # merge and write the csv file
        print('-------------------------')
def load_stata_file(filepath, index_cols):
    """Load data and metadata from a Stata file."""
    data = pd.read_stata(filepath,
                         convert_categoricals=False).set_index(index_cols)

    with StataReader(filepath) as reader:
        reader.value_labels()
        mapping = {col: reader.value_label_dict[t]
                   for col, t in zip(reader.varlist, reader.lbllist)
                   if t in reader.value_label_dict}
        data.replace(mapping, inplace=True)

        # convert the labelled variables into the category type
        for c in data.columns:
            if c in mapping:
                data[c] = data[c].astype('category')

        # read the actual questions that were asked, for reference
        questions = reader.variable_labels()

    return data, questions
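# A minimal usage sketch for load_stata_file above; 'survey.dta' and
# 'respondent_id' are hypothetical names, for illustration only.
data, questions = load_stata_file('survey.dta', ['respondent_id'])
print(questions.get('age'))  # the variable label ("question") for column 'age', if present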
def test_missing_value_generator(self):
    types = ('b', 'h', 'l')
    df = DataFrame([[0.0]], columns=['float_'])
    with tm.ensure_clean() as path:
        df.to_stata(path)
        valid_range = StataReader(path).VALID_RANGE
    expected_values = ['.' + chr(97 + i) for i in range(26)]
    expected_values.insert(0, '.')
    for t in types:
        offset = valid_range[t][1]
        for i in range(0, 27):
            val = StataMissingValue(offset + 1 + i)
            self.assertTrue(val.string == expected_values[i])

    # Test extremes for floats
    val = StataMissingValue(struct.unpack('<f', b'\x00\x00\x00\x7f')[0])
    self.assertTrue(val.string == '.')
    val = StataMissingValue(struct.unpack('<f', b'\x00\xd0\x00\x7f')[0])
    self.assertTrue(val.string == '.z')

    # Test extremes for doubles
    val = StataMissingValue(
        struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
    self.assertTrue(val.string == '.')
    val = StataMissingValue(
        struct.unpack('<d', b'\x00\x00\x00\x00\x00\x1a\xe0\x7f')[0])
    self.assertTrue(val.string == '.z')
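# A standalone sketch of the mapping the test above exercises: in the pandas
# versions these tests target, Stata's 27 missing values for byte data sit one
# past the top of the valid range (VALID_RANGE['b'] == (-127, 100)), so
# 101..127 map to '.', '.a', ..., '.z'.
from pandas.io.stata import StataMissingValue

assert StataMissingValue(101).string == '.'
assert StataMissingValue(102).string == '.a'
assert StataMissingValue(127).string == '.z'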
def test_read_dta1(self):
    reader = StataReader(self.dta1)
    parsed = reader.data()
    reader_13 = StataReader(self.dta1_13)
    parsed_13 = reader_13.data()

    # Pandas uses np.nan as missing value.
    # Thus, all columns will be of type float, regardless of their name.
    expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
                         columns=['float_miss', 'double_miss', 'byte_miss',
                                  'int_miss', 'long_miss'])

    # this is an oddity as really the nan should be float64, but
    # the casting doesn't fail so need to match stata here
    expected['float_miss'] = expected['float_miss'].astype(np.float32)

    tm.assert_frame_equal(parsed, expected)
    tm.assert_frame_equal(parsed_13, expected)
def test_timestamp_and_label(self):
    original = DataFrame([(1,)], columns=['var'])
    time_stamp = datetime(2000, 2, 29, 14, 21)
    data_label = 'This is a data file.'
    with tm.ensure_clean() as path:
        original.to_stata(path, time_stamp=time_stamp,
                          data_label=data_label)

        reader = StataReader(path)
        parsed_time_stamp = dt.datetime.strptime(reader.time_stamp,
                                                 '%d %b %Y %H:%M')
        assert parsed_time_stamp == time_stamp
        assert reader.data_label == data_label
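# For reference, the Stata header stores the timestamp as text in the format
# parsed above, e.g. '29 Feb 2000 14:21'. A quick standalone sanity check
# (assumes the default C locale for the month abbreviation):
from datetime import datetime

assert datetime(2000, 2, 29, 14, 21).strftime('%d %b %Y %H:%M') == '29 Feb 2000 14:21'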
def load_stata(filepath, indexcol, drop_minornans=False):
    data = pd.read_stata(filepath,
                         convert_categoricals=False).set_index(indexcol)

    with StataReader(filepath) as reader:
        reader.value_labels()
        mapping = {col: reader.value_label_dict[t]
                   for col, t in zip(reader.varlist, reader.lbllist)
                   if t in reader.value_label_dict}
        # read the actual questions that were asked, for reference
        questions = reader.variable_labels()

    # replace coded values with their labels
    data.replace(mapping, inplace=True)

    # convert the categorical variables into the category type
    cat_list = []
    for c in data.columns:
        if c in mapping:
            cat_list.append(c)
            data[c] = data[c].astype('category')
    data['poor'] = data['poor'].astype('category')

    data.drop('gap', axis=1, inplace=True)
    data.drop('gapsq', axis=1, inplace=True)
    data.drop('food_poor', axis=1, inplace=True)
    data.drop('inc_poor', axis=1, inplace=True)
    data.drop('Date', axis=1, inplace=True)
    for i in data.columns:
        if data[i].dtype == "object":
            data.drop(i, axis=1, inplace=True)

    # drop records with only a few nans
    if drop_minornans:
        nan_counts = (data.applymap(pd.isnull)
                          .sum(axis=0)
                          .sort_values(ascending=False))
        nan_cols = nan_counts[(nan_counts > 0) & (nan_counts < 10)].index.values
        data = data.dropna(subset=nan_cols)

    return data, questions, cat_list
def test_minimal_size_col(self):
    str_lens = (1, 100, 244)
    s = {}
    for str_len in str_lens:
        s['s' + str(str_len)] = Series(['a' * str_len, 'b' * str_len,
                                        'c' * str_len])
    original = DataFrame(s)
    with tm.ensure_clean() as path:
        original.to_stata(path, write_index=False)
        sr = StataReader(path)
        variables = sr.varlist
        formats = sr.fmtlist
        for variable, fmt in zip(variables, formats):
            self.assertTrue(int(variable[1:]) == int(fmt[1:-1]))
def test_read_dta1(self):
    reader = StataReader(self.dta1)
    parsed = reader.data()

    # Pandas uses np.nan as missing value.
    # Thus, all columns will be of type float, regardless of their name.
    expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
                         columns=['float_miss', 'double_miss', 'byte_miss',
                                  'int_miss', 'long_miss'])

    for i, col in enumerate(parsed.columns):
        np.testing.assert_almost_equal(parsed[col],
                                       expected[expected.columns[i]])
def read_stata_file(dir, file_name):
    """
    :param dir: directory containing the Stata file
    :param file_name: name of the Stata file
    :return: the data as a DataFrame, plus the list of labelled columns
    """
    stata_data = StataReader(dir + file_name, convert_categoricals=False)
    columns_list = list(stata_data.value_labels().keys())  # labelled columns
    print(file_name)
    print(len(columns_list))
    print(columns_list[0:10])
    print('---------------')
    return pd.DataFrame(stata_data.read()), columns_list
from pandas.io.stata import StataReader
from paths import paths

reader = StataReader(paths.abccare)
abccare = reader.data(convert_dates=False, convert_categoricals=False)
abccare.id.fillna(9999, inplace=True)  # include the child with missing ID
abccare = abccare.dropna(subset=['id']).set_index('id')
abccare = abccare.sort_index()
abccare.drop(abccare.loc[abccare.abc == 0].index, inplace=True)
# abccare.drop(abccare.loc[(abccare.RV == 1) & (abccare.R == 0)].index, inplace=True)

# use the same variable for income between CARE and ABC
# abccare.loc[abccare.program == 0, 'p_inc0'] = abccare.loc[abccare.program == 0, 'hh_inc0']
def stataLoad(dta_filename):
    reader = StataReader(dta_filename)
    data = reader.data()
    print("\nLoaded {} rows".format(len(data)))
    return data
from pandas.io.stata import StataReader, StataWriter

filename_all = "CHIP2013_rural_household_f_income_asset.dta"

# stata_data = StataReader(filename_all, convert_categoricals=False, encoding='utf-8')
# stata_data = StataReader(filename_all, encoding='utf8')
stata_data = StataReader(filename_all)
print(stata_data)

# varlist = stata_data.varlist
# value_labels = stata_data.value_labels()
# fmtlist = stata_data.fmtlist
# variable_labels = stata_data.variable_labels()
#
# print(data)
# print(varlist)
# print(value_labels)
# print(fmtlist)
# print(variable_labels)
#
# writer = StataWriter(fname='mytest_1.dta', data=data,
#                      variable_labels=variable_labels)
# writer.write_file()

# Note: when writing,
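# A minimal round-trip sketch in the spirit of the commented-out writer code
# above; 'mytest_1.dta' follows the name used there. Recent pandas versions
# let DataFrame.to_stata carry the variable labels directly.
import pandas as pd

df = pd.read_stata(filename_all, convert_categoricals=False)
with StataReader(filename_all) as rdr:
    labels = rdr.variable_labels()
df.to_stata('mytest_1.dta', write_index=False, variable_labels=labels)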
sys.path.extend([os.path.join(os.path.dirname(__file__), '..')])
from paths import paths

# ----------------------------------------------------------------
seed = 1234
aux_draw = 2  # need to use more than 1
pset_type = 1
# ----------------------------------------------------------------

# bring in file with indexes for interpolation bootstrap
interp_index = pd.read_csv(paths.cnlsy_bsid)

# bring in file with indexes for extrapolation bootstrap
reader = StataReader(paths.psid_bsid)
psid = reader.data(convert_dates=False, convert_categoricals=False)
psid = psid.iloc[:, 0:aux_draw]  # limit PSID to the number of repetitions you need
nlsy = pd.read_csv(paths.nlsy_bsid)

# set up extrapolation indexes (there are multiple data sets)
extrap_index = pd.concat([psid, nlsy], axis=0,
                         keys=('psid', 'nlsy'), names=('dataset', 'id'))
extrap_source = ['psid' for j in range(0, psid.shape[0])] + \
                ['nlsy' for k in range(0, nlsy.shape[0])]

# bring in files with weights
reader = StataReader(paths.nlsy_weights)
# -*- coding:utf-8 -*-
import pandas as pd
from pandas.io.stata import StataReader

infilename = r"merge_2.dta"
outfile = 'out.csv'

if input('are you sure to clear outputfile>>' + outfile + '<<(y/n)?') == 'y':
    open(outfile, 'w').close()

stata_data = StataReader(infilename, convert_categoricals=False)
data = stata_data.read()
col_n = ['stkcd', 'time', 'rt_year', 'lnme', 'lev', 'size']
data = pd.DataFrame(data, columns=col_n)
data = data.dropna(axis=0)


def output(string):
    with open(outfile, 'a') as f:
        f.write(string)


def slice(df_year):
    # df_year is already sorted in ascending order
    l_stk = df_year.iloc[:int(len(df_year) * 0.3)]['stkcd'].tolist()
    m_stk = df_year.iloc[int(len(df_year) * 0.3):int(len(df_year) * 0.7)]['stkcd'].tolist()
    h_stk = df_year.iloc[int(len(df_year) * 0.7):]['stkcd'].tolist()
    return h_stk, m_stk, l_stk
def __stata_2_dataframes(filename):
    _stata_data = StataReader(filename, convert_categoricals=False)
    return ConvertDataFrames.dataframes_split(_stata_data)
# ----------------------------------------------------------------
if __name__ == '__main__':

    from load_data import extrap, interp, abcd

    np.random.seed(1234)
    aux_draw = 3

    # Bring in auxiliary data
    interp_index = pd.read_csv(paths.cnlsy_bsid)

    reader = StataReader(paths.psid_bsid)
    psid = reader.data(convert_dates=False, convert_categoricals=False)
    psid = psid.iloc[:, 0:aux_draw]  # limit PSID to the number of repetitions you need
    nlsy = pd.read_csv(paths.nlsy_bsid)

    interp_index = interp_index.iloc[:, 0]  # use position 0 for full NLSY/CNLSY sample
    extrap_index = pd.concat([psid, nlsy], axis=0,
                             keys=('psid', 'nlsy'), names=('dataset', 'id'))
    extrap_source = ['psid' for j in range(0, psid.shape[0])] + \
                    ['nlsy' for k in range(0, nlsy.shape[0])]
def meta_labels(self):
    """Read the labels for the variables and code values for the variables,
    using the Stata reader.
    """
    import os
    import struct

    from pandas.io.stata import StataReader

    var_labels = None
    val_labels = None

    if not os.path.exists(self.filesystem.path('meta', 'variable_labels.yaml')):
        for name, fn in self.sources():
            if name.endswith('l'):
                self.log("Getting labels for {} from {} (This is really slow)"
                         .format(name, fn))
                reader = StataReader(fn)
                df = reader.data()  # Can't get labels before reading data
                var_labels = reader.variable_labels()
                val_labels = reader.value_labels()
                break

        self.filesystem.write_yaml(var_labels, 'meta', 'variable_labels.yaml')
        self.filesystem.write_yaml(val_labels, 'meta', 'value_labels.yaml')
    else:
        self.log("Skipping extracts; already exist")

    # The value codes include both the value codes and the imputation codes.
    # The imputation codes are extracted as positive integers, when they
    # really should be negative.
    table_values = {}
    imputation_values = {}

    if not val_labels:
        val_labels = self.filesystem.read_yaml('meta', 'value_labels.yaml')

    for k, v in val_labels.items():
        table_values[k] = {}
        imputation_values[k] = {-10: 'NO IMPUTATION'}

        for code, code_val in v.items():
            # Convert the unsigned code to signed
            signed_code = struct.unpack('i', struct.pack('I', int(code)))[0]

            if signed_code < 0:
                imputation_values[k][signed_code] = code_val
            else:
                table_values[k][code] = code_val

    self.filesystem.write_yaml(table_values, 'meta', 'table_codes.yaml')
    self.filesystem.write_yaml(imputation_values, 'meta', 'imputation_codes.yaml')

    self.log("{} table variables".format(len(table_values)))
    self.log("{} imputation variables".format(len(imputation_values)))

    return True
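# A quick standalone illustration of the unsigned-to-signed reinterpretation
# used above: 2**32 - 10 == 4294967286 is the unsigned 32-bit encoding of -10.
import struct

assert struct.unpack('i', struct.pack('I', 4294967286))[0] == -10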
if not os.path.exists(paths.data):
    os.mkdir(paths.data)

'''Load and Cache Datasets
-----------------------

Notes:
- Ensures no overlap in id
- Trims observations with any labor income over $300,000 (U.S., 2014)
'''

# --------------------------------------------------------------------
print("Loading PSID")
reader = StataReader(paths.psid)
psid = reader.read(convert_dates=False, convert_categoricals=False)
psid = psid.dropna(subset=['id']).set_index('id')

# Trimming
inc = psid.filter(regex='^inc_labor[0-9][0-9]')
psid = psid.loc[psid.male == 0]
psid = psid.loc[psid.black == 1]
psid = psid.loc[((inc < inc.quantile(0.90)) | (inc.isnull())).all(axis=1)]

# Interpolating
plong = pd.wide_to_long(psid[inc.columns].reset_index(),
                        ['inc_labor'], i='id', j='age').sort_index()
plong = plong.interpolate(limit=5)
pwide = plong.unstack()