Code Example #1
def read_data(self, *args, **kwargs):
    # Pass a DataFrame straight through unchanged.
    if isinstance(args[0], pd.DataFrame):
        return args[0]
    if isinstance(args[0], str):
        # Dispatch on the file extension of the given path.
        ext = os.path.splitext(args[0])[1].lower()
        if ext in (".xls", ".xlsx"):
            print(
                "\nLoading the data table...\n\n"
                "If the table is very large, this may take a long time...\n"
                "If it still will not load, convert the Excel file to CSV and try again...\n")
            table = pd.read_excel(*args, dtype=str, **kwargs)
        elif ext == ".csv":
            table = self.read_csv(*args, **kwargs)
        elif ext == ".txt":
            # pandas has no read_data(); plain-text tables are read with read_table().
            table = pd.read_table(*args, **kwargs)
        elif ext == ".json":
            table = pd.read_json(*args, **kwargs)
        elif ext == "":
            # No extension: treat the argument as an SQL query or table name.
            table = pd.read_sql(*args, **kwargs)
        else:
            raise ValueError("Unsupported file type: {}".format(ext))
    print("\nData table loaded.\n")
    return table
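The read_data() method above is an excerpt from a larger loader class that is not shown in this example. A minimal usage sketch, assuming a hypothetical DataLoader class that defines this method together with the read_csv() helper it calls, and hypothetical file names:

loader = DataLoader()
orders = loader.read_data("orders.xlsx", sheet_name="Orders")  # dispatched to pd.read_excel()
same_df = loader.read_data(orders)                             # DataFrame input is returned unchanged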
Code Example #2
def excel_loader():
    # pandas has no read_data(); Excel workbooks are read with read_excel().
    xl = pd.read_excel('superstore.xls', sheet_name='Orders')
    print(xl)
Code Example #3
        # Tokenize a single text and map the model's prediction to a sentiment label.
        input_ids = self.ftokenize(text)
        preds = self.fmodel.predict(input_ids)
        if preds == 0:
            return "NEGATIVE"
        elif preds == 1:
            return "POSITIVE"
        else:
            return "NEUTRAL"

    def fpipe_list(self, texts):
        # Batch version: tokenize a list of texts and label each prediction.
        # The parameter is named texts rather than list to avoid shadowing the built-in.
        input_ids = self.ftokenize(texts)
        preds = self.fmodel.predict(input_ids)
        output = []
        for pred in tqdm(preds, desc="predicting"):
            if pred == 0:
                output.append("NEGATIVE")
            elif pred == 1:
                output.append("POSITIVE")
            else:
                output.append("NEUTRAL")
        return output


if __name__ == "__main__":
    bort = BertSentiment()
    # pandas has no read_data(); read the CSV and keep only the "text" column.
    bottom_up_data = pd.read_csv("D:\\tweets\\depression_tweets.csv",
                                 usecols=["text"])["text"].tolist()
    top_down_data = pd.read_csv("D:\\tweets\\depression_tweets.csv",
                                usecols=["text"])["text"].tolist()
    bottom_up_preds = bort.fpipe_list(bottom_up_data)
    top_down_preds = bort.fpipe_list(top_down_data)
Code Example #4
File: example.py  Project: zwMargaret/Python-Code
# Step 1: download historical prices for every ticker in the symbol file
f = symbol_csv
ticker_list = list(pd.read_csv(f)['Symbol'].unique())
download_all_prices(ticker_list, dir_prices_1, dir_prices_2, start_date, end_date)

# download index data
dir_prices_index = common_dir_dict['dir_prices_index']
download_all_prices(['SPY'], dir_prices_index, dir_prices_index, start_date, end_date)


#----------------------------------------------------------------
# Step 2: Get samples with abnormal returns
from abnormal_returns import outputResults

index_filename = dir_prices_index + 'SPY.csv'
# pandas has no read_data(); the index prices are stored as CSV.
index_df = pd.read_csv(index_filename)
car_dir = common_dir_dict['car_dir']
L = 240
W = 10

t1 = collectStockName(dir_prices_1)
iex = False
outputResults(t1, dir_prices_1, index_df, car_dir, L, W, iex)

t2 = collectStockName(dir_prices_2)
iex = True
outputResults(t2, dir_prices_2, index_df, car_dir, L, W, iex)

from high_car import carSelect
output = carSelect(car_dir, car_column='CAR[-W:0]', t_column='CAR[-W:0] t-Score',
                   car_criteria=0.1, t_criteria=1.96, days=11)
car_csv = common_dir_dict['input_dir'] + '{}_car.csv'.format(car_num)
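collectStockName() and download_all_prices() are project helpers from zwMargaret/Python-Code that are not included in this excerpt. A minimal sketch of what collectStockName() might look like, assuming each price directory holds one CSV file per ticker (e.g. AAPL.csv):

import os

def collectStockName(price_dir):
    # Hypothetical helper: derive ticker symbols from the CSV filenames in a price directory.
    return [os.path.splitext(name)[0]
            for name in os.listdir(price_dir)
            if name.lower().endswith(".csv")]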
Code Example #5
import pandas as pd
import json


def save(df, filename):
    # Write the DataFrame to a single sheet of an Excel workbook.
    # ExcelWriter.save() was removed in pandas 2.0; the context manager closes the file instead.
    with pd.ExcelWriter(filename) as writer:
        df.to_excel(writer, sheet_name="sheet1")


#jsonString = open("./nonPayment.json").read()
# pandas has no read_data(); the source file is JSON, so use read_json().
df = pd.read_json("./nonPayment.json")

print(df.count())

save(df, "nonPayment.xlsx")