def work3(self):
    """Debug helper: dump the previously captured page snapshot and stop.

    Reads the locally saved ``test.txt`` (written by an earlier scraping
    run via ``common.create_file``), prints its contents and terminates
    the process.

    NOTE(review): the original body continued after ``exit()`` with a
    PhantomJS fetch and a futures-table DB import that could never run
    (unreachable), and which referenced an undefined variable ``res``;
    that dead code was removed.  The live import path for the same table
    is implemented in ``work4`` / ``traders_web_D``.
    """
    # Context manager closes the handle deterministically; the original
    # leaked it via open(...).read().
    with open("test.txt", encoding="utf-8") as f:
        print(f.read())
    exit()  # deliberate debug stop — nothing after this ever executed
def work4(self):
    """Import today's futures positions and the weekly margin balance.

    Section 1 scrapes the traders.co.jp futures-top page, renames the
    table columns to the DB schema and inserts every row into the
    ``futures`` table.  Section 2 scrapes the weekly margin-transition
    page and inserts the latest week's figures into ``rashio``.
    """
    # ---- 1) today's futures trading information ------------------------
    table_name = "futures"
    UURL = "https://www.traders.co.jp/domestic_stocks/invest_tool/futures/futures_top.asp"
    dfs = common.read_html(common.Chorme_get(UURL), 1)  # header=0,skiprows=0(省略可能)
    for ii in range(len(dfs)):
        print(ii, dfs[ii].columns[0])
        if dfs[ii].columns[0] == "SELL":  # locate the futures table
            num = ii
            break
    # Map the scraped column order onto the DB column names.
    CC = ['証券会社名', 'SELL_225', 'BUY_225', 'NET_225', '日付',
          'SELL_TOPIX', 'BUY_TOPIX', 'NET_TOPIX', '更新日',
          'SELL_225M', 'BUY_225M', 'NET_225M']
    col_name = {dfs[num].columns[c]: CC[c] for c in range(len(dfs[num].columns))}
    dfs[num] = dfs[num].rename(columns=col_name)
    # Insert every row, stamping it with today's date and timestamp.
    for idx, row in dfs[num].iterrows():
        dict_w = {}
        for ii in range(len(row)):
            dict_w[dfs[num].columns[ii]] = row[ii]
        dict_w['更新日'] = common.env_time()[1]
        dict_w['日付'] = common.env_time()[0][:8]
        common.insertDB3(DB_INFO, table_name, dict_w)

    # ---- 2) weekly margin balance transition ---------------------------
    table_name = "rashio"
    UURL = "https://www.traders.co.jp/margin/transition/transition.asp"
    dfs = pd.read_html(common.Chorme_get(UURL), header=0)
    num = len(dfs) - 1  # only the most recent table
    print(dfs[num])
    list_w = ["申込日", "売り株数", "売り前週比", "売り金額", "売り前週比",
              "買い株数", "買い前週比", "買い金額", "買い前週比", "損益率", "信用倍率"]
    # BUG FIX: ``rashio`` was used below without ever being initialised,
    # which raised NameError on every run of this section.
    rashio = {}
    for idx, row in dfs[num].iterrows():
        if idx == 3:  # the data row of interest; every 2nd cell is a value
            cnt = 0
            print(len(row))
            print(len(list_w))
            for ii in range(len(row)):
                rashio[list_w[ii]] = row[cnt]
                cnt += 2
                if list_w[ii] == "信用倍率":
                    break
            break
    common.insertDB3(DB_INFO, table_name, rashio)
def __init__(self, num):
    """Prepare the per-run output directory tree and snapshot this script.

    Creates ``compf.OUT_DIR``/*num*/<timestamp> (intermediate levels only
    if missing) and copies the running script into the timestamped folder
    so each run keeps the exact code that produced its output.

    Args:
        num: sub-folder name under the root output directory
            (e.g. C:\\data\\90_profit\\06_output).
    """
    # makedirs(exist_ok=True) replaces the two racy exists()/mkdir()
    # pairs and creates OUT_DIR and the num level in one call.
    root_dir = os.path.join(compf.OUT_DIR, num)
    os.makedirs(root_dir, exist_ok=True)
    # Working folder: one fresh directory per run, named by timestamp.
    self.S_DIR = os.path.join(root_dir, common.env_time()[0][:14])
    os.mkdir(str(self.S_DIR))  # intentionally fails loudly if it pre-exists
    # Copy the script next to its output for reproducibility.
    shutil.copy2(__file__, self.S_DIR)
def save_to_csv(self, save_name, title, backreport):
    """Append one backtest-report row to *save_name* (cp932 CSV).

    On first use the file is created with a header row built from the
    report's keys; each call then appends one data row with the values
    rounded to 3 decimals.  Commas inside keys/values are stripped so
    they cannot break the CSV layout.

    Args:
        save_name: target CSV path.
        title: stock name written into the second column.
        backreport: dict of metric name -> numeric value.
    """
    # Write the header only once, when the file does not exist yet.
    if not os.path.exists(save_name):
        header = ",".join(str(key).replace(",", "") for key in backreport) + "\n"
        with open(save_name, 'w', encoding="cp932") as f:
            f.write("now,stockname," + header)
    # Append the data row: timestamp, stock name, rounded metric values.
    values = ",".join(
        str(round(val, 3)).replace(",", "") for val in backreport.values()) + "\n"
    with open(save_name, 'a', encoding="cp932") as f:
        f.write(common.env_time()[1] + "," + title + "," + values)
def main_exec2(self, file_csv):
    """Backtest every sell-flagged stock in ``kabu_list`` with the
    strategy selected by *file_csv* and emit a report per stock.

    Args:
        file_csv: report-file suffix that selects the strategy
            ('_Monthly_last.csv', '_vora_stg.csv' or '_day_stg.csv').
    """
    sqls = "select *,rowid from kabu_list"
    sql_pd = common.select_sql('B01_stock.sqlite', sqls)
    for i, row in sql_pd.iterrows():
        code = row['コード']
        if common.stock_req(code, 1) == 1:  # sell flag set
            print(code)
            code_text = os.path.join(compf.CODE_DIR, str(code) + '.txt')
            if os.path.exists(code_text):
                # Reindex the price history onto a continuous calendar
                # and drop the non-trading days.
                df = pd.DataFrame(
                    index=pd.date_range('2007/01/01',
                                        common.env_time()[1][0:10]))
                df = df.join(
                    pd.read_csv(code_text, index_col=0, parse_dates=True,
                                encoding="cp932", header=None))
                df = df.dropna()
                if len(df) > 1500:  # require enough history to backtest
                    # BUG FIX: PL was unbound (NameError at len(PL)) when
                    # file_csv matched none of the strategy branches.
                    PL = []
                    if file_csv == '_Monthly_last.csv':
                        PL = self.Monthly_last(code, df)
                    if file_csv == '_vora_stg.csv':
                        PL = self.vora_stg(code, df)
                    # if file_csv == '_ATR_stg.csv':
                    #     PL = self.ATR_stg(code, df)
                    if file_csv == '_day_stg.csv':
                        PL = self.day_stg(code, df)
                    if len(PL) > 0:
                        # Keep only the first market when several are
                        # listed comma-separated.
                        if row['市場'].count(","):
                            sp = row['市場'].split(",")
                            row['市場'] = sp[0]
                        title = str(row['コード']) + "_" + str(
                            row['銘柄名']) + "_" + str(
                                row['セクタ']) + "_" + str(
                                    row['市場']) + file_csv
                        Equity, backreport = compf.BacktestReport(
                            PL, title, self.S_DIR, 1.1, "フィルター除外")
def traders_web_W(self):
    """Import the weekly investor-activity table into ``investment_weekly``.

    Scrapes the traders.co.jp weekly investment page, round-trips the
    table through a temp CSV, and inserts the '最新週' (latest week) row
    unless it is already the most recent row stored in the DB.
    """
    table_name = 'investment_weekly'
    UURL = "https://www.traders.co.jp/domestic_stocks/stocks_data/investment_3/investment_3.asp"
    dfs = common.read_html2(common.Chorme_get(UURL), 0)  # header=0,skiprows=0(省略可能)
    temp = common.temp_path("csv", os.path.basename(__file__) + "investment.csv")
    dfs[1].to_csv(temp)
    # FIX: context manager closes the temp CSV handle (it used to leak).
    with open(temp, 'r') as f:
        dataReader = csv.reader(f)
        # Find the latest-week row.
        for row in dataReader:
            if row[1] == '最新週':
                dict_w = {'週間': common.env_time()[1][:10], '海外投資家': row[2],
                          '生損保': row[3], '銀行': row[4], '信託銀行': row[5],
                          'その他金融': row[6], '小計_金融法人': row[7],
                          '事業法人': row[8], 'その他法人': row[9],
                          '投信': row[10], '計_法人': row[11], '現金': row[12],
                          '信用': row[13], '計_現金信用': row[14]}
                # Duplicate check: compare against the newest stored row.
                sqls = "select *,rowid from %(table)s where rowid=(select max(rowid) from %(table)s) ;" % {
                    'table': table_name}
                sql_pd = common.select_sql(DB_INFO, sqls)
                if len(sql_pd) > 0:
                    if dict_w['海外投資家'] != sql_pd.loc[0, '海外投資家']:
                        common.insertDB3(DB_INFO, table_name, dict_w)
                else:
                    common.insertDB3(DB_INFO, table_name, dict_w)
                print(table_name, dict_w)
def tocom_up(self):
    """Scrape three TOCOM (Tokyo Commodity Exchange) pages into the DB.

    1. Top-10 dealer positions per commodity -> one table per commodity
       (table names taken from ``AA``).
    2. Rubber warehouse stock -> update of the last ``ゴム`` row.
    3. Open-interest by participant category -> ``tocom`` table.

    NOTE(review): reconstructed from whitespace-mangled source; the
    nesting of the breaks/DB calls is a best-effort reading — verify
    against version control.
    """
    # Display names (DB table names) and the matching page headings.
    AA = ['金', 'ゴールドスポット', '白金', 'プラチナスポット', 'プラッツドバイ原油', 'ゴム']
    BB = ['金 標準取引 (1kg)', 'ゴールドスポット', '白金 標準取引 (500g)',
          'プラチナスポット', 'プラッツドバイ原油', 'ゴム']
    # --- 1) dealer top-10 positions per commodity (weekly) ---
    UURL = "http://www.tocom.or.jp/jp/souba/baibai_top10/index.html"
    # Fetch all HTML tables on the page.
    dfs = common.read_html2(UURL, 0)  # header=0,skiprows=0(省略可能)
    for ii in range(1, len(dfs)):
        rashio = {}
        for i in range(len(AA)):
            if len(dfs[ii]) > 10:
                # Match the table heading against BB, tolerating embedded
                # newlines and spaces.
                if dfs[ii].columns[0].replace("\n", "").replace(" ", "") == BB[i] or dfs[ii].columns[0].replace("\n", "") == BB[i]:
                    table_name = AA[i]
                    print(dfs[ii].columns[0])
                    for idx, row in dfs[ii].iterrows():
                        if idx < 1:
                            continue
                        # Every second row carries a dealer-name/value pair.
                        if idx % 2 == 0:
                            rashio[row[0].replace(" ", "")] = row[1]
                    # Ensure the DB table has a column for each dealer name.
                    new_list = [l for l in rashio]
                    print(new_list)
                    # NOTE(review): return value unused.
                    aaa = common.column_check(DB_INFO, table_name, new_list)
                    # Insert the scraped row.
                    common.insertDB3(DB_INFO, table_name, rashio)
                    break
    # --- 2) rubber warehouse stock -> update last ゴム row ---
    table_name = "ゴム"
    UURL = "http://www.tocom.or.jp/jp/market/reference/kurani_rubber.html"
    # Fetch all HTML tables on the page.
    dfs = common.read_html2(UURL, 0)  # header=0,skiprows=0(省略可能)
    for ii in range(1, len(dfs)):
        rashio = {}
        if dfs[ii].columns[0] == "指定倉庫":
            for idx, row in dfs[ii].iterrows():
                if idx == 4:  # presumably the totals row — TODO confirm
                    for i in range(len(dfs[ii].columns)):
                        if i > 0:
                            # Normalise column labels into DB-safe names.
                            rashio[dfs[ii].columns[i].replace("\n\t\t\t", "").replace(" ", "").replace(".", "_")] = row[i]
                    break
            # Fetch the rowid of the last stored row.
            rid = common.last_rowid(DB_INFO, table_name)
            # Update that row in place.  NOTE(review): return value unused;
            # assumes create_update_sql executes the update itself.
            sqls = common.create_update_sql(DB_INFO, rashio, table_name, rid)  # omit last arg for the final row
            break
    # --- 3) open interest by participant category ---
    table_name = "tocom"
    UURL = "http://www.tocom.or.jp/jp/souba/torikumi/index.html"
    # Fetch all HTML tables on the page.
    dfs = common.read_html2(UURL, 0)  # header=0,skiprows=0(省略可能)
    # Target DB column names, in scraped-column order.
    CC = ['カテゴリ', '当業者売', '当業者買', '商品先物取引業者売', '商品先物取引業者買',
          'ファンド・投資信託売', 'ファンド・投資信託買', '投資家売', '投資家買',
          '取次者経由売', '取次者経由買', '外国商品先物取引業者経由売',
          '外国商品先物取引業者経由買', '合計売', '合計買']
    col_name = {}
    col_name = {dfs[0].columns[c]: CC[c] for c in range(len(dfs[0].columns))}
    dfs[0] = dfs[0].rename(columns=col_name)
    # Insert each data row (skip the header row at idx 0).
    for idx, row in dfs[0].iterrows():
        if idx == 0:
            continue
        dict_w = {}
        for ii in range(len(row)):
            dict_w[dfs[0].columns[ii]] = row[ii]
        dict_w['日付'] = common.env_time()[0][:8]
        common.insertDB3(DB_INFO, table_name, dict_w)
def traders_web_D(self):
    """Daily import of futures-option and futures dealer positions.

    Section 1 scrapes the futures-option dealer table into ``futures_op``
    (adding DB columns on the fly), then appends a computed '合計' (total)
    row.  Section 2 imports today's futures dealer table into ``futures``.

    NOTE(review): reconstructed from whitespace-mangled source; the loop
    nesting of the SUM post-processing is a best-effort reading — verify
    against version control.
    """
    # --- 1) today's futures-option dealer positions ---
    table_name = "futures_op"
    UURL = "https://www.traders.co.jp/domestic_stocks/invest_tool/futures/futures_op.asp"
    # Fetch all tables; the dealer table has '証券会社名' as first column.
    dfs = pd.read_html(common.Chorme_get(UURL), header=1)
    for ii in range(len(dfs)):
        if dfs[ii].columns[0] == "証券会社名":  # broker-name column
            num = ii
            break
    # Column names as scraped from the web page.
    col_list = [i for i in dfs[num].columns]
    # Build DB column names: prefix P/C (put/call) depending on which
    # 'Unnamed' marker column was seen last; strip pandas' '.1' suffix.
    col_tmp = []
    H = ''
    for i in dfs[num].columns:
        if i.count("Unnamed"):
            if 'Unnamed: 2' == i:
                col_tmp.append('日付')
                H = 'P'
            else:
                col_tmp.append('PUT_CALL')
                H = 'C'
        else:
            col_tmp.append(H + i.replace(".1", ""))
    # Rename scraped columns (col_list) to DB columns (col_tmp).
    # col = dict(zip(col_list,col_tmp))
    col = {}
    col = {col_list[i]: col_tmp[i] for i in range(len(col_list))}
    dfs[num] = dfs[num].rename(columns=col)
    # Ensure the DB table has every column; missing ones are added.
    set_new = [i for i in dfs[num].columns]
    res = common.column_check(DB_INFO, table_name, set_new)
    # Insert each row; NaN cells become 0, numeric strings become int.
    for idx, row in dfs[num].iterrows():
        dict_w = {}
        for ii in range(len(row)):
            # String comparison against 'nan' detects missing cells.
            if str(row[ii]) != str(float("nan")):
                try:
                    dict_w[dfs[num].columns[ii]] = int(row[ii])
                except:  # non-numeric value: store as-is
                    dict_w[dfs[num].columns[ii]] = row[ii]
            else:
                dict_w[dfs[num].columns[ii]] = 0
        dict_w['日付'] = common.env_time()[0][:8]
        common.insertDB3(DB_INFO, table_name, dict_w)
    # Fetch today's columns (excluding bookkeeping/name columns).
    sqls = "select * from futures_op where 日付 = %(key1)s" % {'key1': common.env_time()[0][:8]}
    sql_pd = common.select_sql(DB_INFO, sqls)
    set_new = [i for i in sql_pd.columns if i != 'now' and i != '証券会社名' and i != '日付' and i != 'PUT_CALL']
    # Sum every remaining column over today's rows.
    sqls = "select SUM(" + "),SUM(".join(set_new) + ") from futures_op where 日付 = '%(key1)s'" % {'key1': common.env_time()[0][:8]}
    sql_pd = common.select_sql(DB_INFO, sqls)
    for i, row in sql_pd.iterrows():
        # Replace NULL sums with 0 so the totals row is complete.
        set_val = []
        for ii in range(len(row)):
            if row[ii] is None:
                set_val.append(0)
            else:
                set_val.append(row[ii])
        set_val = common.to_int(set_val)
        col = {}
        col = {set_new[i]: set_val[i] for i in range(len(set_new))}
        col['証券会社名'] = '合計'
        col['日付'] = common.env_time()[0][:8]
        col = common.to_int(col)
        common.insertDB3(DB_INFO, table_name, col)
        print(col)
    # --- 2) today's futures dealer positions ---
    table_name = "futures"
    UURL = "https://www.traders.co.jp/domestic_stocks/invest_tool/futures/futures_top.asp"
    dfs = common.read_html2(common.Chorme_get(UURL), 1)  # header=0,skiprows=0(省略可能)
    for ii in range(len(dfs)):
        if dfs[ii].columns[0] == "SELL":  # locate the futures table
            num = ii
            break
    # Map the scraped column order onto the DB column names.
    CC = ['証券会社名', 'SELL_225', 'BUY_225', 'NET_225', '日付',
          'SELL_TOPIX', 'BUY_TOPIX', 'NET_TOPIX', '更新日',
          'SELL_225M', 'BUY_225M', 'NET_225M']
    col_name = {}
    col_name = {dfs[num].columns[c]: CC[c] for c in range(len(dfs[num].columns))}
    dfs[num] = dfs[num].rename(columns=col_name)
    # Insert every row, stamped with today's date and timestamp.
    for idx, row in dfs[num].iterrows():
        dict_w = {}
        for ii in range(len(row)):
            dict_w[dfs[num].columns[ii]] = row[ii]
        dict_w['更新日'] = common.env_time()[1]
        dict_w['日付'] = common.env_time()[0][:8]
        common.insertDB3(DB_INFO, table_name, dict_w)