def bloomberg(self):
    """Scrape Bloomberg JP commodity-futures pages and the US
    government-bond page and insert every table row into the local DB.

    Side effects: inserts into ``bloomberg_list`` and ``bonds`` via
    ``common.insertDB3``; performs four HTTP fetches.
    """
    # Relative paths of the commodity pages to scrape.
    # NOTE(review): the first path is just "energy" while the others are
    # full "markets/commodities/futures/..." paths — confirm intentional.
    paths = ["energy",
             "markets/commodities/futures/metals",
             "markets/commodities/futures/agriculture"]
    table_name = 'bloomberg_list'
    for path in paths:
        url = "https://www.bloomberg.co.jp/" + path
        dfs = common.read_html2(url, 0)  # header=0, skiprows=0 (optional)
        for df in dfs:
            for _, row in df.iterrows():
                # One DB record per table row, keyed by column header.
                dict_w = {col: str(val) for col, val in zip(df.columns, row)}
                if "単位" not in dict_w:
                    # Pages without a unit column still get these keys so
                    # every record shares the same schema.
                    dict_w["単位"] = ""
                    dict_w["価格"] = ""
                    dict_w["先物契約中心限月"] = ""
                common.insertDB3(DB_INFO, table_name, dict_w)
    # US government bonds: only the first two tables on the page.
    table_name = "bonds"
    url = "https://www.bloomberg.co.jp/markets/rates-bonds/government-bonds/us"
    dfs = common.read_html2(url, 0)  # header=0, skiprows=0 (optional)
    for i in (0, 1):
        for _, row in dfs[i].iterrows():
            dict_w = {col: str(val) for col, val in zip(dfs[i].columns, row)}
            common.insertDB3(DB_INFO, table_name, dict_w)
def JPbond(self):
    """Scrape the JPX day-session quote page, pick out the long-term JGB
    futures row (長期国債先物), and insert it into the ``JPbond`` table.

    Only the first matching row is stored; values carrying a
    parenthesised suffix like "150.25(12:34)" are trimmed to the part
    before "(".
    """
    table_name = "JPbond"
    url = "http://port.jpx.co.jp/jpx/template/quote.cgi?F=tmp/future_daytime"
    dfs = common.read_html2(url, 0)  # header=0, skiprows=0 (optional)
    # Initialize up front so the insert below cannot hit a NameError
    # when no matching row is found (original left dict_w unbound).
    dict_w = {}
    for _, row in dfs[0].iterrows():
        dict_w = {}
        if "長期国債先物" in row[0]:
            # The page ships with unnamed columns; map them to readable names.
            dfs[0] = dfs[0].rename(columns={
                'Unnamed: 0': '銘柄名', '日中取引': '始値',
                '清算値段': '高値', '制限値幅上限下限': '安値',
                '建玉残高': '現在値', 'Unnamed: 7': '前日比',
                'Unnamed: 8': '取引高', 'Unnamed: 9': '売り気配',
                'Unnamed: 10': '売り気配数量', 'Unnamed: 11': '買い気配',
                'Unnamed: 12': '買い気配数量', 'Unnamed: 13': '清算値段',
                'Unnamed: 14': '制限値幅上限下限', 'Unnamed: 15': '建玉残高'})
            for ii in range(len(row)):
                # partition keeps only the value before any "(...)" part;
                # it returns the whole string unchanged when "(" is absent.
                dict_w[dfs[0].columns[ii]] = str(row[ii]).partition("(")[0]
            break
    common.insertDB3(DB_INFO, table_name, dict_w)
def traders_web_W(self):
    """Scrape the weekly investor-type trading table (投資動向週間) from
    traders.co.jp and append the latest week to ``investment_weekly``.

    The table is round-tripped through a temp CSV (matching the existing
    pipeline) and only inserted when the '海外投資家' value differs from
    the most recently stored row, to avoid duplicates.
    """
    table_name = 'investment_weekly'
    url = "https://www.traders.co.jp/domestic_stocks/stocks_data/investment_3/investment_3.asp"
    dfs = common.read_html2(common.Chorme_get(url), 0)  # header=0, skiprows=0 (optional)
    temp = common.temp_path("csv", os.path.basename(__file__) + "investment.csv")
    dfs[1].to_csv(temp)
    # Context manager guarantees the temp-file handle is closed
    # (the original opened it and never closed it — handle leak).
    with open(temp, 'r') as f:
        for row in csv.reader(f):
            if row[1] != '最新週':
                continue
            dict_w = {'週間': common.env_time()[1][:10],
                      '海外投資家': row[2], '生損保': row[3], '銀行': row[4],
                      '信託銀行': row[5], 'その他金融': row[6],
                      '小計_金融法人': row[7], '事業法人': row[8],
                      'その他法人': row[9], '投信': row[10],
                      '計_法人': row[11], '現金': row[12], '信用': row[13],
                      '計_現金信用': row[14]}
            # Duplicate check against the most recently inserted row.
            sqls = ("select *,rowid from %(table)s "
                    "where rowid=(select max(rowid) from %(table)s) ;"
                    % {'table': table_name})
            sql_pd = common.select_sql(DB_INFO, sqls)
            if len(sql_pd) > 0:
                if dict_w['海外投資家'] != sql_pd.loc[0, '海外投資家']:
                    common.insertDB3(DB_INFO, table_name, dict_w)
            else:
                # Table is empty: always insert the first record.
                common.insertDB3(DB_INFO, table_name, dict_w)
            print(table_name, dict_w)
def tocom_gen(self):
    """Scrape TOCOM per-contract-month price tables (day and night
    sessions) and insert one wide record per product into its own
    限月<product> table.
    """
    names = ['金', '白金', 'プラッツドバイ原油', 'ゴム', 'とうもろこし',
             'ゴールドスポット', 'プラチナスポット']
    # Index of each product's day-session table within the page;
    # the matching night-session table sits two positions earlier.
    tbl_idx = [4, 20, 72, 84, 88, 12, 28]
    url = "http://www.tocom.or.jp/jp/souba/souba_sx/index.html"
    dfs = common.read_html2(url, 0)  # header=0, skiprows=0 (optional)
    print(len(dfs))
    # .ix was deprecated and removed in pandas 1.0; .iloc is the
    # positional equivalent for the default integer index used here.
    print(dfs[100].iloc[0, 0])
    if dfs[100].iloc[0, 0] != "日中取引計":
        # Layout sanity check: alert when the page structure changes.
        self.send_msg += "TOCOM弦月チェックテーブル構成が変わりました。確認してください" + "\n"
    for ii in range(len(names)):
        dict_w = {}
        table_name = "限月" + names[ii]
        for session in range(2):
            cnt = 0
            if session == 0:
                # Day session: take columns straight from the table.
                col_list = list(dfs[tbl_idx[ii]].columns)
                num = tbl_idx[ii]
            else:
                # Night session: same columns, suffixed with "L".
                col_list = [t + "L" for t in col_list]
                num -= 2
            df = dfs[num].sort_index(ascending=False)  # reverse row order
            for _, row in df.iterrows():
                for i in range(len(row)):
                    if col_list[i] == ' - ':
                        continue  # spot products have no current month
                    if cnt == 0:
                        dict_w[col_list[i]] = row[i]
                    else:
                        # Later rows get a numeric suffix on the key.
                        dict_w[col_list[i] + str(cnt)] = row[i]
                cnt += 1
        print(DB_INFO, table_name, dict_w)
        common.insertDB3(DB_INFO, table_name, dict_w)
def tocom_up(self):
    """Scrape three TOCOM pages:

    1. top-10 trading-volume tables per product -> insert into each
       product's own table,
    2. rubber warehouse stock -> update the latest ``ゴム`` row,
    3. open interest by trader category -> insert into ``tocom``.
    """
    names = ['金', 'ゴールドスポット', '白金', 'プラチナスポット',
             'プラッツドバイ原油', 'ゴム']
    headers = ['金 標準取引 (1kg)', 'ゴールドスポット', '白金 標準取引 (500g)',
               'プラチナスポット', 'プラッツドバイ原油', 'ゴム']
    # --- 1. top-10 trading-volume tables (weekly) ---
    url = "http://www.tocom.or.jp/jp/souba/baibai_top10/index.html"
    dfs = common.read_html2(url, 0)  # header=0, skiprows=0 (optional)
    for ii in range(1, len(dfs)):
        rashio = {}
        for i in range(len(names)):
            if len(dfs[ii]) > 10:
                header = dfs[ii].columns[0].replace("\n", "")
                # Match with and without embedded spaces.
                if header.replace(" ", "") == headers[i] or header == headers[i]:
                    table_name = names[i]
                    print(dfs[ii].columns[0])
                    for idx, row in dfs[ii].iterrows():
                        if idx < 1:
                            continue  # skip header row
                        if idx % 2 == 0:
                            # Every other row carries "broker, volume".
                            rashio[row[0].replace(" ", "")] = row[1]
                    # Make sure every broker column exists before insert.
                    new_list = list(rashio)
                    print(new_list)
                    common.column_check(DB_INFO, table_name, new_list)
                    common.insertDB3(DB_INFO, table_name, rashio)
                    break
    # --- 2. rubber warehouse stock: update latest ゴム row ---
    table_name = "ゴム"
    url = "http://www.tocom.or.jp/jp/market/reference/kurani_rubber.html"
    dfs = common.read_html2(url, 0)  # header=0, skiprows=0 (optional)
    for ii in range(1, len(dfs)):
        rashio = {}
        if dfs[ii].columns[0] == "指定倉庫":
            for idx, row in dfs[ii].iterrows():
                if idx == 4:  # the totals row
                    for i in range(len(dfs[ii].columns)):
                        if i > 0:
                            key = (dfs[ii].columns[i]
                                   .replace("\n\t\t\t", "")
                                   .replace(" ", "")
                                   .replace(".", "_"))
                            rashio[key] = row[i]
                    break
            # Fetch the last ROWID, then build/apply the update.
            # Omitting the last argument would target the last row
            # automatically. NOTE(review): the return value was never
            # used — presumably create_update_sql executes the update
            # itself; confirm against common's implementation.
            rid = common.last_rowid(DB_INFO, table_name)
            common.create_update_sql(DB_INFO, rashio, table_name, rid)
            break
    # --- 3. open interest by trader category ---
    table_name = "tocom"
    url = "http://www.tocom.or.jp/jp/souba/torikumi/index.html"
    dfs = common.read_html2(url, 0)  # header=0, skiprows=0 (optional)
    cols = ['カテゴリ', '当業者売', '当業者買', '商品先物取引業者売',
            '商品先物取引業者買', 'ファンド・投資信託売',
            'ファンド・投資信託買', '投資家売', '投資家買',
            '取次者経由売', '取次者経由買', '外国商品先物取引業者経由売',
            '外国商品先物取引業者経由買', '合計売', '合計買']
    col_name = {dfs[0].columns[c]: cols[c]
                for c in range(len(dfs[0].columns))}
    dfs[0] = dfs[0].rename(columns=col_name)
    for idx, row in dfs[0].iterrows():
        if idx == 0:
            continue  # skip the repeated header row
        dict_w = {dfs[0].columns[ii]: row[ii] for ii in range(len(row))}
        dict_w['日付'] = common.env_time()[0][:8]
        common.insertDB3(DB_INFO, table_name, dict_w)
def rashio19(self):
    """Assemble the daily market-ratio record from several sources
    (TOPIX/N225, Morningstar breadth, Nikkei index list, weekly margin
    balances) and insert it into the ``rashio`` table.
    """
    table_name = "rashio"
    rashio = info.TOPIX_get()
    # Nikkei 225 daily OHLC.
    dict_w = s01_gmo.check_new_data()
    rashio['N225openD'] = dict_w['N225openD']
    rashio['N225highD'] = dict_w['N225highD']
    rashio['N225lowD'] = dict_w['N225lowD']
    rashio['N225closeD'] = dict_w['N225closeD']
    # Today's advance/decline breadth from Morningstar: collect every
    # row from the first "値上がり" row onwards.
    url = "https://www.morningstar.co.jp/RankingWeb/IndicesTable.do"
    dfs = common.read_html2(url, 0)  # header=0, skiprows=0 (optional)
    collecting = False
    for _, row in dfs[0].iterrows():
        if row[0] == "値上がり" or collecting:
            rashio[row[0]] = row[1]
            collecting = True
    # Nikkei index list: names from anchor tags, values from divs.
    url = "https://indexes.nikkei.co.jp/nkave/index"
    ret = requests.get(url)
    soup = BeautifulSoup(ret.content, "lxml")
    col_tmp = []
    for a in soup.find_all("a", attrs={"class": "list-title font-16 divlink"}):
        col_tmp.append(a.string)
        if a.string == '日経総合株価指数':
            break
    val_tmp = [d.string for d in
               soup.find_all("div", attrs={"class": "col-xs-6 col-sm-2"})]
    for i in range(len(col_tmp)):
        if val_tmp[i] is None:
            break  # stop at the first missing value
        rashio[col_tmp[i]] = val_tmp[i]
    try:  # temporary guard for an error first seen 2018/7/19
        # Weekly margin-balance trend; latest table only.
        url = "https://www.traders.co.jp/margin/transition/transition.asp"
        dfs = pd.read_html(common.Chorme_get(url), header=0)
        num = len(dfs) - 1
        print(dfs[num])
        list_w = ["申込日", "売り株数", "売り前週比", "売り金額", "売り前週比",
                  "買い株数", "買い前週比", "買い金額", "買い前週比",
                  "損益率", "信用倍率"]
        for idx, row in dfs[num].iterrows():
            if idx == 3:
                cnt = 0
                print(len(row))
                print(len(list_w))
                for ii in range(len(row)):
                    rashio[list_w[ii]] = row[cnt]
                    cnt += 2  # every other cell is a week-over-week delta
                    if list_w[ii] == "信用倍率":
                        break
                break
    except Exception:
        # Was a bare except: narrowed so SystemExit/KeyboardInterrupt
        # still propagate; failure is reported, not fatal.
        self.send_msg += "traders.co.jp_信用残の推移(週次)エラー発生" + "\n"
    common.insertDB3(DB_INFO, table_name, rashio)
def traders_web_D(self):
    """Scrape today's futures/options positions per broker from
    traders.co.jp into ``futures_op`` (plus a computed 合計 row), and
    today's index-futures positions into ``futures``.
    """
    # --- options positions by broker ---
    table_name = "futures_op"
    url = "https://www.traders.co.jp/domestic_stocks/invest_tool/futures/futures_op.asp"
    dfs = pd.read_html(common.Chorme_get(url), header=1)
    for ii in range(len(dfs)):
        if dfs[ii].columns[0] == "証券会社名":  # locate the broker table
            num = ii
            break
    # Build DB column names: unnamed columns become 日付 / PUT_CALL and
    # flip the P/C prefix applied to the named columns that follow.
    col_list = list(dfs[num].columns)
    col_tmp = []
    prefix = ''
    for c in dfs[num].columns:
        if "Unnamed" in c:
            if c == 'Unnamed: 2':
                col_tmp.append('日付')
                prefix = 'P'
            else:
                col_tmp.append('PUT_CALL')
                prefix = 'C'
        else:
            col_tmp.append(prefix + c.replace(".1", ""))
    dfs[num] = dfs[num].rename(
        columns={col_list[i]: col_tmp[i] for i in range(len(col_list))})
    # Ensure every column exists in the DB before inserting.
    set_new = list(dfs[num].columns)
    common.column_check(DB_INFO, table_name, set_new)
    for _, row in dfs[num].iterrows():
        dict_w = {}
        for ii in range(len(row)):
            # pd.isna replaces the fragile str(x) != "nan" comparison.
            if pd.isna(row[ii]):
                dict_w[dfs[num].columns[ii]] = 0
            else:
                try:
                    dict_w[dfs[num].columns[ii]] = int(row[ii])
                except (ValueError, TypeError):
                    # Non-numeric cell: store as-is.
                    dict_w[dfs[num].columns[ii]] = row[ii]
        dict_w['日付'] = common.env_time()[0][:8]
        common.insertDB3(DB_INFO, table_name, dict_w)
    # --- totals row for today ---
    # NOTE(review): unlike the SUM query below, this query does not
    # quote the date literal — confirm the 日付 column's affinity
    # tolerates the unquoted form before changing either query.
    sqls = "select * from futures_op where 日付 = %(key1)s" % {
        'key1': common.env_time()[0][:8]}
    sql_pd = common.select_sql(DB_INFO, sqls)
    set_new = [i for i in sql_pd.columns
               if i not in ('now', '証券会社名', '日付', 'PUT_CALL')]
    # Sum today's numeric columns in one query.
    sqls = "select SUM(" + "),SUM(".join(set_new) + \
           ") from futures_op where 日付 = '%(key1)s'" % {
               'key1': common.env_time()[0][:8]}
    sql_pd = common.select_sql(DB_INFO, sqls)
    for _, row in sql_pd.iterrows():
        # SQL NULL sums become 0.
        set_val = [0 if v is None else v for v in row]
        set_val = common.to_int(set_val)
        col = {set_new[i]: set_val[i] for i in range(len(set_new))}
        col['証券会社名'] = '合計'
        col['日付'] = common.env_time()[0][:8]
        col = common.to_int(col)
        common.insertDB3(DB_INFO, table_name, col)
        print(col)
    # --- index-futures positions by broker ---
    table_name = "futures"
    url = "https://www.traders.co.jp/domestic_stocks/invest_tool/futures/futures_top.asp"
    dfs = common.read_html2(common.Chorme_get(url), 1)  # header=1
    for ii in range(len(dfs)):
        if dfs[ii].columns[0] == "SELL":  # locate the positions table
            num = ii
            break
    cols = ['証券会社名', 'SELL_225', 'BUY_225', 'NET_225', '日付',
            'SELL_TOPIX', 'BUY_TOPIX', 'NET_TOPIX', '更新日',
            'SELL_225M', 'BUY_225M', 'NET_225M']
    dfs[num] = dfs[num].rename(
        columns={dfs[num].columns[c]: cols[c]
                 for c in range(len(dfs[num].columns))})
    for _, row in dfs[num].iterrows():
        dict_w = {dfs[num].columns[ii]: row[ii] for ii in range(len(row))}
        dict_w['更新日'] = common.env_time()[1]
        dict_w['日付'] = common.env_time()[0][:8]
        common.insertDB3(DB_INFO, table_name, dict_w)