def TOPIX_get(self, Stime='15:15'):
    table_name = 'topixL'
    yest_day = str(datetime.date.today() - datetime.timedelta(days=0)).replace("-", "/") + ' ' + Stime
    dict_e = {}
    # get the contract month
    params = {'table': table_name, 'key1': yest_day, 'key2': ''}
    sqls = "select *,rowid from %(table)s where now > '%(key1)s'" % params
    sql_pd = common.select_sql('I08_futures.sqlite', sqls)
    num = len(sql_pd) - 2
    gen = sql_pd.loc[num, '限月']
    # sanity check: is this contract month usable?
    if sql_pd.loc[num, '現在値'] == '--':
        gen = sql_pd.loc[num + 1, '限月']
    # get the closing rows (now) for that contract month
    params = {'table': table_name, 'key2': yest_day, 'key3': gen}
    sqls = "select *,SUBSTR(now,12,2) as T,rowid from %(table)s where 限月 = '%(key3)s' and now > '%(key2)s'" % params
    sql_pd = common.select_sql('I08_futures.sqlite', sqls)
    num = len(sql_pd) - 1  # recomputed, but the first filtered row (loc[0]) is what is read below
    dict_e['TOPIXnow' + Stime[:2]] = sql_pd.loc[0, 'now']
    dict_e['TOPIX_S' + Stime[:2]] = sql_pd.loc[0, '始値']
    dict_e['TOPIX_H' + Stime[:2]] = sql_pd.loc[0, '高値']
    dict_e['TOPIX_L' + Stime[:2]] = sql_pd.loc[0, '安値']
    dict_e['TOPIX_C' + Stime[:2]] = sql_pd.loc[0, '現在値']
    dict_e['TOPIX_CL' + Stime[:2]] = sql_pd.loc[0, '前日終値']
    dict_e = common.to_number(dict_e)
    return dict_e
def main_TP(self):  # trap-repeat grid strategy
    dict_ww = {}
    code = '米NQ100'
    table_name = 'NQ100_TP'
    # fetch current market info
    dict_w = f02_gmo.info_get()
    dict_w = common.to_number(dict_w)
    sqls = "select *,rowid from %(table)s where rowid=(select max(rowid) from %(table)s) ;" % {
        'table': '_gmo_info'
    }
    sql_pd = common.select_sql('B05_cfd_stg.sqlite', sqls)
    dict_t = sql_pd.to_dict('records')
    dict_t = common.to_number(dict_t[0])
    dict_ww['H'] = max(dict_w[code + '_高'], dict_t[code + '_高'])
    dict_ww['L'] = min(dict_w[code + '_安'], dict_t[code + '_安'])
    dict_ww['C'] = dict_w[code]
    # fetch the previous day's entry info
    sqls = "select *,rowid from %(table)s where rowid=(select max(rowid) from %(table)s) ;" % {
        'table': table_name
    }
    sql_pd = common.select_sql('B05_cfd_stg.sqlite', sqls)
    if len(sql_pd) == 0:
        common.insertDB3('B05_cfd_stg.sqlite', table_name, dict_ww)
        return
    dict_l = sql_pd.to_dict('records')
    dict_l = common.to_number(dict_l[0])
    sp_work = []
    # settlement check
    if dict_l.get('poji'):
        sp_work = common.to_number(dict_l['poji'].split("_"))
        for i in reversed(range(len(sp_work))):
            if dict_ww['L'] > sp_work[i] + 100:
                dict_ww['LongPL'] = dict_ww.get('LongPL', 0) + 100
                sp_work.pop(i)
        dict_ww['poji'] = "_".join([str(n) for n in sp_work])
        print(dict_ww)
    # check the previous day's trades
    if dict_l.get('trade'):
        sp_work2 = common.to_number(dict_l['trade'].split("_"))
        if len(sp_work2) > 0:
            for i in reversed(range(len(sp_work2))):
                if dict_ww['H'] > sp_work2[i]:
                    print("追加", sp_work2[i], dict_ww['L'])
                    sp_work.append(sp_work2[i])
            dict_ww['poji'] = "_".join([str(n) for n in sp_work])
    # place new entries
    trade = []
    VAL = dict_ww['C'] - dict_ww['C'] % 100 + 100
    for ii in range(3):
        VVAL = VAL + 100 * ii
        if VVAL not in sp_work:
            trade.append(str(VVAL))
    dict_ww['trade'] = "_".join(trade)
    print(dict_ww)
    common.insertDB3('B05_cfd_stg.sqlite', table_name, dict_ww)
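
# The 100-point grid used in main_TP (VAL = C - C % 100 + 100, then up to three levels upward,
# skipping levels already held) can be hard to read inline. The helper below is a minimal,
# self-contained sketch of that rounding logic only; the name _sketch_grid_levels and its
# defaults are hypothetical and it is not called anywhere in this module.
def _sketch_grid_levels(close, step=100, levels=3, skip=()):
    """Return the next `levels` grid prices above `close`, excluding prices already held."""
    base = close - close % step + step  # smallest multiple of `step` strictly above `close`
    return [base + step * k for k in range(levels) if base + step * k not in skip]

# Example with made-up prices: _sketch_grid_levels(14262) -> [14300, 14400, 14500]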
def main(self):
    code = 'USD/JPY'
    self.code = code
    df_info = common.select_sql(
        r'I07_fx.sqlite',
        'select * from %(table)s where rowid > (select max(rowid) from %(table)s) - %(key1)s' % {
            'table': 'gmofx',
            'key1': 100
        })
    del df_info['MXN/JPY']
    del df_info['uptime']
    del df_info['result']
    for code in ['USD/JPY', 'EUR/USD', 'EUR/JPY', 'GBP/JPY']:
        try:
            del df_info[code.replace("/", "") + '_result']
        except KeyError:
            pass
    for code in ['USD/JPY', 'EUR/USD', 'EUR/JPY', 'GBP/JPY']:
        df_info = self.fx_data(df_info, 1)
        x_data = info.add_avg(df_info, code)
        result = self.model_save2(x_data, code)
        print("result", result)
        sqls = common.create_update_sql(
            'I07_fx.sqlite', {code.replace("/", "") + '_result': result}, 'gmofx')
def STR_C(self):
    # reset the result columns
    sqls = "update kabu_list set L_PL_085 = NULL ,S_PL_085 = NULL"
    common.db_update('B01_stock.sqlite', sqls)
    files = os.listdir(compf.CODE_DIR)
    for i in files:
        year_e = 2018
        code = i.replace(".txt", "")
        try:
            y = self.ATR_stg(code, str(year_e - 4), str(year_e), "_base_" + str(year_e))
        except Exception:
            print(code, "不明なエラー発生")
            continue
        if len(y) > 500 and int(y.O[1]) > 150 and common.stock_req(code, "SHELL") == 1:
            dict_pl = {}
            dict_w = {}
            L_PL = compf.check_PL(y['plb'])
            L_PL['MEMO'] = "L_PL_085"
            S_PL = compf.check_PL(y['pls'])
            S_PL['MEMO'] = "S_PL_085"
            for T_PL in [L_PL, S_PL]:
                dict_w = {}
                title = code + "_" + str(year_e) + T_PL['MEMO']
                if T_PL['MEMO'] == "L_PL_085":
                    pl = 'plb'
                if T_PL['MEMO'] == "S_PL_085":
                    pl = 'pls'
                # 5-year performance filter
                if (T_PL['WIN'] > 58 and T_PL['PL'] > 1.1):
                    dict_pl.update(T_PL)
                    # previous year's performance
                    y = self.ATR_stg(code, str(year_e - 0), str(year_e),
                                     "_" + title + "_" + str(year_e))
                    if len(y) == 0:
                        continue
                    T_PL = compf.check_PL(y[pl])
                    # require the previous year's PL to exceed the 5-year PL
                    if (dict_pl['PL'] < T_PL['PL']):
                        dict_w[dict_pl['MEMO']] = dict_pl['WIN']
                        # get the rowid
                        sqls = "select *,rowid from %(table)s where コード = '%(key1)s' ;" % {
                            'table': 'kabu_list',
                            'key1': code
                        }
                        sql_pd = common.select_sql('B01_stock.sqlite', sqls)
                        sqls = common.create_update_sql(
                            'B01_stock.sqlite', dict_w, 'kabu_list',
                            sql_pd['rowid'][0])  # omitting the last argument updates the last row automatically
def n225_topix_avg(self):
    dict_w = {}
    yest_day = str(datetime.date.today() - datetime.timedelta(days=30)).replace("-", "/")
    params = {'table': 'rashio', 'key1': yest_day}
    sqls = "select *,rowid from %(table)s where now > '%(key1)s'" % params
    sql_pd = common.select_sql('I01_all.sqlite', sqls)
    num = len(sql_pd) - 1
    dict_w['N225_乖離avg30'] = round(
        sql_pd.loc[num, 'N225closeD'] / sql_pd.N225closeD.rolling(num).mean()[num], 3)
    dict_w['N225_HighLow30'] = round(
        (sql_pd.loc[num, 'N225closeD'] - sql_pd['N225closeD'].min()) /
        (sql_pd['N225closeD'].max() - sql_pd['N225closeD'].min()), 3)
    dict_w['TOPIX_乖離avg30'] = round(
        sql_pd.loc[num, 'TOPIX_C15'] / sql_pd.TOPIX_C15.rolling(num).mean()[num], 3)
    dict_w['TOPIX_HighLow30'] = round(
        (sql_pd.loc[num, 'TOPIX_C15'] - sql_pd['TOPIX_C15'].min()) /
        (sql_pd['TOPIX_C15'].max() - sql_pd['TOPIX_C15'].min()), 3)
    # convert values to DB-friendly numeric types
    dict_w = common.to_number(dict_w)
    # get the last rowid
    rid = common.last_rowid('I01_all.sqlite', 'rashio')
    # update the DB (omitting the last argument updates the last row automatically)
    sqls = common.create_update_sql('I01_all.sqlite', dict_w, 'rashio', rid)
    return dict_w
def main_exec2(self, file_csv):
    sqls = "select *,rowid from kabu_list"
    sql_pd = common.select_sql('B01_stock.sqlite', sqls)
    for i, row in sql_pd.iterrows():
        code = row['コード']
        if common.stock_req(code, 1) == 1:  # sell flag set
            print(code)
            code_text = os.path.join(compf.CODE_DIR, str(code) + '.txt')
            if os.path.exists(code_text):
                df = pd.DataFrame(
                    index=pd.date_range('2007/01/01', common.env_time()[1][0:10]))
                df = df.join(
                    pd.read_csv(code_text,
                                index_col=0,
                                parse_dates=True,
                                encoding="cp932",
                                header=None))
                df = df.dropna()
                if len(df) > 1500:
                    if file_csv == '_Monthly_last.csv':
                        PL = self.Monthly_last(code, df)
                    if file_csv == '_vora_stg.csv':
                        PL = self.vora_stg(code, df)
                    # if file_csv == '_ATR_stg.csv':
                    #     PL = self.ATR_stg(code, df)
                    if file_csv == '_day_stg.csv':
                        PL = self.day_stg(code, df)
                    if len(PL) > 0:
                        if row['市場'].count(","):
                            sp = row['市場'].split(",")
                            row['市場'] = sp[0]
                        title = str(row['コード']) + "_" + str(row['銘柄名']) + "_" + str(
                            row['セクタ']) + "_" + str(row['市場']) + file_csv
                        Equity, backreport = compf.BacktestReport(
                            PL, title, self.S_DIR, 1.1, "フィルター除外")  # exclude filtered names
def main_bak(self):
    code = 'USD/JPY'
    self.code = code
    df_info = common.select_sql(
        r'I07_fx.sqlite',
        'select * from %(table)s where rowid > (select max(rowid) from %(table)s) - %(key1)s' % {
            'table': 'gmofx',
            'key1': 100
        })
    del df_info['MXN/JPY']
    del df_info['uptime']
    del df_info['result']
    df_info = self.fx_data(df_info, 1)
    x_data = info.add_avg(df_info, code)
    print(len(x_data.columns))
    result = self.model_save2(x_data, code)
    print("result", result)
    sqls = common.create_update_sql('I07_fx.sqlite', {'result': result}, 'gmofx')
def retry_check(self):
    sqls = 'select *,rowid from retry where status < 0 ;'
    sql_pd = common.select_sql(self.INFO_DB, sqls)
    for i, row in sql_pd.iterrows():
        common.to_number(row)
        dict_w = {}
        bybypara = dict(row)
        try:
            result, msg = f02_gmo.gmo_cfd_exec(bybypara)
            if str(msg).count('正常終了'):
                dict_w['status'] = 0
            else:
                dict_w['status'] = row['status'] + 1
        except Exception:
            dict_w['status'] = row['status'] + 1
            self.send_msg += u'CFDトレードリトライ異常終了_' + bybypara['code'] + "\n"
        sqls = common.create_update_sql(
            self.INFO_DB, dict_w, 'retry',
            bybypara['rowid'])  # omitting the last argument updates the last row automatically
def fx_data(rows=1000000):
    # supplementary data
    sql_pd = common.select_sql(
        r'I07_fx.sqlite',
        'select * from %(table)s where rowid > (select max(rowid) from %(table)s) - %(key1)s' % {
            'table': 'gmofx',
            'key1': rows
        })
    df_info = sql_pd.iloc[:, :20]
    # convert now to a date (yyyy/mm/dd hh:mm:ss -> yyyy-mm-dd)
    # df_info['now'] = df_info['now'].map(lambda x: x[:10].replace('/', '-'))
    # re-index on the timestamp and derive calendar features
    df_info = df_info.set_index('now')
    df_info['M'] = pd.to_datetime(df_info.index, infer_datetime_format=True).month
    df_info['W'] = pd.to_datetime(df_info.index, infer_datetime_format=True).dayofweek
    df_info['H'] = pd.to_datetime(df_info.index, infer_datetime_format=True).hour
    # replace NaN
    df_info = df_info.fillna(0)
    # replace non-numeric placeholders
    df_info = df_info.replace(['nan', '--', '-'], 0)
    list_w = []
    for col in df_info.columns:
        try:
            df_info[col] = df_info[col].map(lambda x: str(x).replace(',', ''))
            df_info[col] = df_info[col].astype(np.float64)
            # added 2020-01-06: drop columns that are zero in more than 50% of rows
            ZERO = sum(df_info[col] == 0) / len(df_info)
            if ZERO > 0.5:
                print("del", col, ZERO)
                list_w.append(col)
        except (ValueError, TypeError):
            print("NG", col)
            list_w.append(col)
    df_info = df_info.drop(list_w, axis=1)
    return df_info
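
# fx_data keeps only columns that can be cast to float and that are non-zero in at least half
# of the rows. The helper below is a minimal, self-contained sketch of that same filter written
# against plain pandas; the name _sketch_drop_sparse_columns and its threshold are illustrative
# assumptions, not part of this module, and it operates on an arbitrary in-memory DataFrame.
def _sketch_drop_sparse_columns(df, zero_ratio=0.5):
    """Cast columns to float (stripping thousands separators) and drop unusable or mostly-zero ones."""
    import pandas as pd
    keep = {}
    for col in df.columns:
        try:
            series = pd.to_numeric(df[col].astype(str).str.replace(',', ''), errors='raise')
        except (ValueError, TypeError):
            continue  # non-numeric column: drop it
        if (series == 0).mean() <= zero_ratio:
            keep[col] = series  # keep columns that are mostly non-zero
    return pd.DataFrame(keep, index=df.index)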
def traders_web_W(self):  # weekly investor-category trading data
    table_name = 'investment_weekly'
    UURL = "https://www.traders.co.jp/domestic_stocks/stocks_data/investment_3/investment_3.asp"
    dfs = common.read_html2(common.Chorme_get(UURL), 0)  # header=0, skiprows=0 (optional)
    temp = common.temp_path("csv", os.path.basename(__file__) + "investment.csv")
    dfs[1].to_csv(temp)
    # check whether a new weekly row has been published
    with open(temp, 'r') as f:
        dataReader = csv.reader(f)
        for row in dataReader:
            if row[1] == '最新週':
                dict_w = {'週間': common.env_time()[1][:10],
                          '海外投資家': row[2], '生損保': row[3], '銀行': row[4],
                          '信託銀行': row[5], 'その他金融': row[6], '小計_金融法人': row[7],
                          '事業法人': row[8], 'その他法人': row[9], '投信': row[10],
                          '計_法人': row[11], '現金': row[12], '信用': row[13],
                          '計_現金信用': row[14]}
                # duplicate check against the latest stored row
                sqls = "select *,rowid from %(table)s where rowid=(select max(rowid) from %(table)s) ;" % {
                    'table': table_name}
                sql_pd = common.select_sql(DB_INFO, sqls)
                if len(sql_pd) > 0:
                    if dict_w['海外投資家'] != sql_pd.loc[0, '海外投資家']:
                        common.insertDB3(DB_INFO, table_name, dict_w)
                else:
                    common.insertDB3(DB_INFO, table_name, dict_w)
                print(table_name, dict_w)
def breakout_ma_two(self, window0, window9, col, table):
    status = 0
    data = {'L_flag': "", 'S_flag': "", 'S_PL': "", 'L_PL': ""}
    sqls = "select %(key1)s from %(table)s where rowid=(select max(rowid) from %(table)s) ;" % {
        'table': table,
        'key1': col
    }
    sql_pd = common.select_sql(self.INFO_DB, sqls)
    data['S3_R'] = float(sql_pd.loc[0, col])
    # append the latest value, then pull the series back out as a DataFrame
    tablename = col + "_breakout_ma_two"
    common.insertDB3(self.INFO_DB, tablename, data)
    sqls = "select *,rowid from " + tablename
    tsd = common.select_sql(self.INFO_DB, sqls)
    tsd.S3_R.dropna()
    cnt = len(tsd) - 1
    if cnt < 10:
        return status
    data['avg_' + str(window0)] = tsd.S3_R.rolling(window0).mean().shift(1)[cnt]
    data['avg_' + str(window9)] = tsd.S3_R.rolling(window9).mean().shift(1)[cnt]
    cnt2 = len(tsd) - 2
    if tsd.loc[cnt2, 'S_flag'] is None or tsd.loc[cnt2, 'S_flag'] == "":
        S_flag = 0
    else:
        S_flag = float(tsd.loc[cnt2, 'S_flag'])
    if tsd.loc[cnt2, 'L_flag'] is None or tsd.loc[cnt2, 'L_flag'] == "":
        L_flag = 0
    else:
        L_flag = float(tsd.loc[cnt2, 'L_flag'])
    # window0 = 32 797.98438
    # window9 = 12 798.41667
    data['S_flag'] = tsd.loc[cnt2, 'S_flag']
    data['L_flag'] = tsd.loc[cnt2, 'L_flag']
    common.to_number(data)
    status = 0
    # exits
    if data['avg_' + str(window0)] > data['avg_' + str(window9)] and S_flag != 0:
        # exit short position
        data['S_PL'] = S_flag - data['S3_R']  # for reporting
        data['S_flag'] = 0
        status = -2
    elif data['avg_' + str(window0)] < data['avg_' + str(window9)] and L_flag != 0:
        # exit long position
        data['L_PL'] = data['S3_R'] - L_flag  # for reporting
        data['L_flag'] = 0
    # entries
    elif data['avg_' + str(window0)] < data['avg_' + str(window9)] and S_flag == 0:
        # enter short position
        data['S_flag'] = data['S3_R']
        status = -1
    elif data['avg_' + str(window0)] > data['avg_' + str(window9)] and L_flag == 0:
        # enter long position
        data['L_flag'] = data['S3_R']
        status = 1
        # status = 2
    # get the rowid
    sqls = "select *,rowid from %(table)s where rowid=(select max(rowid) from %(table)s) ;" % {
        'table': tablename
    }
    sql_pd = common.select_sql(self.INFO_DB, sqls)
    sqls = common.create_update_sql(self.INFO_DB, data, tablename, sql_pd['rowid'][0])
    return status
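
# breakout_ma_two boils down to comparing two one-bar-lagged rolling means of the stored series:
# it sits on the long side while the window0 average is above the window9 average and on the
# short side while it is below, persisting the flags in SQLite between runs. The sketch below
# shows only that comparison on an in-memory pandas Series; the helper name and example windows
# are illustrative assumptions and it is not wired into the SQLite state handling above.
def _sketch_two_ma_state(prices, window0=32, window9=12):
    """Return +1 (long side), -1 (short side) or 0 (not enough history) for the latest bar."""
    import pandas as pd
    s = pd.Series(prices, dtype=float)
    avg0 = s.rolling(window0).mean().shift(1).iloc[-1]  # lagged one bar, as in breakout_ma_two
    avg9 = s.rolling(window9).mean().shift(1).iloc[-1]
    if pd.isna(avg0) or pd.isna(avg9):
        return 0
    return 1 if avg0 > avg9 else -1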
def cfd_poji_check(self):
    ok_msg = ""
    list_code, list_type = f02_gmo.info_pojicheck()
    codes, types, amounts = f03_ctfx.f03_ctfx_main({
        'kubun': 'ポジションチェック',
        'amount': '2,000'
    })
    # get all table names
    sqls = "select name from sqlite_master where type='table'"
    sql_pd = common.select_sql(self.INFO_DB, sqls)
    for i, rrow in sql_pd.iterrows():
        table_name = rrow['name']
        sp_work = table_name.split("_")
        code = sp_work[0]
        if len(sp_work) != 4:
            continue
        sqls = "select L_flag,S_flag from %(table)s where (L_flag > 0 or S_flag > 0) and rowid=(select max(rowid) from %(table)s) ;" % {
            'table': table_name
        }
        sql_pdd = common.select_sql(self.INFO_DB, sqls)
        if len(sql_pdd) > 0:
            if sql_pdd['L_flag'][0] != "":
                if float(sql_pdd['L_flag'][0]) > 0:
                    type_w = "買"
            if sql_pdd['S_flag'][0] != "":
                if float(sql_pdd['S_flag'][0]) > 0:
                    type_w = "売"
            # check CFD positions
            for ii in range(len(list_code)):
                if list_code[ii] == code and list_type[ii] == type_w:
                    del list_code[ii]
                    del list_type[ii]
                    ok_msg += u'CFDポジション一致_' + code + "_" + type_w + "\n"
                    break
            else:
                # check FX positions
                for ii in range(len(codes)):
                    if codes[ii] == code and types[ii] == type_w:
                        del codes[ii]
                        del types[ii]
                        del amounts[ii]
                        ok_msg += u'FXポジション一致_' + code + "_" + type_w + "\n"
                        break
                else:
                    self.send_msg += u'FXポジションなし_' + code + "_" + type_w + "\n"
                    if code.count("JPY") or code.count("USD"):
                        bybypara = {
                            'code': code,
                            'amount': 2,
                            'buysell': type_w,
                            'kubun': '新規',
                            'nari_hiki': '',
                            'settle': 0,
                            'comment': code + '_成行'
                        }
                        # f03_ctfx.f03_ctfx_main(bybypara)
                    else:
                        bybypara = {
                            'code': code,
                            'amount': 1,
                            'buysell': type_w,
                            'kubun': '新規',
                            'nari_hiki': '',
                            'settle': 0,
                            'comment': code + '_成行'
                        }
                        # f02_gmo.gmo_cfd_exec(bybypara)
    if len(list_code) > 0:
        self.send_msg += u'未決済銘柄あり_' + '_'.join([k for k in list_code]) + "\n" + '_'.join(
            [k for k in list_type]) + "\n"
        for ii in range(len(list_code)):
            bybypara = {
                'code': list_code[ii],
                'amount': 1,
                'buysell': list_type[ii],
                'kubun': '決済',
                'nari_hiki': '',
                'settle': -1,
                'comment': list_code[ii] + '_' + list_type[ii] + '決済'
            }
            f02_gmo.gmo_cfd_exec(bybypara)
    if len(codes) > 0:
        self.send_msg += u'未決済銘柄あり_' + '_'.join([k for k in codes]) + "\n" + '_'.join(
            [k for k in types]) + "\n" + '_'.join([k for k in amounts]) + "\n" + ok_msg
        for ii in range(len(codes)):
            code = codes[ii][:3] + '/' + codes[ii][3:]
            bybypara = {
                'code': code,
                'amount': amounts[ii][:1],
                'buysell': types[ii],
                'kubun': '決済',
                'nari_hiki': '',
                'settle': -1,
                'comment': codes[ii] + '_' + types[ii] + '決済'
            }
            f03_ctfx.f03_ctfx_main(bybypara)
def breakout_ma_three(self, window0, window9, window5, col, table):
    status = 0
    data = {'L_flag': "", 'S_flag': "", 'S_PL': "", 'L_PL': "", 'S3_R': ""}
    sqls = 'select "%(key1)s" from %(table)s where rowid=(select max(rowid) from %(table)s) ;' % {
        'table': table,
        'key1': col
    }
    sql_pd = common.select_sql(self.INFO_DB, sqls)
    data['S3_R'] = float(sql_pd.loc[0, col])
    # append the latest value, then pull the series back out as a DataFrame
    tablename = col + "_breakout_ma_three"
    common.insertDB3(self.INFO_DB, tablename, data)
    col_name = ', '.join([k for k in data.keys()])
    sqls = "select *,rowid from %(table)s" % {'table': tablename}
    tsd = common.select_sql(self.INFO_DB, sqls)
    tsd.S3_R.dropna()
    cnt = len(tsd) - 1
    if cnt < 10:
        return status
    data['avg_' + str(window0)] = tsd.S3_R.rolling(window0).mean().shift(1)[cnt]
    data['avg_' + str(window9)] = tsd.S3_R.rolling(window9).mean().shift(1)[cnt]
    data['avg_' + str(window5)] = tsd.S3_R.rolling(window5).mean().shift(1)[cnt]  # for reporting
    # init ----------------------------------
    cnt2 = len(tsd) - 2
    if tsd.loc[cnt2, 'S_flag'] is None or tsd.loc[cnt2, 'S_flag'] == "":
        S_flag = 0
    else:
        S_flag = float(tsd.loc[cnt2, 'S_flag'])
    if tsd.loc[cnt2, 'L_flag'] is None or tsd.loc[cnt2, 'L_flag'] == "":
        L_flag = 0
    else:
        L_flag = float(tsd.loc[cnt2, 'L_flag'])
    data['S_flag'] = tsd.loc[cnt2, 'S_flag']
    data['L_flag'] = tsd.loc[cnt2, 'L_flag']
    common.to_number(data)
    if S_flag == 0 and data['avg_' + str(window0)] < data[
            'avg_' + str(window5)] < data['avg_' + str(window9)]:
        # enter short position
        data['S_flag'] = data['S3_R']
        status = -1
    elif (data['avg_' + str(window0)] > data['avg_' + str(window5)]
          or data['avg_' + str(window5)] > data['avg_' + str(window9)]) and S_flag != 0:
        # exit short position
        data['S_PL'] = S_flag - data['S3_R']
        data['S_flag'] = 0
        status = -2
    elif L_flag == 0 and data['avg_' + str(window0)] > data[
            'avg_' + str(window5)] > data['avg_' + str(window9)]:
        # enter long position
        data['L_flag'] = data['S3_R']
        status = 1
    elif (data['avg_' + str(window0)] < data['avg_' + str(window5)]
          or data['avg_' + str(window5)] < data['avg_' + str(window9)]) and L_flag != 0:
        # exit long position
        data['L_PL'] = data['S3_R'] - L_flag
        data['L_flag'] = 0
    # get the rowid
    sqls = "select *,rowid from %(table)s where rowid=(select max(rowid) from %(table)s) ;" % {
        'table': tablename
    }
    sql_pd = common.select_sql(self.INFO_DB, sqls)
    sqls = common.create_update_sql(self.INFO_DB, data, tablename, sql_pd['rowid'][0])
    return status
def breakout_simple_f(self, window0, window9, f0, f9, col, table):
    status = 0
    data = {
        'L_flag': "",
        'S_flag': "",
        'L_SUM': "",
        'S_PL': "",
        'L_PL': ""
    }
    sqls = "select %(key1)s from %(table)s where rowid=(select max(rowid) from %(table)s) ;" % {
        'table': table,
        'key1': col
    }
    sql_pd = common.select_sql(self.INFO_DB, sqls)
    data['S3_R'] = float(sql_pd.loc[0, col])
    # append the latest value, then pull the series back out as a DataFrame
    tablename = col + "_breakout_simple_f"
    common.insertDB3(self.INFO_DB, tablename, data)
    sqls = "select *,rowid from " + tablename
    tsd = common.select_sql(self.INFO_DB, sqls)
    tsd.S3_R.dropna()
    cnt = len(tsd) - 1
    if cnt < 10:
        return 0
    data['max_s' + str(window0)] = tsd.S3_R.rolling(window0).max().shift(1)[cnt]  # ub0
    data['min_s' + str(window0)] = tsd.S3_R.rolling(window0).min().shift(1)[cnt]  # lb0
    data['max_e' + str(window9)] = tsd.S3_R.rolling(window9).max().shift(1)[cnt]  # ub9
    data['min_e' + str(window9)] = tsd.S3_R.rolling(window9).min().shift(1)[cnt]  # lb9
    data['avg_l' + str(f0)] = tsd.S3_R.rolling(f0).mean().shift(1)[cnt]  # f0
    data['avg_s' + str(f9)] = tsd.S3_R.rolling(f9).mean().shift(1)[cnt]  # f9
    # init ----------------------------------
    cnt2 = len(tsd) - 2
    if tsd.loc[cnt2, 'S_flag'] is None or tsd.loc[cnt2, 'S_flag'] == "":
        S_flag = 0
    else:
        S_flag = float(tsd.loc[cnt2, 'S_flag'])
    if tsd.loc[cnt2, 'L_flag'] is None or tsd.loc[cnt2, 'L_flag'] == "":
        L_flag = 0
    else:
        L_flag = float(tsd.loc[cnt2, 'L_flag'])
    data['S_flag'] = tsd.loc[cnt2, 'S_flag']
    data['L_flag'] = tsd.loc[cnt2, 'L_flag']
    common.to_number(data)
    c = data['S3_R']
    status = 0
    if c > data['max_e' + str(window9)] and S_flag != 0:
        # exit short position
        data['S_PL'] = S_flag - c  # for reporting
        data['S_flag'] = 0
        status = -2
    elif c < data['min_e' + str(window9)] and L_flag != 0:
        # exit long position
        data['L_PL'] = c - L_flag  # for reporting
        data['L_flag'] = 0
        status = 2
    elif c < data['min_s' + str(window0)] and S_flag == 0 and L_flag == 0 and data[
            'avg_s' + str(f9)] > data['avg_l' + str(f0)]:
        # enter short position
        data['S_flag'] = c
        status = -1
    elif c > data['max_s' + str(window0)] and S_flag == 0 and L_flag == 0 and data[
            'avg_s' + str(f9)] < data['avg_l' + str(f0)]:
        # enter long position
        data['L_flag'] = c
        status = 1
    """
    # exit check (disabled)
    if status == -1 and L_flag != 0:
        print("仕切り1")
        self.byby_exec_fx(2, col, 1)
    if status == 1 and S_flag != 0:
        print("仕切り2")
        self.byby_exec_fx(-2, col, 1)
    """
    # get the rowid
    sqls = "select *,rowid from %(table)s where rowid=(select max(rowid) from %(table)s) ;" % {
        'table': tablename
    }
    sql_pd = common.select_sql(self.INFO_DB, sqls)
    sqls = common.create_update_sql(self.INFO_DB, data, tablename, sql_pd['rowid'][0])
    return status
def traders_web_D(self):
    # today's futures option trading data
    table_name = "futures_op"
    UURL = "https://www.traders.co.jp/domestic_stocks/invest_tool/futures/futures_op.asp"
    # get the tables on the page
    dfs = pd.read_html(common.Chorme_get(UURL), header=1)
    for ii in range(len(dfs)):
        # find the table whose first column is the broker name
        if dfs[ii].columns[0] == "証券会社名":
            num = ii
            break
    # list the column names taken from the web page
    col_list = [i for i in dfs[num].columns]
    # build the column names used in the DB
    col_tmp = []
    H = ''
    for i in dfs[num].columns:
        if i.count("Unnamed"):
            if 'Unnamed: 2' == i:
                col_tmp.append('日付')
                H = 'P'
            else:
                col_tmp.append('PUT_CALL')
                H = 'C'
        else:
            col_tmp.append(H + i.replace(".1", ""))
    # rename the columns: col_list -> col_tmp
    # col = dict(zip(col_list, col_tmp))
    col = {col_list[i]: col_tmp[i] for i in range(len(col_list))}
    dfs[num] = dfs[num].rename(columns=col)
    # get the column info from the DB and add any missing columns
    set_new = [i for i in dfs[num].columns]
    res = common.column_check(DB_INFO, table_name, set_new)
    # import into the DB
    for idx, row in dfs[num].iterrows():
        dict_w = {}
        for ii in range(len(row)):
            if str(row[ii]) != str(float("nan")):
                try:
                    dict_w[dfs[num].columns[ii]] = int(row[ii])
                except (ValueError, TypeError):
                    dict_w[dfs[num].columns[ii]] = row[ii]
            else:
                dict_w[dfs[num].columns[ii]] = 0
        dict_w['日付'] = common.env_time()[0][:8]
        common.insertDB3(DB_INFO, table_name, dict_w)
    # get today's columns
    sqls = "select * from futures_op where 日付 = '%(key1)s'" % {'key1': common.env_time()[0][:8]}
    sql_pd = common.select_sql(DB_INFO, sqls)
    set_new = [i for i in sql_pd.columns
               if i != 'now' and i != '証券会社名' and i != '日付' and i != 'PUT_CALL']
    # sum today's columns
    sqls = "select SUM(" + "),SUM(".join(set_new) + ") from futures_op where 日付 = '%(key1)s'" % {
        'key1': common.env_time()[0][:8]}
    sql_pd = common.select_sql(DB_INFO, sqls)
    for i, row in sql_pd.iterrows():
        set_val = []
        for ii in range(len(row)):
            if row[ii] is None:
                set_val.append(0)
            else:
                set_val.append(row[ii])
        set_val = common.to_int(set_val)
        col = {set_new[i]: set_val[i] for i in range(len(set_new))}
        col['証券会社名'] = '合計'
        col['日付'] = common.env_time()[0][:8]
        col = common.to_int(col)
        common.insertDB3(DB_INFO, table_name, col)
        print(col)

    # today's futures trading data by broker
    table_name = "futures"
    UURL = "https://www.traders.co.jp/domestic_stocks/invest_tool/futures/futures_top.asp"
    dfs = common.read_html2(common.Chorme_get(UURL), 1)  # header=0, skiprows=0 (optional)
    for ii in range(len(dfs)):
        if dfs[ii].columns[0] == "SELL":
            num = ii
            break
    # remap the columns
    CC = ['証券会社名', 'SELL_225', 'BUY_225', 'NET_225', '日付', 'SELL_TOPIX',
          'BUY_TOPIX', 'NET_TOPIX', '更新日', 'SELL_225M', 'BUY_225M', 'NET_225M']
    col_name = {dfs[num].columns[c]: CC[c] for c in range(len(dfs[num].columns))}
    dfs[num] = dfs[num].rename(columns=col_name)
    # import into the DB
    for idx, row in dfs[num].iterrows():
        dict_w = {}
        for ii in range(len(row)):
            dict_w[dfs[num].columns[ii]] = row[ii]
        dict_w['更新日'] = common.env_time()[1]
        dict_w['日付'] = common.env_time()[0][:8]
        common.insertDB3(DB_INFO, table_name, dict_w)