def trade():
    """Run the live trading loop.

    Builds buyer/seller decision models from the configured parameters,
    wires them into a Trader, and hands control to TimeTradeEnvironment.
    On any unhandled exception the traceback is logged and the whole
    process re-execs itself (self-reboot) via trade/tr.py.
    """
    params = get_params()
    api = API()
    try:
        logger.info('[params], ' + dict2str(params) + 'initial_assets: ' + str(api.get_asset()))
        # Register the listener models (entry/exit decisions).
        # NOTE(review): buyer and seller are constructed with identical
        # parameters — confirm this is intentional (a ModelNaive seller is
        # commented out just below).
        buyer = ModelLGBBinary(params['pair'], api, params['norm_mean'], params['norm_std'], params['lower'], params['upper'], params['loss_lower'])
        seller = ModelLGBBinary(params['pair'], api, params['norm_mean'], params['norm_std'], params['lower'], params['upper'], params['loss_lower'])
        #seller = ModelNaive(params['reward_upper'], params['loss_lower'])
        trader = Trader(params['pair'], params['candle_type'], buyer, seller, params['amount'], params['order_type'], params['asset_lower'], api)
        # Hand control to the trading environment (does not return normally).
        TimeTradeEnvironment(params['candle_type'], trader, api, params['pair']).run()
    except Exception:
        logger.error(traceback.format_exc())
        # Reboot rather than die: replace this process with a fresh run.
        logger.debug('process reboot')
        os.execv(sys.executable, [sys.executable] + ['trade/tr.py'])
def get_candles_for_feature(candle_types, start_dt, end_dt, pair='xrp_jpy'):
    """Fetch candle DataFrames used for feature extraction, one per candle type.

    Candles are fetched from one day before ``start_dt`` (look-back margin for
    feature windows) through ``end_dt``, and indexed by datetime derived from
    the epoch-millisecond ``timestamp`` column.

    Fix: the original read ``pair`` from an enclosing/global scope; it is now
    an explicit keyword parameter. The default preserves the 'xrp_jpy' value
    hard-coded elsewhere in this file.

    :param candle_types: iterable of candle-type strings (e.g. '1min', '5min')
    :param start_dt: datetime, inclusive start of the requested range
    :param end_dt: datetime, inclusive end of the requested range
    :param pair: currency pair passed to the exchange API
    :return: dict mapping candle type -> OHLCV DataFrame (datetime index)
    """
    candles_for_feature = {}
    for candle_type in candle_types:
        candles = API().get_candles(
            pair,
            candle_type=candle_type,
            start_dt=(start_dt - timedelta(1)).strftime('%Y-%m-%d %H:%M:%S'),
            end_dt=end_dt.strftime('%Y-%m-%d %H:%M:%S'))
        candles = pd.DataFrame(
            candles,
            columns=['open', 'high', 'low', 'close', 'volume', 'timestamp'])
        # timestamps are epoch milliseconds -> naive local datetimes
        candles.index = candles.timestamp.map(
            lambda x: datetime.fromtimestamp(x / 1000))
        candles_for_feature[candle_type] = candles
    return candles_for_feature
def summary_messages(self, start_dt, end_dt):
    """Return a dict of OHLCV DataFrames for xrp_jpy, keyed by candle type.

    Each DataFrame is indexed by its raw ``timestamp`` column.
    """
    pair = 'xrp_jpy'

    def _fetch_frame(ctype):
        # Fetch raw candles and wrap them in a timestamp-indexed DataFrame.
        raw = API().get_candles(pair, ctype, start_dt, end_dt)
        frame = pd.DataFrame(
            raw, columns=['open', 'high', 'low', 'close', 'volume', 'timestamp'])
        frame.index = frame['timestamp']
        return frame

    return {ctype: _fetch_frame(ctype)
            for ctype in ('1min', '5min', '15min', '30min', '1hour')}
def create_dataset_api(start_dt, end_dt, pair, candle_type):
    """Download candles for [start_dt, end_dt] from the exchange API in
    week-sized chunks and return them as one DataFrame.

    The API is called once per chunk with a 10-second pause between chunks
    (rate limiting). The original duplicated the fetch/frame/append code in
    both branches of the loop; this version computes the chunk end once and
    shares a single code path — behavior is unchanged.

    :param start_dt: 'YYYY-MM-DD HH:MM:SS' string, inclusive range start
    :param end_dt: 'YYYY-MM-DD HH:MM:SS' string, inclusive range end
    :param pair: currency pair passed to the API
    :param candle_type: candle granularity (e.g. '1min')
    :return: pandas DataFrame with OHLCV + timestamp columns
    """
    start_dt = datetime.strptime(start_dt, '%Y-%m-%d %H:%M:%S')
    end_dt = datetime.strptime(end_dt, '%Y-%m-%d %H:%M:%S')
    columns = ['open', 'high', 'low', 'close', 'volume', 'timestamp']
    chunks = []
    while True:
        # A chunk covers at most one week, with inclusive second-granularity
        # bounds (hence the -1 second).
        end_dt_split = start_dt + timedelta(days=7) - timedelta(seconds=1)
        is_last = end_dt_split >= end_dt
        chunk_end = end_dt if is_last else end_dt_split
        print('get candle during {} - {}'.format(
            start_dt.strftime('%Y-%m-%d %H:%M:%S'),
            chunk_end.strftime('%Y-%m-%d %H:%M:%S')))
        candles = API().get_candles(
            pair,
            candle_type=candle_type,
            start_dt=start_dt.strftime('%Y-%m-%d %H:%M:%S'),
            end_dt=chunk_end.strftime('%Y-%m-%d %H:%M:%S'))
        chunks.append(pd.DataFrame(candles, columns=columns))
        if is_last:
            break
        # Next chunk starts one second after this one ended.
        start_dt = chunk_end + timedelta(seconds=1)
        # Throttle successive API calls.
        time.sleep(10)
    return pd.concat(chunks)
def extract_feature(start_dt, end_dt, pair, candle_types, df_all=None):
    """Build the feature matrix for [start_dt, end_dt] across candle types.

    Two modes:
      * df_all given (backtest): features are computed per timestamp from the
        pre-fetched candles in ``df_all`` using multi-process workers.
      * df_all None (live): candles are fetched from the API (with a
        130-minute look-back margin) and features computed in one pass;
        intermediate CSVs are written for debugging.

    Per candle type, feature columns get a ``_<candle_type>`` suffix and the
    per-type frames are concatenated column-wise, forward-filling gaps.

    :param start_dt: 'YYYY-MM-DD HH:MM:SS' string, range start
    :param end_dt: 'YYYY-MM-DD HH:MM:SS' string, range end
    :param pair: currency pair for API fetches (unused when df_all is given)
    :param candle_types: candle granularities to extract features for
    :param df_all: optional dict candle_type -> candles DataFrame
    :return: DataFrame of features indexed by datetime
    """
    features_all = None
    for candle_type in candle_types:
        params = get_params(candle_type)
        if df_all is not None:
            logger.debug('candle type: {}'.format(candle_type))
            candles = df_all[candle_type]
            # timestamps are epoch milliseconds -> naive local datetimes
            candles.index = candles.timestamp.map(lambda x: datetime.fromtimestamp(x / 1000))
            # Compute features per timestamp (parallel processing): each task
            # gets the trailing 130-minute candle window ending at d.
            args = [(candles[(d - timedelta(minutes=130) <= candles.index) & (candles.index <= d)], params, candle_type, d) for d in datetimerange(str2dt(start_dt), str2dt(end_dt) + timedelta(minutes=1))]
            tmp_features = multi_process(args)
            # Keep only the row for the exact requested timestamp from each
            # worker's output.
            features = None
            dts = [d for d in datetimerange(str2dt(start_dt), str2dt(end_dt) + timedelta(minutes=1))]
            for dt, tmp_feature in zip(dts, tmp_features):
                feature = tmp_feature[tmp_feature.index == dt]
                if features is None:
                    features = feature
                else:
                    features = pd.concat([features, feature])
            # Free the per-timestamp worker output promptly.
            del tmp_features
            gc.collect()
        else:
            # Extend the fetch window backwards by 130 minutes so look-back
            # features have enough history at start_dt.
            start_dt_ext = (datetime.strptime(start_dt, '%Y-%m-%d %H:%M:%S') - timedelta(minutes=130)).strftime('%Y-%m-%d %H:%M:%S')
            candles = API().get_candles(pair, candle_type=candle_type, start_dt=start_dt_ext, end_dt=end_dt)
            candles = pd.DataFrame(candles, columns=['open', 'high', 'low', 'close', 'volume', 'timestamp'])
            candles.index = candles.timestamp.map(lambda x: datetime.fromtimestamp(x / 1000))
            # Debug artifacts; written to the current working directory.
            candles.to_csv('candles_{}_{}.csv'.format(end_dt, candle_type))
            features = _extract_feature(candles, params, candle_type, end_dt)
            features.to_csv('features_{}_{}.csv'.format(end_dt, candle_type))
            '''
            features = features.loc[(start_dt <= features.index) & (features.index <= end_dt)]
            '''
        # Disambiguate columns across candle types before the axis=1 concat.
        features.columns = [c + '_' + candle_type for c in features.columns]
        if features_all is None:
            features_all = features
        else:
            features_all = pd.concat([features_all, features], axis=1)
            # NOTE(review): ffill only runs after a merge, so the first candle
            # type's frame is never forward-filled on its own — confirm intent.
            features_all = features_all.fillna(method='ffill')
        del features
        gc.collect()
    return features_all
def save_depth(pair='xrp_jpy', db=0):
    """Snapshot the current order-book depth for *pair* into Redis.

    The key is the current wall-clock time formatted as
    '%Y-%m-%d %H:%M:%S'; the value is the JSON-serialized depth payload.
    """
    redis_pool = redis.ConnectionPool(host='localhost', port=6379, db=db)
    client = redis.StrictRedis(connection_pool=redis_pool)
    depth = API().get_depth(pair)
    # Round-trip "now" through the project's dt helpers to normalize it to
    # the second-resolution string format used as the Redis key.
    snapshot_dt = str2dt(format_dt(dt2str(datetime.now()),
                                   '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S'))
    client.set(dt2str(snapshot_dt), json.dumps(depth))
    logger.debug('save depth at timestamp: {}'.format(snapshot_dt))
def save_transactions(pair='xrp_jpy', db=1):
    """Persist the last 5 seconds of public transactions for *pair* to Redis.

    The key is the (second-resolution) end of the window as a string; the
    value is the JSON-serialized transaction list.
    """
    client = redis.StrictRedis(connection_pool=redis.ConnectionPool(
        host='localhost', port=6379, db=db))
    # Normalize "now" to second resolution via the project's dt helpers.
    end_dt = str2dt(format_dt(dt2str(datetime.now()),
                              '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S'))
    start_dt = end_dt - timedelta(seconds=5)
    transactions = API().get_transactions(pair, start_dt, end_dt)
    client.set(dt2str(end_dt), json.dumps(transactions))
    logger.debug('save transactions at timestamp: {}'.format(end_dt))
def worker():
    """Collect one order-book depth snapshot for xrp_jpy and store it in
    Redis (db 0), keyed by the snapshot's own timestamp.

    Fix: the original wrote the identical depth snapshot to Redis twice in a
    row; the duplicate ``conn.set`` is removed.

    On any unhandled exception the traceback is logged and the process
    re-execs itself via collect/collect_depth.py.
    """
    pair = 'xrp_jpy'
    try:
        pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
        conn = redis.StrictRedis(connection_pool=pool)
        end_dt = str2dt(format_dt(dt2str(datetime.now()), '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S'))
        start_dt = end_dt - timedelta(seconds=5)
        logger.debug('get transactions from {} to {}'.format(start_dt, end_dt))
        # NOTE(review): transactions are fetched but never persisted here —
        # kept for parity with the original; confirm whether they should be
        # stored (cf. save_transactions) or the fetch dropped.
        transactions = API().get_transactions(pair, start_dt, end_dt)
        depth = API().get_depth(pair)
        conn.set(depth['timestamp'], json.dumps(depth))
        logger.debug('save depth at timestamp: {}'.format(depth['timestamp']))
    except Exception:
        logger.error(traceback.format_exc())
        # Reboot rather than die: replace this process with a fresh run.
        logger.debug('process reboot')
        os.execv(sys.executable, [sys.executable] + ['collect/collect_depth.py'])
def worker():
    """Log the account's total asset value: JPY balance plus XRP balance
    valued at the latest 1-minute close.

    Fixes vs. the original:
      * ``datetime.now()`` was called twice, so the 5-minute candle window
        could straddle two different "now" values — a single snapshot is
        taken instead.
      * ``latest_close`` is now passed through ``float()``; candle close
        values are treated as strings elsewhere in this file
        (``float(c[3])``), so multiplying without conversion would raise.

    On any unhandled exception the traceback is logged and the process
    re-execs itself via collect/collect_asset.py.
    """
    try:
        assets = prv.get_asset()
        # NOTE(review): positional indices assume a fixed asset ordering in
        # the API response (index 0 = JPY, index 3 = XRP) — confirm.
        asset_xrp = assets['assets'][3]['onhand_amount']
        asset_jpy = assets['assets'][0]['onhand_amount']
        now = datetime.now()
        start_dt = (now - timedelta(minutes=5)).strftime('%Y-%m-%d %H:%M:%S')
        end_dt = now.strftime('%Y-%m-%d %H:%M:%S')
        from trade.api import API
        pair = 'xrp_jpy'
        candle_type = '1min'
        candles = API().get_candles(pair, candle_type, start_dt, end_dt)
        # Candle row layout: [open, high, low, close, volume, timestamp].
        latest_close = float(candles[-1][3])
        asset_all = float(asset_jpy) + float(asset_xrp) * latest_close
        logger.debug('jpy: {}, xrp: {}, all: {}'.format(
            asset_jpy, asset_xrp, asset_all))
    except Exception:
        logger.error(traceback.format_exc())
        # Reboot rather than die: replace this process with a fresh run.
        logger.debug('process reboot')
        os.execv(sys.executable, [sys.executable] + ['collect/collect_asset.py'])
def main(pair, candle_type, start_dt, end_dt, is_predict):
    """Plot the close price colored by trade state, optionally overlaying
    model predictions.

    Segments between a buy and the following sell are drawn RED (position
    held); sell-to-buy segments are BLACK (flat). When ``is_predict`` is
    true, per-candle lower-bound predictions are computed with the pickled
    ``clf_lower`` model and cached in results.pkl; otherwise the cached
    results are loaded and plotted.
    """
    # buy -> sell segments: RED
    # sell -> buy segments: BLACK
    '''
    start_dt = datetime.strptime(start_dt, '%Y-%m-%d %H:%M:%S')
    end_dt = datetime.strptime(end_dt, '%Y-%m-%d %H:%M:%S')

    # トレード履歴を取得してフォーマット
    with open(dirpath) as f:
        debug_log = [line for line in f.readlines() if '[INFO]' in line]
    trades = extract_trades(debug_log)
    trades = pd.DataFrame(trades)
    trades.columns = ['log_time', 'trade_time', 'action', 'amount', 'order_price', 'actual_price']
    trades['trade_time'] = pd.to_datetime(trades['trade_time'].apply(lambda x: datetime.fromtimestamp(x / 1000)))
    trades = trades[(trades['trade_time'] >= start_dt) & (trades['trade_time'] < end_dt)]

    # 先頭は’buy’からカウント
    if (trades.action.iloc[0] == 'exit_long') or (trades.action.iloc[0] == 'exit_short'):
        trades = trades[1:]
    # 末尾は'sell'まで
    if (trades.action.iloc[trades.shape[0] - 1] == 'entry_long') or (trades.action.iloc[trades.shape[0] - 1] == 'entry_short'):
        trades = trades[:-1]

    # 指定した時刻のローソク足を取得
    candles = API().get_candles(pair, candle_type, start_dt.strftime('%Y-%m-%d %H:%M:%S'), end_dt.strftime('%Y-%m-%d %H:%M:%S'))
    x = pd.to_datetime([datetime.fromtimestamp(c[5] / 1000).strftime('%Y-%m-%d %H:%M:%S') for c in candles])
    y = [float(c[3]) for c in candles]
    '''
    start_dt = datetime.strptime(start_dt, '%Y-%m-%d %H:%M:%S')
    end_dt = datetime.strptime(end_dt, '%Y-%m-%d %H:%M:%S')
    # Pull the account's trade history and reduce it to clean buy/sell rows.
    history = pd.DataFrame(prv.get_trade_history(pair='xrp_jpy', order_count=10000)['trades']).sort_index(ascending=False)
    formatted_history = format_trade_history(history, start_dt, end_dt)
    clean_history = drop_strange_history(formatted_history, amount=10)
    if not check_buysell_order(clean_history):
        print('order of trade type is invalid')
        sys.exit()
    # Fetch candles for the requested time range; x = candle datetimes,
    # y = close prices (candle row layout: [o, h, l, c, v, timestamp] with
    # epoch-millisecond timestamps).
    candles = API().get_candles(pair, candle_type, start_dt.strftime('%Y-%m-%d %H:%M:%S'), end_dt.strftime('%Y-%m-%d %H:%M:%S'))
    x = pd.to_datetime([datetime.fromtimestamp(c[5] / 1000).strftime('%Y-%m-%d %H:%M:%S') for c in candles])
    y = [float(c[3]) for c in candles]
    # Build the per-candle color map from the trade history: the segment
    # *leading up to* a 'buy' was flat (BLACK); the segment leading up to a
    # 'sell' was a held position (RED).
    colors = pd.Series(['BLACK'] * len(x), index=x)
    prev_trade_dt = start_dt
    for curr_trade_dt, row in clean_history.iterrows():
        # Truncate trade times to minute resolution to match candle times.
        curr_trade_dt = curr_trade_dt.strftime('%Y-%m-%d %H:%M:00')
        if row['side'] == 'buy':
            colors[(colors.index >= prev_trade_dt) & (colors.index <= curr_trade_dt)] = 'BLACK'
        elif row['side'] == 'sell':
            colors[(colors.index >= prev_trade_dt) & (colors.index <= curr_trade_dt)] = 'RED'
        prev_trade_dt = curr_trade_dt
    ax = plot_multicolored_lines(x, y, colors)
    if is_predict:
        with open('ml/model/clf_lower.pkl', mode='rb') as f:
            clf_lower = pickle.load(f)
        candle_types = ['1min', '5min', '15min', '30min']
        candles_for_feature = get_candles_for_feature(candle_types, start_dt, end_dt)
        results = []
        # NOTE(review): this loop shadows the outer start_dt/end_dt with
        # per-candle string values — intentional, but fragile.
        for candle in candles:
            end_dt = datetime.fromtimestamp(candle[5] / 1000)
            # One day of look-back feeds the feature windows.
            start_dt = (end_dt - timedelta(1)).strftime('%Y-%m-%d %H:%M:%S')
            end_dt = end_dt.strftime('%Y-%m-%d %H:%M:%S')
            print(end_dt)
            features = None
            for candle_type in candle_types:
                #candles = API().get_candles(pair, candle_type=candle_type, start_dt=start_dt, end_dt=end_dt)
                #candles = pd.DataFrame(candles, columns=['open', 'high', 'low', 'close', 'volume', 'timestamp'])
                #candles.index = candles.timestamp.map(lambda x: datetime.fromtimestamp(x / 1000))
                # Slice the pre-fetched candles to the look-back window.
                cff = candles_for_feature[candle_type]
                cff = cff[(datetime.strptime(start_dt, '%Y-%m-%d %H:%M:%S') <= cff.index) & (cff.index <= datetime.strptime(end_dt, '%Y-%m-%d %H:%M:%S'))]
                # NOTE(review): params are always taken for '1min' even for
                # other candle types — confirm this is intentional.
                params = get_params('1min')
                feature = _extract_feature(cff, params)
                feature = feature.fillna(method='ffill')
                feature.columns = [c + '_' + candle_type for c in feature.columns]
                if features is None:
                    features = feature
                else:
                    features = pd.concat([features, feature], axis=1)
                    features = features.fillna(method='ffill')
            from time import sleep
            #sleep(3)
            # Conservative long signal: take the model's low (25th-percentile)
            # prediction and subtract the ROCP std; go long only if the
            # result is above 0.
            X = features[features.index == end_dt]
            y_lower = clf_lower.predict(X)[0]
            rocp_std = calc_rocp_std(start_dt, end_dt)
            rocp_lower = y_lower - rocp_std
            results.append({'y_lower': y_lower, 'rocp_std': rocp_std, 'rocp_lower': rocp_lower})
        results = pd.DataFrame(results)
        # Cache predictions so re-plots can skip the expensive loop above.
        with open('results.pkl', 'wb') as f:
            pickle.dump(results, f)
    else:
        with open('results.pkl', 'rb') as f:
            results = pickle.load(f)
    # Overlay the prediction series on the second axis of the price plot.
    ax[1].plot(x, results.y_lower)
    ax[1].plot(x, results.rocp_std)
    ax[1].plot(x, results.rocp_lower)
    ax[1].legend(['y_lower', 'rocp_std', 'rocp_lower'])
    plt.show()