def __init__(self) -> None:
    """Instantiate one exchange driver per exchange listed in the config.

    Reads each exchange's starting assets, fee and type from the loaded
    config and stores a SpotExchange or FuturesExchange instance keyed by
    exchange name.

    :raises InvalidConfig: when the configured exchange type is neither
        "spot" nor "futures".
    """
    self.storage = {}

    for name in config['app']['considering_exchanges']:
        starting_assets = config['env']['exchanges'][name]['assets']
        fee = config['env']['exchanges'][name]['fee']
        exchange_type = config['env']['exchanges'][name]['type']

        if exchange_type == 'spot':
            self.storage[name] = SpotExchange(name, starting_assets, fee)
        elif exchange_type == 'futures':
            # futures exchanges need extra leverage/settlement settings
            self.storage[name] = FuturesExchange(
                name, starting_assets, fee,
                settlement_currency=jh.get_config(
                    f'env.exchanges.{name}.settlement_currency'),
                futures_leverage_mode=jh.get_config(
                    f'env.exchanges.{name}.futures_leverage_mode'),
                futures_leverage=jh.get_config(
                    f'env.exchanges.{name}.futures_leverage'),
            )
        else:
            # fixed grammar of the user-facing message ("in not valid")
            raise InvalidConfig(
                'Value for exchange type in your config file is not valid. Supported values are "spot" and "futures"'
            )
def __init__(self) -> None:
    """Instantiate one exchange driver per exchange listed in the config.

    For futures exchanges, also makes sure the configured settlement
    currency exists among the starting assets before constructing the
    FuturesExchange.

    :raises InvalidConfig: when the configured exchange type is neither
        "spot" nor "futures".
    """
    self.storage = {}

    for name in config['app']['considering_exchanges']:
        starting_assets = config['env']['exchanges'][name]['assets']
        fee = config['env']['exchanges'][name]['fee']
        exchange_type = config['env']['exchanges'][name]['type']

        if exchange_type == 'spot':
            self.storage[name] = SpotExchange(name, starting_assets, fee)
        elif exchange_type == 'futures':
            settlement_currency = jh.get_config(
                f'env.exchanges.{name}.settlement_currency')

            # dirty fix to get the settlement_currency right for non-USDT pairs
            settlement_asset_dict = pydash.find(
                starting_assets,
                lambda asset: asset['asset'] == settlement_currency)
            if settlement_asset_dict is None:
                # NOTE(review): mutates the config-backed asset list in place;
                # appears to be a deliberate stop-gap — confirm before relying on it
                starting_assets[0]['asset'] = settlement_currency

            self.storage[name] = FuturesExchange(
                name, starting_assets, fee,
                settlement_currency=settlement_currency,
                futures_leverage_mode=jh.get_config(
                    f'env.exchanges.{name}.futures_leverage_mode'),
                futures_leverage=jh.get_config(
                    f'env.exchanges.{name}.futures_leverage'),
            )
        else:
            # fixed grammar of the user-facing message ("in not valid")
            raise InvalidConfig(
                'Value for exchange type in your config file is not valid. Supported values are "spot" and "futures"'
            )
def test_get_config(monkeypatch):
    """jh.get_config: default fallback, config lookup, and env-var override."""
    # assert when config does NOT exist (must return passed default)
    assert jh.get_config('aaaaaaa', 2020) == 2020
    # assert when config does exist
    assert jh.get_config('env.logging.order_submission', 2020) is True
    # assert the environment variable is taken (overrides the config value)
    monkeypatch.setenv("ENV_DATABASES_POSTGRES_HOST", "db")
    assert jh.get_config('env.databases.postgres_host', 'default') == 'db'
    monkeypatch.delenv("ENV_DATABASES_POSTGRES_HOST")
def portfolio_metrics() -> List[
        Union[Union[List[Union[str, Any]], List[str], List[Union[Union[str, float], Any]]], Any]]:
    """Build the performance-report table: a list of [label, value] rows
    computed from the session's trade statistics."""
    data = stats.trades(store.completed_trades.trades, store.app.daily_balance)

    # rows that are always included
    metrics = [
        ['Total Closed Trades', data['total']],
        ['Total Net Profit',
         '{} ({})'.format(jh.format_currency(round(data['net_profit'], 4)),
                          str(round(data['net_profit_percentage'], 2)) + '%')],
        ['Starting => Finishing Balance',
         '{} => {}'.format(jh.format_currency(round(data['starting_balance'], 2)),
                           jh.format_currency(round(data['finishing_balance'], 2)))],
        ['Total Open Trades', data['total_open_trades']],
        ['Open PL', jh.format_currency(round(data['open_pl'], 2))],
        ['Total Paid Fees', jh.format_currency(round(data['fee'], 2))],
        ['Max Drawdown', '{}%'.format(round(data['max_drawdown'], 2))],
        ['Annual Return', '{}%'.format(round(data['annual_return'], 2))],
        ['Expectancy',
         '{} ({})'.format(jh.format_currency(round(data['expectancy'], 2)),
                          str(round(data['expectancy_percentage'], 2)) + '%')],
        ['Avg Win | Avg Loss',
         '{} | {}'.format(jh.format_currency(round(data['average_win'], 2)),
                          jh.format_currency(round(data['average_loss'], 2)))],
        ['Ratio Avg Win / Avg Loss', round(data['ratio_avg_win_loss'], 2)],
        ['Percent Profitable', str(round(data['win_rate'] * 100)) + '%'],
        ['Longs | Shorts',
         '{}% | {}%'.format(round(data['longs_percentage']), round(data['short_percentage']))],
        ['Avg Holding Time', jh.readable_duration(data['average_holding_period'], 3)],
        # the two holding-time rows keep NaN as-is when the stat is undefined
        ['Winning Trades Avg Holding Time',
         np.nan if np.isnan(data['average_winning_holding_period']) else jh.readable_duration(
             data['average_winning_holding_period'], 3)],
        ['Losing Trades Avg Holding Time',
         np.nan if np.isnan(data['average_losing_holding_period']) else jh.readable_duration(
             data['average_losing_holding_period'], 3)]
    ]

    # optional rows, each gated by a config flag; note the sharpe ratio
    # defaults to enabled while all others default to disabled
    if jh.get_config('env.metrics.sharpe_ratio', True):
        metrics.append(['Sharpe Ratio', round(data['sharpe_ratio'], 2)])
    if jh.get_config('env.metrics.calmar_ratio', False):
        metrics.append(['Calmar Ratio', round(data['calmar_ratio'], 2)])
    if jh.get_config('env.metrics.sortino_ratio', False):
        metrics.append(['Sortino Ratio', round(data['sortino_ratio'], 2)])
    if jh.get_config('env.metrics.omega_ratio', False):
        metrics.append(['Omega Ratio', round(data['omega_ratio'], 2)])
    if jh.get_config('env.metrics.winning_streak', False):
        metrics.append(['Winning Streak', data['winning_streak']])
    if jh.get_config('env.metrics.losing_streak', False):
        metrics.append(['Losing Streak', data['losing_streak']])
    if jh.get_config('env.metrics.largest_winning_trade', False):
        metrics.append(['Largest Winning Trade', jh.format_currency(round(data['largest_winning_trade'], 2))])
    if jh.get_config('env.metrics.largest_losing_trade', False):
        metrics.append(['Largest Losing Trade', jh.format_currency(round(data['largest_losing_trade'], 2))])
    if jh.get_config('env.metrics.total_winning_trades', False):
        metrics.append(['Total Winning Trades', data['total_winning_trades']])
    if jh.get_config('env.metrics.total_losing_trades', False):
        metrics.append(['Total Losing Trades', data['total_losing_trades']])

    return metrics
def _telegram(msg):
    """Send msg to every configured Telegram chat through the bot API.

    No-op when the bot token or chat IDs are missing, or when
    notifications are disabled in the config.
    """
    token = jh.get_config('env.notifications.telegram_bot_token', '')
    chat_IDs: list = jh.get_config('env.notifications.telegram_chat_IDs', [])

    if not token or not len(chat_IDs) or not config['env']['notifications'][
            'enable_notifications']:
        return

    # renamed loop variable from `id` (it shadowed the builtin); f-string
    # matches the style of the sibling _telegram_errors_bot
    for chat_id in chat_IDs:
        requests.get(
            f'https://api.telegram.org/bot{token}/sendMessage?chat_id={chat_id}&parse_mode=Markdown&text={msg}'
        )
def _telegram_errors_bot(msg: str) -> None:
    """Push msg to the chats of the dedicated error-notifier Telegram bot.

    Silently returns when the bot is not configured or notifications are
    globally disabled.
    """
    bot_token = jh.get_config(
        'env.notifications.error_notifier.telegram_bot_token', '')
    recipients: list = jh.get_config(
        'env.notifications.error_notifier.telegram_chat_IDs', [])

    # bail out early when there is nothing/nowhere to send
    if not bot_token or not recipients or not config['env']['notifications'][
            'enable_notifications']:
        return

    for _id in recipients:
        requests.get(
            f'https://api.telegram.org/bot{bot_token}/sendMessage?chat_id={_id}&parse_mode=Markdown&text={msg}'
        )
def test_get_config(monkeypatch):
    """jh.get_config: default fallback, config lookup, and env-var overrides
    (including exchange names containing spaces)."""
    # assert when config does NOT exist (must return passed default)
    assert jh.get_config('aaaaaaa', 2020) == 2020
    # assert when config does exist
    assert jh.get_config('env.logging.order_submission', 2020) is True
    # assert the environment variable is taken (overrides the config value)
    monkeypatch.setenv("ENV_DATABASES_POSTGRES_HOST", "db")
    assert jh.get_config('env.databases.postgres_host', 'default') == 'db'
    monkeypatch.delenv("ENV_DATABASES_POSTGRES_HOST")
    # assert the environment variable is taken when the key contains a space
    monkeypatch.setenv("ENV_EXCHANGES_BINANCE_FUTURES_SETTLEMENT_CURRENCY", 'BUSD')
    assert jh.get_config('env.exchanges.Binance Futures.settlement_currency', 'USDT') == 'BUSD'
    monkeypatch.delenv("ENV_EXCHANGES_BINANCE_FUTURES_SETTLEMENT_CURRENCY")
def vwma(candles: np.ndarray, period: int = 20, source_type: str = "close",
         sequential: bool = False) -> Union[float, np.ndarray]:
    """
    VWMA - Volume Weighted Moving Average

    :param candles: np.ndarray
    :param period: int - default: 20
    :param source_type: str - default: "close"
    :param sequential: bool - default=False

    :return: float | np.ndarray
    """
    # limit the lookback to the configured warmup window for scalar results
    max_candles = get_config('env.data.warmup_candles_num', 240)
    if not sequential and len(candles) > max_candles:
        candles = candles[-max_candles:]

    prices = get_candle_source(candles, source_type=source_type)
    volumes = np.ascontiguousarray(candles[:, 5])
    result = ti.vwma(np.ascontiguousarray(prices), volumes, period=period)

    if not sequential:
        return result[-1]

    # left-pad with NaNs so the output aligns with the input candles
    padding = np.full((candles.shape[0] - result.shape[0]), np.nan)
    return np.concatenate((padding, result), axis=0)
def mcginley_dynamic(candles: np.ndarray, period: int = 10, k: float = 0.6,
                     source_type: str = "close",
                     sequential: bool = False) -> Union[float, np.ndarray]:
    """
    McGinley Dynamic

    :param candles: np.ndarray
    :param period: int - default: 10
    :param k: float - default: 0.6
    :param source_type: str - default: "close"
    :param sequential: bool - default=False

    :return: float | np.ndarray
    """
    # limit the lookback to the configured warmup window for scalar results
    max_candles = get_config('env.data.warmup_candles_num', 240)
    if not sequential and len(candles) > max_candles:
        candles = candles[-max_candles:]

    prices = get_candle_source(candles, source_type=source_type)
    result = md_fast(prices, k, period)

    if sequential:
        return result
    last = result[-1]
    return None if np.isnan(last) else last
def emd(candles: np.ndarray, period: int = 20, delta=0.5, fraction=0.1,
        sequential: bool = False) -> EMD:
    """
    Empirical Mode Decomposition by John F. Ehlers and Ric Way

    :param candles: np.ndarray
    :param period: int - default=20
    :param delta: float - default=0.5
    :param fraction: float - default=0.1
    :param sequential: bool - default=False

    :return: EMD(upperband, middleband, lowerband)
    """
    # limit the lookback to the configured warmup window for scalar results
    max_candles = get_config('env.data.warmup_candles_num', 240)
    if not sequential and len(candles) > max_candles:
        candles = candles[-max_candles:]

    # median price: (high + low) / 2
    median_price = (candles[:, 3] + candles[:, 4]) / 2
    bandpass = bp_fast(median_price, period, delta)
    middleband = talib.SMA(bandpass, timeperiod=2 * period)

    peak, valley = peak_valley_fast(bandpass, median_price)
    upperband = fraction * talib.SMA(peak, timeperiod=50)
    lowerband = fraction * talib.SMA(valley, timeperiod=50)

    if not sequential:
        return EMD(upperband[-1], middleband[-1], lowerband[-1])
    return EMD(upperband, middleband, lowerband)
def save_daily_portfolio_balance() -> None:
    """Snapshot the total portfolio value (exchange app-currency balances
    plus open-position PNL) into store.app.daily_balance; when live-trading,
    also persist every asset balance to the database."""
    balances = []

    # add exchange balances (the unused dict keys are no longer iterated)
    for e in store.exchanges.storage.values():
        balances.append(e.assets[jh.app_currency()])

        # store daily_balance of assets into database
        if jh.is_livetrading():
            for asset_key, asset_value in e.assets.items():
                store_daily_balance_into_db({
                    'id': jh.generate_unique_id(),
                    'timestamp': jh.now(),
                    'identifier': jh.get_config('env.identifier', 'main'),
                    'exchange': e.name,
                    'asset': asset_key,
                    'balance': asset_value,
                })

    # add open position values
    for pos in store.positions.storage.values():
        if pos.is_open:
            balances.append(pos.pnl)

    total = sum(balances)
    store.app.daily_balance.append(total)
    logger.info(f'Saved daily portfolio balance: {round(total, 2)}')
def adosc(candles: np.ndarray, fast_period: int = 3, slow_period: int = 10,
          sequential: bool = False) -> Union[float, np.ndarray]:
    """
    ADOSC - Chaikin A/D Oscillator

    :param candles: np.ndarray
    :param fast_period: int - default: 3
    :param slow_period: int - default: 10
    :param sequential: bool - default=False

    :return: float | np.ndarray
    """
    # limit the lookback to the configured warmup window for scalar results
    max_candles = get_config('env.data.warmup_candles_num', 240)
    if not sequential and len(candles) > max_candles:
        candles = candles[-max_candles:]

    # candle columns: 2=close, 3=high, 4=low, 5=volume
    result = talib.ADOSC(candles[:, 3], candles[:, 4], candles[:, 2], candles[:, 5],
                         fastperiod=fast_period, slowperiod=slow_period)

    if sequential:
        return result
    last = result[-1]
    return None if np.isnan(last) else last
def fisher(candles: np.ndarray, period: int = 9,
           sequential: bool = False) -> FisherTransform:
    """
    The Fisher Transform helps identify price reversals.

    :param candles: np.ndarray
    :param period: int - default: 9
    :param sequential: bool - default=False

    :return: FisherTransform(fisher, signal)
    """
    # limit the lookback to the configured warmup window for scalar results
    max_candles = get_config('env.data.warmup_candles_num', 240)
    if not sequential and len(candles) > max_candles:
        candles = candles[-max_candles:]

    highs = np.ascontiguousarray(candles[:, 3])
    lows = np.ascontiguousarray(candles[:, 4])
    fisher_val, fisher_signal = ti.fisher(highs, lows, period=period)

    if not sequential:
        return FisherTransform(fisher_val[-1], fisher_signal[-1])

    # left-pad both series with NaNs so they align with the input candles
    padded_fisher = np.concatenate(
        (np.full((candles.shape[0] - fisher_val.shape[0]), np.nan), fisher_val),
        axis=0)
    padded_signal = np.concatenate(
        (np.full((candles.shape[0] - fisher_signal.shape[0]), np.nan),
         fisher_signal))
    return FisherTransform(padded_fisher, padded_signal)
def bollinger_bands_width(candles: np.ndarray, period: int = 20, devup: float = 2,
                          devdn: float = 2, matype: int = 0,
                          source_type: str = "close",
                          sequential: bool = False) -> Union[float, np.ndarray]:
    """
    BBW - Bollinger Bands Width - Bollinger Bands Bandwidth

    :param candles: np.ndarray
    :param period: int - default: 20
    :param devup: float - default: 2
    :param devdn: float - default: 2
    :param matype: int - default: 0
    :param source_type: str - default: "close"
    :param sequential: bool - default=False

    :return: float | np.ndarray
    """
    # limit the lookback to the configured warmup window for scalar results
    max_candles = get_config('env.data.warmup_candles_num', 240)
    if not sequential and len(candles) > max_candles:
        candles = candles[-max_candles:]

    prices = get_candle_source(candles, source_type=source_type)
    upper, middle, lower = talib.BBANDS(prices, timeperiod=period,
                                        nbdevup=devup, nbdevdn=devdn,
                                        matype=matype)

    # bandwidth = (upper - lower) / middle
    if not sequential:
        return (upper[-1] - lower[-1]) / middle[-1]
    return (upper - lower) / middle
def error(msg: str) -> None:
    """Log msg at error level: mirrors it to the info log, notifies live
    sessions, publishes to the dashboard, and persists to the database —
    each step gated by the current app mode."""
    # lazily create the logger for the current app mode on first use
    if jh.app_mode() not in LOGGERS:
        _init_main_logger()

    # error logs should be logged as info logs as well
    info(msg)

    msg = str(msg)
    from jesse.store import store
    log_id = jh.generate_unique_id()
    log_dict = {
        'id': log_id,
        'timestamp': jh.now_to_timestamp(),
        'message': msg
    }

    # live sessions push urgent + normal notifications (opt-out via config)
    if jh.is_live() and jh.get_config('env.notifications.events.errors', True):
        # notify_urgently(f"ERROR at \"{jh.get_config('env.identifier')}\" account:\n{msg}")
        notify_urgently(f"ERROR:\n{msg}")
        notify(f'ERROR:\n{msg}')

    # publish to the GUI for debug backtests, data collection, and live
    if (jh.is_backtesting() and jh.is_debugging()) or jh.is_collecting_data() or jh.is_live():
        sync_publish('error_log', log_dict)

    store.logs.errors.append(log_dict)

    # prefix the timestamp only for live/optimize sessions
    if jh.is_live() or jh.is_optimizing():
        msg = f"[ERROR | {jh.timestamp_to_time(jh.now_to_timestamp())[:19]}] {msg}"

    logger = LOGGERS[jh.app_mode()]
    logger.error(msg)

    if jh.is_live():
        from jesse.models.utils import store_log_into_db
        store_log_into_db(log_dict, 'error')
def reflex(candles: np.ndarray, period: int = 20, source_type: str = "close",
           sequential: bool = False) -> Union[float, np.ndarray]:
    """
    Reflex indicator by John F. Ehlers

    :param candles: np.ndarray
    :param period: int - default=20
    :param source_type: str - default: "close"
    :param sequential: bool - default=False

    :return: float | np.ndarray
    """
    # limit the lookback to the configured warmup window for scalar results
    max_candles = get_config('env.data.warmup_candles_num', 240)
    if not sequential and len(candles) > max_candles:
        candles = candles[-max_candles:]

    prices = get_candle_source(candles, source_type=source_type)
    # super-smooth with half the period before running the reflex filter
    smoothed = supersmoother_fast(prices, period / 2)
    result = reflex_fast(smoothed, period)

    if sequential:
        return result
    last = result[-1]
    return None if np.isnan(last) else last
def vidya(candles: np.ndarray, short_period: int = 2, long_period: int = 5,
          alpha: float = 0.2, source_type: str = "close",
          sequential: bool = False) -> Union[float, np.ndarray]:
    """
    VIDYA - Variable Index Dynamic Average

    :param candles: np.ndarray
    :param short_period: int - default: 2
    :param long_period: int - default: 5
    :param alpha: float - default: 0.2
    :param source_type: str - default: "close"
    :param sequential: bool - default=False

    :return: float | np.ndarray
    """
    # limit the lookback to the configured warmup window for scalar results
    max_candles = get_config('env.data.warmup_candles_num', 240)
    if not sequential and len(candles) > max_candles:
        candles = candles[-max_candles:]

    prices = np.ascontiguousarray(
        get_candle_source(candles, source_type=source_type))
    result = ti.vidya(prices, short_period=short_period,
                      long_period=long_period, alpha=alpha)

    if not sequential:
        return result[-1]

    # left-pad with NaNs so the output aligns with the input candles
    padding = np.full((candles.shape[0] - result.shape[0]), np.nan)
    return np.concatenate((padding, result), axis=0)
def mom(candles: np.ndarray, period: int = 10, source_type: str = "close",
        sequential: bool = False) -> Union[float, np.ndarray]:
    """
    MOM - Momentum

    :param candles: np.ndarray
    :param period: int - default=10
    :param source_type: str - default: "close"
    :param sequential: bool - default=False

    :return: float | np.ndarray
    """
    # limit the lookback to the configured warmup window for scalar results
    max_candles = get_config('env.data.warmup_candles_num', 240)
    if not sequential and len(candles) > max_candles:
        candles = candles[-max_candles:]

    prices = get_candle_source(candles, source_type=source_type)
    result = talib.MOM(prices, timeperiod=period)

    if sequential:
        return result
    last = result[-1]
    return None if np.isnan(last) else last
def var(candles: np.ndarray, period: int = 14, nbdev: float = 1, source_type: str = "close",
        sequential: bool = False) -> Union[float, np.ndarray]:
    """
    VAR - Variance

    :param candles: np.ndarray
    :param period: int - default=14
    :param nbdev: float - default=1
    :param source_type: str - default: "close"
    :param sequential: bool - default=False

    :return: float | np.ndarray
    """
    warmup_candles_num = get_config('env.data.warmup_candles_num', 240)
    if not sequential and len(candles) > warmup_candles_num:
        candles = candles[-warmup_candles_num:]

    source = get_candle_source(candles, source_type=source_type)
    # bug fix: previously passed candles[:, 2] (close) directly, which
    # silently ignored source_type; output is unchanged for the default
    # source_type="close"
    res = talib.VAR(source, timeperiod=period, nbdev=nbdev)

    if sequential:
        return res
    else:
        return None if np.isnan(res[-1]) else res[-1]
def dec_osc(candles: np.ndarray, hp_period: int = 125, k: float = 1,
            source_type: str = "close",
            sequential: bool = False) -> Union[float, np.ndarray]:
    """
    Ehlers Decycler Oscillator

    :param candles: np.ndarray
    :param hp_period: int - default=125
    :param k: float - default=1
    :param source_type: str - default: "close"
    :param sequential: bool - default=False

    :return: float | np.ndarray
    """
    # limit the lookback to the configured warmup window for scalar results
    max_candles = get_config('env.data.warmup_candles_num', 240)
    if not sequential and len(candles) > max_candles:
        candles = candles[-max_candles:]

    prices = get_candle_source(candles, source_type=source_type)
    # decycler = price minus its high-pass component; the oscillator is a
    # second high-pass of the decycler at half the period, scaled by price
    highpass = high_pass_2_pole_fast(prices, hp_period)
    decycler = prices - highpass
    oscillator = high_pass_2_pole_fast(decycler, 0.5 * hp_period)
    result = 100 * k * oscillator / prices

    if sequential:
        return result
    last = result[-1]
    return None if np.isnan(last) else last
def gauss(candles: np.ndarray, period: int = 14, poles: int = 4,
          source_type: str = "close",
          sequential: bool = False) -> Union[float, np.ndarray]:
    """
    Gaussian Filter

    :param candles: np.ndarray
    :param period: int - default=14
    :param poles: int - default=4
    :param source_type: str - default: "close"
    :param sequential: bool - default=False

    :return: float | np.ndarray
    """
    # limit the lookback to the configured warmup window for scalar results
    max_candles = get_config('env.data.warmup_candles_num', 240)
    if not sequential and len(candles) > max_candles:
        candles = candles[-max_candles:]

    prices = get_candle_source(candles, source_type=source_type)
    filtered, to_fill = gauss_fast(prices, period, poles)

    # drop the first `poles` samples and, when needed, left-pad with NaNs
    trimmed = filtered[poles:]
    if to_fill != 0:
        result = np.insert(trimmed, 0, np.repeat(np.nan, to_fill))
    else:
        result = trimmed

    if sequential:
        return result
    last = result[-1]
    return None if np.isnan(last) else last
def srsi(candles: np.ndarray, period: int = 14, period_stoch: int = 14, k: int = 3,
         d: int = 3, source_type: str = "close",
         sequential: bool = False) -> StochasticRSI:
    """
    Stochastic RSI

    :param candles: np.ndarray
    :param period: int - default: 14 - RSI Length
    :param period_stoch: int - default: 14 - Stochastic Length
    :param k: int - default: 3
    :param d: int - default: 3
    :param source_type: str - default: "close"
    :param sequential: bool - default=False

    :return: StochasticRSI(k, d)
    """
    warmup_candles_num = get_config('env.data.warmup_candles_num', 240)
    if not sequential and len(candles) > warmup_candles_num:
        candles = candles[-warmup_candles_num:]

    source = get_candle_source(candles, source_type=source_type)
    rsi_np = talib.RSI(source, timeperiod=period)
    # drop the NaN warmup section before feeding the stochastic
    rsi_np = rsi_np[np.logical_not(np.isnan(rsi_np))]
    fast_k, fast_d = ti.stoch(rsi_np, rsi_np, rsi_np, period_stoch, k, d)

    if sequential:
        # left-pad both series with NaNs so they align with the input candles
        fast_k = np.concatenate((np.full((candles.shape[0] - fast_k.shape[0]), np.nan), fast_k), axis=0)
        fast_d = np.concatenate((np.full((candles.shape[0] - fast_d.shape[0]), np.nan), fast_d), axis=0)
        return StochasticRSI(fast_k, fast_d)
    else:
        return StochasticRSI(fast_k[-1], fast_d[-1])
def frama(candles: np.ndarray, window: int = 10, FC: int = 1, SC: int = 300,
          sequential: bool = False) -> Union[float, np.ndarray]:
    """
    Fractal Adaptive Moving Average (FRAMA)

    :param candles: np.ndarray
    :param window: int - default: 10
    :param FC: int - default: 1
    :param SC: int - default: 300
    :param sequential: bool - default=False

    :return: float | np.ndarray
    """
    # limit the lookback to the configured warmup window for scalar results
    max_candles = get_config('env.data.warmup_candles_num', 240)
    if not sequential and len(candles) > max_candles:
        candles = candles[-max_candles:]

    # the algorithm requires an even window; bump odd values by one
    n = window
    if n % 2 == 1:
        print("FRAMA n must be even. Adding one")
        n += 1

    result = frame_fast(candles, n, SC, FC)
    return result if sequential else result[-1]
def supersmoother(candles: np.ndarray, period: int = 14, source_type: str = "close",
                  sequential: bool = False) -> Union[float, np.ndarray]:
    """
    Super Smoother Filter 2pole Butterworth
    This indicator was described by John F. Ehlers

    :param candles: np.ndarray
    :param period: int - default=14
    :param source_type: str - default: "close"
    :param sequential: bool - default=False

    :return: float | np.ndarray
    """
    # limit the lookback to the configured warmup window for scalar results
    max_candles = get_config('env.data.warmup_candles_num', 240)
    if not sequential and len(candles) > max_candles:
        candles = candles[-max_candles:]

    # a 1-D array is accepted directly as the price series
    if len(candles.shape) == 1:
        prices = candles
    else:
        prices = get_candle_source(candles, source_type=source_type)

    result = supersmoother_fast(prices, period)

    if sequential:
        return result
    last = result[-1]
    return None if np.isnan(last) else last
def safezonestop(candles: np.ndarray, period: int = 22, mult: float = 2.5,
                 max_lookback: int = 3, direction: str = "long",
                 sequential: bool = False) -> Union[float, np.ndarray]:
    """
    Safezone Stops

    :param candles: np.ndarray
    :param period: int - default=22
    :param mult: float - default=2.5
    :param max_lookback: int - default=3
    :param direction: str - default=long
    :param sequential: bool - default=False

    :return: float | np.ndarray
    """
    # limit the lookback to the configured warmup window for scalar results
    max_candles = get_config('env.data.warmup_candles_num', 240)
    if not sequential and len(candles) > max_candles:
        candles = candles[-max_candles:]

    highs = candles[:, 3]
    lows = candles[:, 4]
    prev_high = np_shift(highs, 1, fill_value=np.nan)
    prev_low = np_shift(lows, 1, fill_value=np.nan)

    # long stops trail below price (max over the window), short stops
    # trail above price (min over the window)
    if direction == "long":
        stops = prev_low - mult * talib.MINUS_DM(highs, lows, timeperiod=period)
        windows = sliding_window_view(stops, window_shape=max_lookback)
        result = np.max(windows, axis=-1)
    else:
        stops = prev_high + mult * talib.PLUS_DM(highs, lows, timeperiod=period)
        windows = sliding_window_view(stops, window_shape=max_lookback)
        result = np.min(windows, axis=-1)

    if not sequential:
        return result[-1]

    # left-pad with NaNs so the output aligns with the input candles
    padding = np.full((candles.shape[0] - result.shape[0]), np.nan)
    return np.concatenate((padding, result), axis=0)
def vwap(candles: np.ndarray, source_type: str = "hlc3", anchor: str = "D",
         sequential: bool = False) -> Union[float, np.ndarray]:
    """
    VWAP - Volume Weighted Average Price

    :param candles: np.ndarray
    :param source_type: str - default: "hlc3"
    :param anchor: str - default: "D" - numpy datetime64 unit the cumulative
        sums reset on (e.g. "D" daily, "W" weekly)
    :param sequential: bool - default=False

    :return: float | np.ndarray
    """
    warmup_candles_num = get_config('env.data.warmup_candles_num', 240)
    if not sequential and len(candles) > warmup_candles_num:
        candles = candles[-warmup_candles_num:]

    source = get_candle_source(candles, source_type=source_type)
    # bucket candles by anchor period using the timestamp column (0, in ms)
    group_idx = candles[:, 0].astype('datetime64[ms]').astype(
        'datetime64[{}]'.format(anchor)).astype('int')
    # per-bucket cumulative (price * volume) divided by cumulative volume
    vwap = aggregate(group_idx, candles[:, 5] * source, func='cumsum')
    vwap /= aggregate(group_idx, candles[:, 5], func='cumsum')

    if sequential:
        return vwap
    else:
        return None if np.isnan(vwap[-1]) else vwap[-1]
def macd(candles: np.ndarray, fast_period: int = 12, slow_period: int = 26,
         signal_period: int = 9, source_type: str = "close",
         sequential: bool = False) -> MACD:
    """
    MACD - Moving Average Convergence/Divergence

    :param candles: np.ndarray
    :param fast_period: int - default: 12
    :param slow_period: int - default: 26
    :param signal_period: int - default: 9
    :param source_type: str - default: "close"
    :param sequential: bool - default: False

    :return: MACD(macd, signal, hist)
    """
    # limit the lookback to the configured warmup window for scalar results
    max_candles = get_config('env.data.warmup_candles_num', 240)
    if not sequential and len(candles) > max_candles:
        candles = candles[-max_candles:]

    prices = get_candle_source(candles, source_type=source_type)
    macd_line, signal_line, histogram = talib.MACD(
        prices, fastperiod=fast_period, slowperiod=slow_period,
        signalperiod=signal_period)

    if not sequential:
        return MACD(macd_line[-1], signal_line[-1], histogram[-1])
    return MACD(macd_line, signal_line, histogram)
def minmax(candles: np.ndarray, order: int = 3, sequential: bool = False) -> EXTREMA:
    """
    minmax - Get extrema

    :param candles: np.ndarray
    :param order: int - default = 3
    :param sequential: bool - default=False

    :return: EXTREMA(min, max, last_min, last_max)
    """
    warmup_candles_num = get_config('env.data.warmup_candles_num', 240)
    if not sequential and len(candles) > warmup_candles_num:
        candles = candles[-warmup_candles_num:]

    low = candles[:, 4]
    high = candles[:, 3]

    minima_idx = argrelextrema(low, np.less, order=order, axis=0)
    maxima_idx = argrelextrema(high, np.greater, order=order, axis=0)

    # renamed from `min`/`max`, which shadowed the builtins
    mins = np.full_like(low, np.nan)
    maxs = np.full_like(high, np.nan)

    # set the extremas with the matching price
    mins[minima_idx] = low[minima_idx]
    maxs[maxima_idx] = high[maxima_idx]

    # forward fill NaN values to get the last extrema
    last_min = np_ffill(mins)
    last_max = np_ffill(maxs)

    if sequential:
        return EXTREMA(mins, maxs, last_min, last_max)
    else:
        return EXTREMA(mins[-1], maxs[-1], last_min[-1], last_max[-1])
def mama(candles: np.ndarray, fastlimit: float = 0.5, slowlimit: float = 0.05,
         source_type: str = "close", sequential: bool = False) -> MAMA:
    """
    MAMA - MESA Adaptive Moving Average

    :param candles: np.ndarray
    :param fastlimit: float - default: 0.5
    :param slowlimit: float - default: 0.05
    :param source_type: str - default: "close"
    :param sequential: bool - default=False

    :return: MAMA(mama, fama)
    """
    # limit the lookback to the configured warmup window for scalar results
    max_candles = get_config('env.data.warmup_candles_num', 240)
    if not sequential and len(candles) > max_candles:
        candles = candles[-max_candles:]

    prices = get_candle_source(candles, source_type=source_type)
    mama_line, fama_line = talib.MAMA(prices, fastlimit=fastlimit,
                                      slowlimit=slowlimit)

    if not sequential:
        return MAMA(mama_line[-1], fama_line[-1])
    return MAMA(mama_line, fama_line)
def supertrend(candles: np.ndarray, period: int = 10, factor: float = 3,
               sequential: bool = False) -> SuperTrend:
    """
    SuperTrend

    :param candles: np.ndarray
    :param period: int - default=10
    :param factor: float - default=3
    :param sequential: bool - default=False

    :return: SuperTrend(trend, changed)
    """
    warmup_candles_num = get_config('env.data.warmup_candles_num', 240)
    if not sequential and len(candles) > warmup_candles_num:
        candles = candles[-warmup_candles_num:]

    # calculation of ATR using TALIB function (columns: 3=high, 4=low, 2=close)
    atr = talib.ATR(candles[:, 3], candles[:, 4], candles[:, 2], timeperiod=period)
    super_trend, changed = supertrend_fast(candles, atr, factor, period)

    if sequential:
        return SuperTrend(super_trend, changed)
    else:
        return SuperTrend(super_trend[-1], changed[-1])