def rsi_nb(ts, windows, is_ewm, is_min_periods):
    """For each window, calculate the RSI."""
    delta = diff_nb(ts, 1)[1:, :]  # otherwise ewma will be all NaN
    up, down = delta.copy(), delta.copy()
    up = set_by_mask_nb(up, up < 0, 0)
    down = np.abs(set_by_mask_nb(down, down > 0, 0))
    # Cache moving averages to effectively reduce the number of operations
    unique_windows = np.unique(windows)
    cache_d = dict()
    for i in range(unique_windows.shape[0]):
        if is_ewm:
            roll_up = ewm_mean_nb(up, unique_windows[i])
            roll_down = ewm_mean_nb(down, unique_windows[i])
        else:
            roll_up = rolling_mean_nb(up, unique_windows[i])
            roll_down = rolling_mean_nb(down, unique_windows[i])
        roll_up = prepend_nb(roll_up, 1, np.nan)  # bring to old shape
        roll_down = prepend_nb(roll_down, 1, np.nan)
        if is_min_periods:
            roll_up[:unique_windows[i], :] = np.nan
            roll_down[:unique_windows[i], :] = np.nan
        cache_d[unique_windows[i]] = roll_up, roll_down
    # Calculate RSI
    rsi = np.empty((ts.shape[0], ts.shape[1] * windows.shape[0]), dtype=f8)
    for i in range(windows.shape[0]):
        roll_up, roll_down = cache_d[windows[i]]
        rsi[:, i * ts.shape[1]:(i + 1) * ts.shape[1]] = 100 - 100 / (1 + roll_up / roll_down)
    return rsi
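
# A minimal usage sketch (not part of the original source): rsi_nb takes a 2D float
# array of prices and an integer array of windows, and returns the RSI columns tiled
# per window, i.e. shape (n_rows, n_cols * n_windows). The array names below are
# illustrative, and the _nb helpers (diff_nb, rolling_mean_nb, ...) defined earlier
# in the document are assumed to be available.
import numpy as np

price = np.random.uniform(90, 110, size=(1000, 3))  # 1000 bars, 3 instruments
windows = np.asarray([14, 21, 30])

rsi = rsi_nb(price, windows, False, True)  # SMA-based RSI with min_periods applied
assert rsi.shape == (price.shape[0], price.shape[1] * windows.shape[0])
# Columns [0:3] correspond to window 14, [3:6] to window 21, [6:9] to window 30.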
def bb_nb(ts, ns, ks, is_ewm, is_min_periods):
    """For each N and K, calculate the corresponding upper, middle and lower BB bands."""
    # Cache moving averages to effectively reduce the number of operations
    unique_windows = np.unique(ns)
    cache_d = dict()
    for i in range(unique_windows.shape[0]):
        if is_ewm:
            ma = ewm_mean_nb(ts, unique_windows[i])
            mstd = ewm_std_nb(ts, unique_windows[i])
        else:
            ma = rolling_mean_nb(ts, unique_windows[i])
            mstd = rolling_std_nb(ts, unique_windows[i])
        if is_min_periods:
            ma[:unique_windows[i], :] = np.nan
            mstd[:unique_windows[i], :] = np.nan
        cache_d[unique_windows[i]] = ma, mstd
    # Calculate lower, middle and upper bands
    upper = np.empty((ts.shape[0], ts.shape[1] * ns.shape[0]), dtype=f8)
    middle = np.empty((ts.shape[0], ts.shape[1] * ns.shape[0]), dtype=f8)
    lower = np.empty((ts.shape[0], ts.shape[1] * ns.shape[0]), dtype=f8)
    for i in range(ns.shape[0]):
        ma, mstd = cache_d[ns[i]]
        upper[:, i * ts.shape[1]:(i + 1) * ts.shape[1]] = ma + ks[i] * mstd  # (MA + Kσ)
        middle[:, i * ts.shape[1]:(i + 1) * ts.shape[1]] = ma  # MA
        lower[:, i * ts.shape[1]:(i + 1) * ts.shape[1]] = ma - ks[i] * mstd  # (MA - Kσ)
    return upper, middle, lower
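
# A minimal usage sketch (an assumption, not from the source): ns and ks are paired
# element-wise, so ns[i] is the window and ks[i] the band width of the i-th combination,
# while the MA/std cache is keyed by window only and reused across duplicate windows.
import numpy as np

price = np.random.uniform(90, 110, size=(1000, 2))
ns = np.asarray([20, 20, 50])
ks = np.asarray([2.0, 3.0, 2.0])

upper, middle, lower = bb_nb(price, ns, ks, False, True)
# Each output has price.shape[1] * ns.shape[0] columns; columns [0:2] belong to (N=20, K=2).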
def stoch_apply_func_nb(close_ts, high_ts, low_ts, k_window, d_window, ewm, cache_dict):
    """Calculate the %K and %D lines of the Stochastic Oscillator for one parameter combination."""
    roll_min, roll_max = cache_dict[k_window]
    percent_k = 100 * (close_ts - roll_min) / (roll_max - roll_min)
    if ewm:
        percent_d = ewm_mean_nb(percent_k, d_window)
    else:
        percent_d = rolling_mean_nb(percent_k, d_window)
    percent_d[:k_window + d_window - 2, :] = np.nan  # min_periods for ewm
    return percent_k, percent_d
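
# The cache_dict above is expected to map k_window -> (rolling min of lows, rolling max of highs).
# The actual caching function is not shown in this section; the sketch below is a hypothetical
# stand-in that uses pandas rolling min/max instead of the Numba rolling helpers.
import numpy as np
import pandas as pd

def stoch_caching_sketch(high_ts, low_ts, k_windows):
    cache_dict = dict()
    for k_window in np.unique(k_windows):
        roll_min = pd.DataFrame(low_ts).rolling(int(k_window)).min().values
        roll_max = pd.DataFrame(high_ts).rolling(int(k_window)).max().values
        cache_dict[k_window] = roll_min, roll_max
    return cache_dict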
def rsi_caching_nb(ts, windows, ewms):
    """Cache the rolling means of up and down moves for each (window, ewm) pair."""
    delta = diff_nb(ts)[1:, :]  # otherwise ewma will be all NaN
    up, down = delta.copy(), delta.copy()
    up = set_by_mask_nb(up, up < 0, 0)
    down = np.abs(set_by_mask_nb(down, down > 0, 0))
    # Cache
    cache_dict = dict()
    for i in range(windows.shape[0]):
        if (windows[i], int(ewms[i])) not in cache_dict:
            if ewms[i]:
                roll_up = ewm_mean_nb(up, windows[i])
                roll_down = ewm_mean_nb(down, windows[i])
            else:
                roll_up = rolling_mean_nb(up, windows[i])
                roll_down = rolling_mean_nb(down, windows[i])
            roll_up = prepend_nb(roll_up, 1, np.nan)  # bring to old shape
            roll_down = prepend_nb(roll_down, 1, np.nan)
            cache_dict[(windows[i], int(ewms[i]))] = roll_up, roll_down
    return cache_dict
def ma_caching_nb(ts, windows, ewms):
    """Cache the moving average for each (window, ewm) pair."""
    cache_dict = dict()
    for i in range(windows.shape[0]):
        if (windows[i], int(ewms[i])) not in cache_dict:
            if ewms[i]:
                ma = ewm_mean_nb(ts, windows[i])
            else:
                ma = rolling_mean_nb(ts, windows[i])
            cache_dict[(windows[i], int(ewms[i]))] = ma
    return cache_dict
def macd_apply_func_nb(ts, fast_window, slow_window, signal_window, ewm, cache_dict):
    """Calculate the fast MA, slow MA, MACD and signal lines for one parameter combination."""
    fast_ma = cache_dict[(fast_window, int(ewm))]
    slow_ma = cache_dict[(slow_window, int(ewm))]
    macd_ts = fast_ma - slow_ma
    if ewm:
        signal_ts = ewm_mean_nb(macd_ts, signal_window)
    else:
        signal_ts = rolling_mean_nb(macd_ts, signal_window)
    signal_ts[:max(fast_window, slow_window) + signal_window - 2, :] = np.nan  # min_periods for ewm
    return np.copy(fast_ma), np.copy(slow_ma), macd_ts, signal_ts
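
# A minimal sketch (assumption, not from this section) of how ma_caching_nb and
# macd_apply_func_nb fit together: the cache is built once for all unique
# (window, ewm) pairs and then reused for every parameter combination.
# Array names are illustrative.
import numpy as np

price = np.random.uniform(90, 110, size=(1000, 1))
fast_windows = np.asarray([10, 12])
slow_windows = np.asarray([26, 30])
signal_windows = np.asarray([9, 9])
ewms = np.asarray([True, True])

cache_dict = ma_caching_nb(price, np.concatenate((fast_windows, slow_windows)),
                           np.concatenate((ewms, ewms)))
for i in range(fast_windows.shape[0]):
    fast_ma, slow_ma, macd_ts, signal_ts = macd_apply_func_nb(
        price, fast_windows[i], slow_windows[i], signal_windows[i], ewms[i], cache_dict)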
def dmac_nb(ts, fast_windows, slow_windows, is_ewm, is_min_periods):
    """For each fast and slow window, calculate the corresponding SMA/EMA."""
    # Cache moving averages to effectively reduce the number of operations
    unique_windows = np.unique(np.concatenate((fast_windows, slow_windows)))
    cache_d = dict()
    for i in range(unique_windows.shape[0]):
        if is_ewm:
            ma = ewm_mean_nb(ts, unique_windows[i])
        else:
            ma = rolling_mean_nb(ts, unique_windows[i])
        if is_min_periods:
            ma[:unique_windows[i], :] = np.nan
        cache_d[unique_windows[i]] = ma
    # Concatenate moving averages out of cache and return
    fast_mas = np.empty((ts.shape[0], ts.shape[1] * fast_windows.shape[0]), dtype=f8)
    slow_mas = np.empty((ts.shape[0], ts.shape[1] * fast_windows.shape[0]), dtype=f8)
    for i in range(fast_windows.shape[0]):
        fast_mas[:, i * ts.shape[1]:(i + 1) * ts.shape[1]] = cache_d[fast_windows[i]]
        slow_mas[:, i * ts.shape[1]:(i + 1) * ts.shape[1]] = cache_d[slow_windows[i]]
    return fast_mas, slow_mas
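
# A minimal usage sketch (assumption): fast_windows and slow_windows are paired
# element-wise, one pair per tested crossover combination; the comparison below
# is only an example of how the outputs might be turned into signals.
import numpy as np

price = np.random.uniform(90, 110, size=(1000, 1))
fast_windows = np.asarray([10, 20, 30])
slow_windows = np.asarray([50, 100, 200])

fast_mas, slow_mas = dmac_nb(price, fast_windows, slow_windows, False, True)
in_position = fast_mas > slow_mas  # e.g. stay long while the fast MA is above the slow MA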