def _all_candlestick_patterns(self, df):
    o = df.Open.values
    h = df.High.values
    l = df.Low.values
    c = df.Close.values
    # add one integer column per TA-Lib candlestick pattern function (e.g. 'CDL2CROWS')
    for name in talib.get_function_groups()['Pattern Recognition']:
        df[name] = getattr(talib, name)(o, h, l, c)
    return df
def candle_analyse(df):
    '''input: OHLC dataframe'''
    cn_names = []
    ## calc all candle scores
    open, high, low, close = df['open'], df['high'], df['low'], df['close']
    df = df[['date']].copy()
    func_names = talib.get_function_groups()['Pattern Recognition']
    for func_name in func_names:
        func = getattr(talib, func_name)
        score_data = func(open, high, low, close)
        patt_name = func_name[3:]  # strip the leading 'CDL'
        df[patt_name] = score_data
        cn_names.append(patt_name)

    def make_dict(row):
        # keep only the patterns that actually fired on this bar
        res_d = OrderedDict()
        for func_name in func_names:
            patt_name = func_name[3:]
            score = row[patt_name]
            if score != 0:
                res_d[patt_name] = score
        return ','.join('%s:%s' % (k, v) for k, v in res_d.items())

    df['CDLList'] = df.apply(make_dict, axis=1)
    df['CDLScore'] = df[cn_names].sum(axis=1)
    return df
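# Minimal usage sketch for candle_analyse with hypothetical five-bar data; it assumes
# `import talib`, `import pandas as pd`, and `from collections import OrderedDict` are in scope.
bars = pd.DataFrame({
    'date': pd.date_range('2023-01-02', periods=5, freq='D'),
    'open': [10.0, 10.2, 10.1, 10.4, 10.3],
    'high': [10.5, 10.6, 10.4, 10.8, 10.7],
    'low': [9.8, 10.0, 9.9, 10.2, 10.1],
    'close': [10.2, 10.1, 10.4, 10.3, 10.6],
})
scored = candle_analyse(bars)
# 'CDLList' names the patterns that fired on each bar, 'CDLScore' sums their scores
print(scored[['date', 'CDLList', 'CDLScore']])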
def getTriggeredPatternsPrediction(stockData, date, timeGap=1):
    patternFuncs = talib.get_function_groups()['Pattern Recognition']
    patternAnalysis = {}
    patternAccuracy = {}
    for patternFunc in patternFuncs:
        patternAnalyzer = getattr(talib, patternFunc)
        predictionInfo = patternAnalyzer(stockData['Open'], stockData['High'],
                                         stockData['Low'], stockData['Close'])
        try:
            dateIndex = stockData[stockData['Date'] == date].index[0]
        except IndexError:
            print('The date given is a non-trading day (e.g. a weekend)')
            return pd.DataFrame({})
        if predictionInfo[dateIndex] != 0:
            accuracy = patternPrediction(stockData, predictionInfo, timeGap)
            print(patternFunc)
            print('Accuracy:', accuracy)
            patternAccuracy[patternFunc] = accuracy
            # newDF = stockData
            # newDF['Action'] = predictionInfo
            # newDF.to_csv(patternFunc + '.csv', index=False)
            patternAnalysis[patternFunc] = predictionInfo.tail(1)
    patternAnalysis = pd.DataFrame(patternAnalysis)
    return pd.DataFrame(patternAccuracy)
def patternDetection(historical_klines):
    op = []
    hi = []
    lo = []
    cl = []
    # collect all open, high, low and close values
    for kline in historical_klines:
        op.append(float(kline[1]))
        hi.append(float(kline[2]))
        lo.append(float(kline[3]))
        cl.append(float(kline[4]))
    # convert the lists into (talib-compatible) numpy arrays
    op = numpy.array(op, dtype=float)
    hi = numpy.array(hi, dtype=float)
    lo = numpy.array(lo, dtype=float)
    cl = numpy.array(cl, dtype=float)
    # apply the talib candle functions for pattern recognition to the price data
    candle_names = talib.get_function_groups()['Pattern Recognition']
    for candle in candle_names:
        results = getattr(talib, candle)(op, hi, lo, cl)
        for i in range(len(results)):
            # '0' means that no pattern was found
            if results[i] != 0:
                dateOfMatch = datetime.today() - timedelta(days=i + 1)
                # collect data of the match and print it
                print("Pattern: " + candle + " Result: " + str(results[i]) + " - " + str(dateOfMatch))
def onClickedFunctionButton(self):
    loader = QUiLoader()
    function_ui = loader.load('function_description.ui', parentWidget=self.python_editor)
    function_ui.setWindowTitle("函数说明")  # "Function description"
    function_ui.show()
    self.tree = function_ui.findChild(QTreeWidget)
    self.demo = function_ui.findChild(QTextEdit)
    # one top-level node per TA-Lib function group, one child node per function
    talib_groups = talib.get_function_groups()
    for key in talib_groups.keys():
        node = QTreeWidgetItem(self.tree)
        node.setText(0, key)
        for value in talib_groups[key]:
            sub_node = QTreeWidgetItem(node)
            sub_node.setText(0, value)
    names = functions.functions_name
    for key in names.keys():
        node = QTreeWidgetItem(self.tree)
        node.setText(0, key)
        for name in names[key].keys():
            sub_node = QTreeWidgetItem(node)
            sub_node.setText(0, name)
    self.tree.itemClicked.connect(
        lambda event1, event2: self.onFunctionTreeItemClicked(event1, event2))
    self.tree.itemDoubleClicked.connect(
        lambda event1, event2: self.onFunctionTreeItemDoubleClicked(event1, event2))
    return
def get_ta_functions():
    func_groups = ['Volume Indicators', 'Volatility Indicators',
                   'Overlap Studies', 'Momentum Indicators']
    func_names = [func_name
                  for g in func_groups
                  for func_name in ta.get_function_groups()[g]]
    funcs = {func_name: Function(func_name) for func_name in func_names}
    return funcs
def featurize_data(data):
    '''
    Takes in a CSV with the following columns:
    =======================================
    Date,Open,High,Low,Close,Volume,OpenInt
    =======================================
    and assigns a label (-1 or 1) for each price point given whether the
    price point is higher or lower than the previous one.
    '''
    # Initialize output array
    output = np.ones((len(data), 1))

    # Iterate over the pattern functions to match candlestick patterns
    for func in talib.get_function_groups()['Pattern Recognition']:
        # Interpret strings as functions and call them on the numeric data
        func_to_call = getattr(talib, func)
        feature_column = func_to_call(data['Open'], data['High'],
                                      data['Low'], data['Close']).reshape(len(data), 1)
        # Column-join the indicator output for the candlestick pattern features
        output = np.concatenate((output, feature_column), axis=1)

    # Concatenate labels to the final array
    output = np.concatenate((output, data['Label'].values.reshape(len(data), 1)), axis=1)
    output = output[:, 1:]
    return output
def get_cs_column_names():
    """Gets candlestick pattern column names for the dataframe."""
    pattern_functions = talib.get_function_groups()['Pattern Recognition']
    column_names = [p[3:] for p in pattern_functions]  # cuts out the 'CDL' prefix for the dataframe
    return column_names
def get_function_groups():
    '''
    Returns a dict with keys of function-group names and values of lists of
    function names, i.e. {'group_name': ['function_names']}.
    (Returns the TA-Lib function list grouped by category.)
    '''
    return talib.get_function_groups()
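# For reference, a quick sketch of the shape of the mapping returned above (assumes `import talib`):
# each key is a group name (e.g. 'Pattern Recognition', 'Momentum Indicators'),
# each value is a list of TA-Lib function-name strings.
groups = talib.get_function_groups()
for group_name, func_names in groups.items():
    print('%-22s %d functions' % (group_name, len(func_names)))
print(groups['Pattern Recognition'][:5])  # the candlestick functions all start with 'CDL'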
def ohlc_to_ta_lib(data):
    ta_lib_dict = {'bull': [], 'bear': []}
    # reshaped copy, to serve the API in a second format
    ta_lib_dict_type2 = {}
    csv_string = data
    csv_data = pd.read_csv(io.StringIO(csv_string), index_col=0, parse_dates=True)
    patterns_names = talib.get_function_groups()['Pattern Recognition']  # ta-lib pattern recognition list
    print(patterns_names)
    for pattern in patterns_names:
        csv_data[pattern] = getattr(talib, pattern)(csv_data['Open'], csv_data['High'],
                                                    csv_data['Low'], csv_data['Close'])
    tmp_data = csv_data
    # drop the all-zero columns, i.e. patterns that never occurred
    zero_column = list(csv_data.columns[(csv_data == 0).all()])
    not_zero_column = list(set(patterns_names) - set(zero_column))
    # print(not_zero_column)
    # csv_data.to_csv('./sample.csv', sep=',', na_rep='NaN')
    for pattern in not_zero_column:
        csv_data_bull = tmp_data[tmp_data[pattern] > 0]
        csv_data_bear = tmp_data[tmp_data[pattern] < 0]
        # in each csv_data, positive values (usually 100) indicate bullish and negative
        # values (usually -100) indicate bearish occurrences; every pattern has both a
        # bull and a bear version, so the dates are split accordingly.
        date_bull = list(map(lambda x: x.strftime("%Y-%m-%d"), csv_data_bull.index))
        date_bear = list(map(lambda x: x.strftime("%Y-%m-%d"), csv_data_bear.index))
        # print(date_bull)
        for date in date_bull:
            pattern_name = f'{pattern[3:]}_BULL'
            ta_lib_dict['bull'].append((pattern_name, date))
            # second format, keyed by pattern name
            if pattern_name not in ta_lib_dict_type2.keys():
                ta_lib_dict_type2[pattern_name] = [(date, "bull")]
            else:
                ta_lib_dict_type2[pattern_name].append((date, "bull"))
        for date in date_bear:
            pattern_name = f'{pattern[3:]}_BEAR'
            ta_lib_dict['bear'].append((pattern_name, date))
            # second format, keyed by pattern name
            if pattern_name not in ta_lib_dict_type2.keys():
                ta_lib_dict_type2[pattern_name] = [(date, "bear")]
            else:
                ta_lib_dict_type2[pattern_name].append((date, "bear"))
    return (ta_lib_dict, ta_lib_dict_type2, tmp_data)
def list_indicator(group=None):
    """
    List all available indicators.

    Parameters:
    -------
    group: string (optional)
        group of indicator; if None, the function lists all indicators
    """
    if group is None:
        return talib.get_functions()
    else:
        if group in ["Overlap", "overlap", "over"]:
            return talib.get_function_groups()["Overlap Studies"]
        elif group in ["Momentum", "momentum", "momen", "mom"]:
            return talib.get_function_groups()["Momentum Indicators"]
        elif group in ["Volume", "volume", "volu", "vol"]:
            return talib.get_function_groups()["Volume Indicators"]
        elif group in ["Volatility", "volatility", "vola"]:
            return talib.get_function_groups()["Volatility Indicators"]
        elif group in ["Price", "price", "pri"]:
            return talib.get_function_groups()["Price Transform"]
        elif group in ["Cycle", "cycle", "cyc"]:
            return talib.get_function_groups()["Cycle Indicators"]
        elif group in ["Pattern", "pattern", "patt", "pat"]:
            return talib.get_function_groups()["Pattern Recognition"]
        else:
            print("ERROR: no matching group " + group + ", must be in:")
            print(list_indicator_groups())
            print("Or enter no group to get the full list of indicators:")
            print("    list_indicator()")
            return
def get_indicators_by_group(group: str) -> [(str, str)]:
    """Returns a list of select options containing abbreviations of the group's indicators."""
    indicator_selects = []
    group_indicators = ta.get_function_groups()[group]
    for i in range(len(group_indicators)):
        abbrev = group_indicators[i]
        func = getattr(ab, abbrev)
        name = func.info["display_name"]
        indicator_selects.append((abbrev, abbrev))
    return indicator_selects
def pattern_chandeliers(iDf, iPatternList=talib.get_function_groups()['Pattern Recognition']):
    """
    :param iDf: OHLC DataFrame compatible with the TA-Lib abstract API
    :param iPatternList: TA-Lib pattern function names to evaluate (defaults to all of them)
    :return: iDf with one integer column added per pattern
    """
    for pattern in iPatternList:
        func = getattr(talib.abstract, pattern)
        iDf.loc[:, pattern] = func(iDf)
    return iDf
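# Usage sketch for pattern_chandeliers with hypothetical data (assumes `import pandas as pd`
# and `import talib`): the abstract API expects lowercase open/high/low/close columns.
df = pd.DataFrame({
    'open': [10.0, 10.2, 10.1, 10.4, 10.3],
    'high': [10.5, 10.6, 10.4, 10.8, 10.7],
    'low': [9.8, 10.0, 9.9, 10.2, 10.1],
    'close': [10.2, 10.1, 10.4, 10.3, 10.6],
})
# score only two named patterns instead of the full default list
out = pattern_chandeliers(df, iPatternList=['CDLDOJI', 'CDLENGULFING'])
print(out[['CDLDOJI', 'CDLENGULFING']])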
def onFunctionTecTree(self, item, column, function_ui, data, hidden_columns):
    group_box = function_ui.findChild(QGroupBox, "parameter_box")
    display_name_box = function_ui.findChild(QLineEdit, "display_name")
    labels = group_box.findChildren(QLabel)
    spin_boxs = group_box.findChildren(QSpinBox)
    for label in labels:
        label.close()
    for spin_box in spin_boxs:
        spin_box.close()
    text = item.text(column)
    if text in talib.get_function_groups().keys():
        return
    indicator = abstract.Function(text.lower())
    info = indicator.info
    display_name = info.get("display_name", "")
    display_name_box.setText(display_name)
    params_dict = indicator.get_parameters()
    keys = list(params_dict.keys())
    keys.insert(0, u"Column")
    locator = 20
    num = 0
    paras_locators = [locator + i * 30 for i in range(len(keys))]
    for key in keys:
        p = paras_locators[num]
        label = QLabel(group_box)
        label.setText(key)
        label.setGeometry(10, p, 100, 20)
        if key == "Column":
            combo_box = QComboBox(group_box)
            combo_box.setGeometry(100, p, 100, 20)
            combo_box.setObjectName(key)
            combo_box.setWhatsThis(key)
            columns = [i for i in list(data.columns) if i not in hidden_columns]
            combo_box.addItems(columns)
            if "close" in columns:
                combo_box.setCurrentText("close")
            combo_box.show()
        else:
            spin_box = QSpinBox(group_box)
            spin_box.setGeometry(100, p, 50, 20)
            spin_box.setObjectName(text)
            spin_box.setWhatsThis(key)
            value = params_dict[key]
            spin_box.setValue(value)
            spin_box.show()
        label.show()
        num += 1
def feature_dfs():
    mom_ind = talib.get_function_groups()['Momentum Indicators']
    over_stud = talib.get_function_groups()['Overlap Studies']
    volu_ind = talib.get_function_groups()['Volume Indicators']
    cyc_ind = talib.get_function_groups()['Cycle Indicators']
    vola_ind = talib.get_function_groups()['Volatility Indicators']
    stats_ind = talib.get_function_groups()['Statistic Functions']
    talib_abstract_fun_list = mom_ind + over_stud + volu_ind + cyc_ind + vola_ind + stats_ind
    talib_abstract_fun_list.remove('MAVP')
    no_params_df = pd.DataFrame([])
    only_time_period_df = pd.DataFrame([])
    other_param_df = pd.DataFrame([])
    for fun in talib_abstract_fun_list:
        info = getattr(talib.abstract, fun).info
        data = pd.Series([
            info['group'], info['name'], info['display_name'],
            ['{}: {}'.format(key, value) for key, value in info['parameters'].items()],
            info['output_names']
        ])
        if len(info['parameters']) == 0:
            no_params_df = no_params_df.append(data, ignore_index=True)
        elif 'timeperiod' in info['parameters'] and len(info['parameters']) == 1:
            only_time_period_df = only_time_period_df.append(data, ignore_index=True)
        else:
            other_param_df = other_param_df.append(data, ignore_index=True)
    ind_dfs = [no_params_df, only_time_period_df, other_param_df]
    for ind_df in ind_dfs:
        ind_df.columns = ['Group', 'Name', 'Short Description', 'Parameters', 'Output Names']
    return no_params_df, only_time_period_df, other_param_df
def add_TA(df: pd.DataFrame):
    inputs = {
        'open': df["Open"],
        'high': df["High"],
        'low': df["Low"],
        'close': df["Close"],
        'volume': df["Volume"]
    }
    overlap_studies = talib.get_function_groups()['Overlap Studies']
    if "MAVP" in overlap_studies:
        overlap_studies.remove("MAVP")
    df = process_ta_functions_group(df, inputs, overlap_studies)
    df = process_ta_functions_group(df, inputs, talib.get_function_groups()['Momentum Indicators'])
    df = process_ta_functions_group(df, inputs, talib.get_function_groups()['Cycle Indicators'])
    for func in talib.get_function_groups()['Volume Indicators']:
        df[func] = globals()[func](inputs)
    for func in talib.get_function_groups()['Volatility Indicators']:
        df[func] = globals()[func](inputs)
    for func in talib.get_function_groups()['Pattern Recognition']:
        df[func] = globals()[func](inputs)
    return df
def get_price_patterns(data):
    """
    Detects common price patterns using TA-Lib, e.g. Two Crows, Belt-hold, Hanging Man etc.

    :param data: DataFrame with data.
    :return: DataFrame with pattern "likelihoods" on a -200 to 200 scale.
    """
    patterns = {
        name: getattr(talib, name)(data['Open Price'], data['High Price'],
                                   data['Low Price'], data['Close Price'])
        for name in talib.get_function_groups()['Pattern Recognition']
    }
    return pd.DataFrame(patterns)
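# Usage sketch for get_price_patterns with hypothetical data (assumes `import pandas as pd`
# and `import talib`): column names must follow the 'Open Price' / 'High Price' /
# 'Low Price' / 'Close Price' convention used above.
data = pd.DataFrame({
    'Open Price': [10.0, 10.2, 10.1, 10.4, 10.3],
    'High Price': [10.5, 10.6, 10.4, 10.8, 10.7],
    'Low Price': [9.8, 10.0, 9.9, 10.2, 10.1],
    'Close Price': [10.2, 10.1, 10.4, 10.3, 10.6],
})
patterns = get_price_patterns(data)
print(patterns.shape)  # one column per CDL* function, values on the -200 to 200 scale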
def onLoadFunctionDialogTab(self, index, function_ui, data, hidden_columns):
    if index == 0:
        self.cal_list1 = function_ui.findChild(QListWidget, "cal_list1")
        self.cal_list2 = function_ui.findChild(QListWidget, "cal_list2")
        self.function_box = function_ui.findChild(QComboBox, "function_box")
        self.list_items = [i for i in list(data.columns) if i not in hidden_columns]
        self.cal_list1.clear()
        self.cal_list2.clear()
        for item in self.list_items:
            self.cal_list1.addItem(item)
        for item in self.list_items:
            self.cal_list2.addItem(item)
        self.function_box.currentIndexChanged.connect(self.onFunctionCalculateBox)
    elif index == 1:
        self.rel_list1 = function_ui.findChild(QListWidget, "rel_list1")
        self.rel_list2 = function_ui.findChild(QListWidget, "rel_list2")
        self.function_box = function_ui.findChild(QComboBox, "rel_box")
        self.list_items = [i for i in list(data.columns) if i not in hidden_columns]
        self.rel_list1.clear()
        self.rel_list2.clear()
        for item in self.list_items:
            self.rel_list1.addItem(item)
        for item in self.list_items:
            self.rel_list2.addItem(item)
        self.function_box.currentIndexChanged.connect(self.onFunctionCalculateBox)
    elif index == 2:
        self.search_tec_function = function_ui.findChild(QLineEdit, "search_input")
        self.tec_tree = function_ui.findChild(QTreeWidget, "tec_tree")
        talib_groups = talib.get_function_groups()
        for key in talib_groups.keys():
            node = QTreeWidgetItem(self.tec_tree)
            node.setText(0, key)
            for value in talib_groups[key]:
                sub_node = QTreeWidgetItem(node)
                sub_node.setText(0, value)
        self.tec_tree.itemClicked.connect(
            lambda event1, event2: self.onFunctionTecTree(
                event1, event2, function_ui, data, hidden_columns))
        self.search_tec_function.textEdited.connect(
            lambda event: self.onTecFunctionSearched(function_ui))
class CDL_Patterns(CustomFactor):
    """
    Pattern recognition (all patterns, multiple outputs).

    .. code-block:: python

        cdls = CDL_Patterns()
        a = cdls.CDL2CROWS
        b = cdls.CDL3BLACKCROWS

    returns
    -------
    Integer values for every TA-Lib pattern
    """
    window_length = 5  # pattern recognition needs a window of at least 1 and at most 5 bars, so use 5
    inputs = [
        USEquityPricing.open, USEquityPricing.high,
        USEquityPricing.low, USEquityPricing.close
    ]
    mask = IsStock()
    outputs = ta.get_function_groups()['Pattern Recognition']
    window_safe = True

    def _validate(self):
        super(CDL_Patterns, self)._validate()
        if self.window_length < 5:
            raise ValueError(
                "For pattern recognition, 'CDL_Patterns' expected a window length "
                "of at least 5, but was given {window_length}.".format(
                    window_length=self.window_length))

    def _one_pattern(self, out, func_name, opens, highs, lows, closes, N):
        """Compute the factor for the named pattern."""
        p_func = ta.abstract.Function(func_name)
        res = np.zeros(N)
        for i in range(N):
            inputs = {
                'open': opens[:, i],
                'high': highs[:, i],
                'low': lows[:, i],
                'close': closes[:, i]
            }
            p_func.input_arrays = inputs
            res[i] = p_func.outputs[-1]
        setattr(out, func_name, res)

    def compute(self, today, assets, out, opens, highs, lows, closes):
        N = len(assets)
        for func_name in self.outputs:
            self._one_pattern(out, func_name, opens, highs, lows, closes, N)
def get_groups_markdown(update=False):

    def unpluralize(noun):
        if noun.endswith('s'):
            if len(noun) > 2 and noun[-2] not in ["'", 'e']:
                return noun[:-1]
        return noun

    doc_links = get_doc_links(update)
    ret = {}
    for group, funcs in talib.get_function_groups().items():
        h1 = '# %s' % unpluralize(group)
        h1 = h1 + ' Functions' if 'Function' not in h1 else h1 + 's'
        ret[group] = [h1]
        for func in funcs:
            # figure out this function's options
            f = Function(func)
            inputs = f.info['input_names']
            if 'prices' in inputs:
                input_names = ', '.join(inputs['prices'])
            else:
                input_names = ', '.join([x for x in inputs.values() if x])
            params = ', '.join(['%s=%i' % (param, default)
                                for param, default in f.info['parameters'].items()])
            outputs = ', '.join(f.info['output_names'])

            # print the header
            ret[group].append('### %s - %s' % (func, f.info['display_name']))

            # print the code definition block
            ret[group].append("```")
            if params:
                ret[group].append('%s = %s(%s, %s)' % (outputs, func.upper(), input_names, params))
            else:
                ret[group].append('%s = %s(%s)' % (outputs, func.upper(), input_names))
            ret[group].append("```\n")

            # print extra info if we can
            if func in doc_links:
                ret[group].append('Learn more about the %s at [tadoc.org](%s). ' % (
                    f.info['display_name'], doc_links[func]))
        ret[group].append('\n[Documentation Index](../doc_index.html)')
        ret[group].append('[FLOAT_RIGHTAll Function Groups](../funcs.html)')
        ret[group] = '\n'.join(ret[group]) + '\n'
    return ret
def csv_to_func(ticker, dataset_name):
    try:
        # base_path = f'./pattern_datasets/{dataset_name}'
        base_path = f'./pattern_datasets/{dataset_name}/Industrials'
        if not os.path.isdir(base_path):
            os.makedirs(base_path, exist_ok=True)
        patterns_names = talib.get_function_groups()['Pattern Recognition']
        # print(patterns_names)
        # csv_data = pd.read_csv(f"./datasets/daily/{ticker}", index_col=0, parse_dates=True)
        csv_data = pd.read_csv(f"./datasets/Industrials/{ticker}", index_col=0, parse_dates=True)
        cnt = 1
        for pattern in patterns_names:
            csv_data[pattern] = getattr(talib, pattern)(csv_data['Open'], csv_data['High'],
                                                        csv_data['Low'], csv_data['Close'])
        tmp_data = csv_data
        # exclude patterns that never occurred anywhere in this interval
        zero_column = list(csv_data.columns[(csv_data == 0).all()])
        not_zero_column = list(set(patterns_names) - set(zero_column))
        for pattern in not_zero_column:
            # print(pattern)
            csv_data_bull = tmp_data[tmp_data[pattern] > 0]
            csv_data_bear = tmp_data[tmp_data[pattern] < 0]
            # positive values (usually 100) indicate bullish and negative values (usually -100)
            # indicate bearish occurrences; every pattern has both a bull and a bear version,
            # so the dates are split accordingly.
            date_bull = list(map(lambda x: x.strftime("%Y-%m-%d"), csv_data_bull.index))
            date_bear = list(map(lambda x: x.strftime("%Y-%m-%d"), csv_data_bear.index))
            for idx, item in enumerate(date_bull):
                make_chart(ticker, tmp_data, item, cnt, pattern, "bull", base_path)
                cnt += 1
            for idx, item in enumerate(date_bear):
                make_chart(ticker, tmp_data, item, cnt, pattern, "bear", base_path)
                cnt += 1
    except Exception as ex:
        print("error occurred", ex)
def pattern(ohlc):
    inputs = {
        "open": np.array(ohlc.open.values),
        "high": np.array(ohlc.high.values),
        "low": np.array(ohlc.low.values),
        "close": np.array(ohlc.close.values),
        "volume": np.array(ohlc.volume.values).astype(float)
    }
    pattern_names = talib.get_function_groups()['Pattern Recognition']
    results = {}
    # call every pattern-recognition function on the OHLC arrays
    for item in pattern_names:
        results[item] = getattr(talib, item)(inputs['open'], inputs['high'],
                                             inputs['low'], inputs['close'])
    return results
def pattern_pref(df, khh):
    df = df[df["custid"] == khh]
    stocks = np.unique(np.asarray(df["stkcode"]))
    patterns = talib.get_function_groups()['Pattern Recognition']
    pref_result = []
    for pattern_index, pattern in enumerate(patterns):
        op = [0, 0.0001]
        pattern_type = pattern_dict[pattern_index]
        if not pattern_type == 'discard':
            for stock in stocks:
                temp = df[df["stkcode"] == stock]
                pat_op = pattern_op(temp, stock, pattern, pattern_type)
                op[0] += pat_op[0]
                op[1] += pat_op[1]
        pref_result.append(op[0] / op[1])
    return pref_result
def get_last_day_patterns(df):
    candle_names = talib.get_function_groups()['Pattern Recognition']
    patterns = {'bullish': [], 'bearish': []}
    for candle in candle_names:
        df[candle] = getattr(talib, candle)(df['open'], df['high'], df['low'], df['close'])
        if df.iloc[-1][candle] > 0:
            patterns['bullish'].append(candle)
        elif df.iloc[-1][candle] < 0:
            patterns['bearish'].append(candle)
    df = detect_fractals(df)
    if df.iloc[-3]['fractal'] > 0:
        patterns['bullish'].append('fractal')
    elif df.iloc[-3]['fractal'] < 0:
        patterns['bearish'].append('fractal')
    # print(df.iloc[-1])
    return patterns
def Top_Pattern_recognition(dataframes, n_patterns, rep_ind):
    candle_names = ta.get_function_groups()['Pattern Recognition']
    df = dataframes.copy()
    # extract OHLC
    op = df['Open']
    hi = df['High']
    lo = df['Low']
    cl = df['Close']
    # keep only the OHLC columns before creating one column per pattern
    cols = [i for i in dataframes.columns
            if i != 'Open' and i != 'Close' and i != 'High' and i != 'Low']
    df.drop(cols, axis=1, inplace=True)
    for candle in candle_names:
        # the line below is equivalent to, e.g.:
        # df["CDL3LINESTRIKE"] = talib.CDL3LINESTRIKE(op, hi, lo, cl)
        df[candle] = getattr(ta, candle)(op, hi, lo, cl)

    # =============================================================================
    # Ranking the patterns
    # We successfully extracted candlestick patterns using TA-Lib. With a few lines of
    # code, we can condense this sparse information into a single column with pattern
    # labels. But first, we need to handle the cases where multiple patterns are found
    # for a given candle. To do that, we need a performance metric to compare patterns.
    # We will use the "Overall performance rank" from the pattern site.
    # =============================================================================
    cols = [i for i in df.columns
            if i != 'Open' and i != 'Close' and i != 'High' and i != 'Low']
    df_abs = df.copy()
    df_ = df_abs.abs()
    df_["sum"] = df_.sum(axis=1)
    df['sum'] = df_['sum']
    d = df[df['sum'] > 100 * rep_ind]
    d1 = d.sort_values(by='sum', ascending=False)[n_patterns:]
    d1 = d1.drop(d1.columns[d1.eq(0).all()], axis=1)
    return d1
def enrichTalibValues(inputDict, features, result):
    headers = ''
    for aType in features:
        for func in talib.get_function_groups()[aType]:
            pf = abstract.Function(func)
            try:
                pf.input_arrays = inputDict
                pf.run()
                outputs = pf.outputs
                if isinstance(outputs, list):
                    # multi-output functions: append one column per output
                    for anItem, name in zip(outputs, pf.output_names):
                        result = numpy.hstack((result, anItem.reshape(anItem.size, 1)))
                        headers = ','.join([headers, name])
                else:
                    result = numpy.hstack((result, outputs.reshape(outputs.size, 1)))
                    headers = ','.join([headers, func])
            except Exception as exc:
                # skip functions that cannot be computed for these inputs
                print(exc)
    return result, headers
def onTecFunctionSearched(self, function_ui):
    text = self.search_tec_function.text().strip().lower()
    self.tec_tree.clear()
    self.tec_tree = function_ui.findChild(QTreeWidget, "tec_tree")
    if text != "":
        talib_functions = talib.get_functions()
        for i in talib_functions:
            if text in i.lower():
                node = QTreeWidgetItem(self.tec_tree)
                node.setText(0, i)
    else:
        talib_groups = talib.get_function_groups()
        for key in talib_groups.keys():
            node = QTreeWidgetItem(self.tec_tree)
            node.setText(0, key)
            for value in talib_groups[key]:
                sub_node = QTreeWidgetItem(node)
                sub_node.setText(0, value)
def get_group_api():
    # add signal lines to the api
    mdic = dict(signal_line=dict(fun_name='signal_line',
                                 params=dict(value=0),
                                 settings=dict(shift=0),
                                 outputs=dict(real=['line', 'solid'])))
    # add ohlcv to the api
    for field_name in fields:
        mdic[field_name] = dict(fun_name=field_name.lower(),
                                settings=dict(shift=0),
                                outputs=dict(real=['line', 'solid']))
    mdic['Volume']['outputs']['real'][0] = 'column'

    # add talib functions
    dic = dict()  # keys are group names, values are the indicators in a group
    for key, value in talib.get_function_groups().items():
        key = Rename_group_name_dic[key]
        if key:
            idic = dict()
            for fun_name in value:
                if fun_name not in Bad_indicators:
                    name = fun_name + '(' + talib.abstract.Function(fun_name).info['display_name'] + ')'
                    idic[name] = dict(fun_name=fun_name, settings=dict(shift=0))
                    idic[name]['params'] = get_parameter(fun_name)
                    output_dic = dict()
                    for li in list(talib.abstract.Function(fun_name).output_flags.items()):
                        output_dic[li[0]] = ['line', 'solid']
                        if li[1][0] == 'Histogram':
                            output_dic[li[0]][0] = 'column'
                        elif li[1][0] == 'Dashed Line':
                            output_dic[li[0]][1] = 'Dash'
                        elif li[1][0] == 'star':
                            output_dic[li[0]][1] = 'Dot'
                    idic[name]['outputs'] = output_dic
            dic[key] = idic
    dic['Historical Data'] = mdic
    return dic
def get_all_candlesticks(df):
    # get all candlestick pattern functions
    # all functions take open, high, low, close, except abandoned baby, dark cloud cover,
    # evening doji star, evening star, mat hold, morning doji star, morning star,
    # which also take penetration=0
    # penetration is the percentage of penetration of one candle into another, e.g. 0.3 is 30%
    o, h, l, c = get_ohlc_for_talib(df)
    pattern_functions = talib.get_function_groups()['Pattern Recognition']
    # for the dataframe; cuts out 'CDL', so 'CDLHARAMI' becomes just 'HARAMI'
    column_names = [p[3:] for p in pattern_functions]
    # bearish seems to give -100, nothing 0, bullish 100
    data = {}
    for col, p in zip(column_names, pattern_functions):
        # detect candlestick pattern
        data[col] = getattr(talib, p)(o, h, l, c)
        # could also take a 3-week exponential moving average of the pattern:
        # data[col + '_mva_15'] = talib.EMA(data[col], timeperiod=15)
    cs_df = pd.DataFrame(data)
    cs_df.index = df.index
    full_df = pd.concat([df, cs_df], axis=1)
    return cs_df, full_df
def getPatternIndicators(df):
    high = df['High']
    low = df['Low']
    close = df['Close']
    open = df['Open']
    volume = df['Volume']
    '''
    df['2CROWS'] = ta.CDL2CROWS(open, high, low, close)
    df['3BLACKCROWS'] = ta.CDL3BLACKCROWS(open, high, low, close)
    df['3INSIDE'] = ta.CDL3INSIDE(open, high, low, close)
    df['3LINESTRIKE'] = ta.CDL3LINESTRIKE(open, high, low, close)
    df['3OUTSIDE'] = ta.CDL3OUTSIDE(open, high, low, close)
    df['3STARSOUTH'] = ta.CDL3STARSINSOUTH(open, high, low, close)
    df['3WHITESOLDIERS'] = ta.CDL3WHITESOLDIERS(open, high, low, close)
    df['ABANDONEDBABY'] = ta.CDLABANDONEDBABY(open, high, low, close, penetration=0)
    df['ADVANCEBLOCK'] = ta.CDLADVANCEBLOCK(open, high, low, close)
    df['BELTHOLD'] = ta.CDLBELTHOLD(open, high, low, close)
    df['BREAKAWAY'] = ta.CDLBREAKAWAY(open, high, low, close)
    '''
    # the loop below replaces the hand-written calls commented out above
    group = ta.get_function_groups()
    for i in group['Pattern Recognition']:
        print(i)
        method = getattr(ta, i)
        df[i] = method(open, high, low, close)
def get_groups_of_functions(self):
    ret = talib.get_function_groups()
    ret.pop('Math Operators')
    ret.pop('Math Transform')
    ret.pop('Statistic Functions')
    return ret
def get_groups_markdown():
    """Generate markdown for function groups using the Abstract API

    Returns a dictionary of group_name -> documentation for group functions
    """

    def unpluralize(noun):
        if noun.endswith('s'):
            if len(noun) > 2 and noun[-2] not in ["'", 'e']:
                return noun[:-1]
        return noun

    doc_links = get_doc_links()
    ret = {}
    for group, funcs in talib.get_function_groups().items():
        h1 = '# %s' % unpluralize(group)
        h1 = h1 + ' Functions' if 'Function' not in h1 else h1 + 's'
        group_docs = [h1]
        for func in funcs:
            # figure out this function's options
            f = Function(func)
            inputs = f.info['input_names']
            if 'price' in inputs and 'prices' in inputs:
                names = [inputs['price']]
                names.extend(inputs['prices'])
                input_names = ', '.join(names)
            elif 'prices' in inputs:
                input_names = ', '.join(inputs['prices'])
            else:
                input_names = ', '.join([x for x in inputs.values() if x])
            params = ', '.join(['%s=%i' % (param, default)
                                for param, default in f.info['parameters'].items()])
            outputs = ', '.join(f.info['output_names'])

            # print the header
            group_docs.append('### %s - %s' % (func, f.info['display_name']))
            if f.function_flags and 'Function has an unstable period' in f.function_flags:
                group_docs.append('NOTE: The ``%s`` function has an unstable period. ' % func)

            # print the code definition block
            group_docs.append("```python")
            if params:
                group_docs.append('%s = %s(%s, %s)' % (outputs, func.upper(), input_names, params))
            else:
                group_docs.append('%s = %s(%s)' % (outputs, func.upper(), input_names))
            group_docs.append("```\n")

            # print extra info if we can
            if func in doc_links:
                group_docs.append('Learn more about the %s at [tadoc.org](%s). ' % (
                    f.info['display_name'], doc_links[func]))
        group_docs.append('\n[Documentation Index](../doc_index.html)')
        group_docs.append('[FLOAT_RIGHTAll Function Groups](../funcs.html)')
        ret[slugify(group)] = '\n'.join(group_docs) + '\n'
    return ret
""" indicators.py uses talib to add indicator data to market data """ import talib # @UnresolvedImport FUNCS = talib.get_functions() FUNC_GROUPS = talib.get_function_groups() from utils.logger import fx_logger log = fx_logger(__name__) log.setLevel("DEBUG") class Calculator(object): """ wrapper to allow calling of indicator functions (just ta-lib functions for now) """ def __call__(self, name, *data, **kwargs): if name in FUNCS: meth = getattr(talib, name) return meth(*data, **kwargs) else: log.error("talib does not provide {0}".format(name)) return
def supported():
    return ta.get_function_groups()
_STOCK_DATA_ADDED_COLUMNS = EnumDict(open_trigger='OPEN_TRIGGER', close_trigger='CLOSE_TRIGGER')
STOCK_DATA_COLUMNS = EnumDict(date='Date') + RAW_PARAMETERS + _STOCK_DATA_ADDED_COLUMNS + SUPPORTED_INDICATORS
TECHNICAL_PARAMETER = RAW_PARAMETERS + SUPPORTED_INDICATORS
NUMERIC_VALUE = EnumDict(numeric_value='NUMERIC_VALUE')  # for a technical param that is a pure value (float)
TRADE_DIRECTIONS = EnumDict(long='LONG', short='SHORT')

"""
The next Enum class contains all the order relations between indicators.
"""
RELATIONS = EnumDict(greater='GREATER', less='LESS', crossover='CROSSOVER',
                     crossover_below='CROSSOVER_BELOW', crossover_above='CROSSOVER_ABOVE')

# TA-Lib technical indicators by groups
INDICATORS_GROUPS = EnumDict(zip([k.lower().replace(' ', '_') for k in talib.get_function_groups().keys()],
                                 [k for k in talib.get_function_groups().keys()]))
CYCLE_INDICATORS = EnumDict(zip([ind.lower() for ind in talib.get_function_groups().get(INDICATORS_GROUPS.cycle_indicators)],
                                [ind for ind in talib.get_function_groups().get(INDICATORS_GROUPS.cycle_indicators)]))
MOMENTUM_INDICATORS = EnumDict(zip([ind.lower() for ind in talib.get_function_groups().get(INDICATORS_GROUPS.momentum_indicators)],
                                   [ind for ind in talib.get_function_groups().get(INDICATORS_GROUPS.momentum_indicators)]))
OVERLAP_STUDIES_INDICATORS = EnumDict(zip([ind.lower() for ind in talib.get_function_groups().get(INDICATORS_GROUPS.overlap_studies)],
                                          [ind for ind in talib.get_function_groups().get(INDICATORS_GROUPS.overlap_studies)]))
VOLATILITY_INDICATORS = EnumDict(zip([ind.lower() for ind in talib.get_function_groups().get(INDICATORS_GROUPS.volatility_indicators)],
                                     [ind for ind in talib.get_function_groups().get(INDICATORS_GROUPS.volatility_indicators)]))
VOLUME_INDICATORS = EnumDict(zip([ind.lower() for ind in talib.get_function_groups().get(INDICATORS_GROUPS.volume_indicators)],
                                 [ind for ind in talib.get_function_groups().get(INDICATORS_GROUPS.volume_indicators)]))
# PRICE_TRANSFORM = EnumDict(zip([ind.lower() for ind in talib.get_function_groups().get(INDICATORS_GROUPS.price_transform)],
#                                [ind for ind in talib.get_function_groups().get(INDICATORS_GROUPS.price_transform)]))
# # d/dt (ema3-ema7)
# ematimesd37 = noNaN(np.c_[times[1:], (ema3[1:] - ema7[:-1]) / inputs['close'][1:]], 1)
# addToDict(ematimesd37, ticker, "EMAD37")

# know sure thing
# http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:know_sure_thing_kst
ksttimes = noNaN(np.c_[times, ind.kst(inputs['close'])], 1)
addToDict(ksttimes, ticker, "KST")

# all indicators below
indicators = ["BBANDS", "DEMA", "EMA", "SAR", "SAREXT",
              "ADX", "ADXR", "APO", "AROON",
              "AROONOSC", "BOP", "CCI", "CMO", "DX", "MACD", "MACDEXT", "MACDFIX", "MFI",
              "MINUS_DI", "MINUS_DM", "MOM", "ROC", "RSI", "STOCH", "STOCHF", "STOCHRSI",
              "WILLR", "AD", "ADOSC", "HT_DCPERIOD", "HT_DCPHASE", "HT_SINE", "NATR", "TRANGE"]
functiongroups = ta.get_function_groups()
for fname in indicators:
    fn = ta.abstract.Function(fname)
    # skip the candlestick patterns
    if fname in functiongroups['Pattern Recognition']:
        continue
    params = {}
    outputs = fn(inputs, **params)
    numoutputs = len(fn.info['output_names'])
    norm = 1.0
    if fname in functiongroups['Overlap Studies']:
        norm = inputs['close']  # normalize these indicators via the closing price
    if fname in functiongroups['Price Transform']:
        norm = inputs['close']
def get_func_list():
    talib_func = talib.get_function_groups()
    return talib_func
bbands = Function('bbands', input_arrays)
bbands.set_function_parameters(timePeriod=20, nbDevUp=2, nbDevDown=2)
upper, middle, lower = bbands()  # multiple output values unpacked (these will always have the correct order)
kama = Function('kama').run(input_arrays)  # alternative run() calling method
plot(odata, upper, middle, lower, kama)


def plot(odata, upper, middle, lower, kama):
    pylab.plot(r, idata, 'b-', label="original")
    pylab.plot(r, odata, 'g-', label="MA")
    pylab.plot(r, upper, 'r-', label="Upper")
    pylab.plot(r, middle, 'r-', label="Middle")
    pylab.plot(r, lower, 'r-', label="Lower")
    pylab.plot(r, kama, 'g', label="KAMA")
    pylab.legend()
    pylab.show()


if __name__ == '__main__':
    print('All functions (sorted by group):')
    groups = talib.get_function_groups()
    for group, functions in sorted(groups.items()):
        print('%s functions: ' % group, functions)
    if len(sys.argv) == 1 or sys.argv[1] == 'func':
        print('Using talib.func')
        func_example()
    else:
        print('Using talib.abstract')
        abstract_example()