def test_is_date_today__not_today__today(self):
    """is_date_today() returns False for a fixed past date and True for now()."""
    past_date = datetime.strptime("07.03.2018 um 23:11", "%d.%m.%Y um %H:%M")
    self.assertEqual(CommonUtils.is_date_today(past_date), False)
    # TODO: test the strptime conversion with the literal "um"
    # date_time = "14.03.2018 um 23:11"
    # datetime_object = datetime.strptime(date_time, "%d.%m.%Y um %H:%M")
    current_date = datetime.now()
    self.assertEqual(CommonUtils.is_date_today(current_date), True)
def read_tickers_from_file_or_web(stock_data_container_file, reload_file=False,
                                  dict_with_stock_pages_to_read=None):
    """
    Read stock tickers either from a cached pickle file or from the web pages
    described in *dict_with_stock_pages_to_read*, and cache the web result.

    :param stock_data_container_file: pickle file used as cache for the container list
    :param reload_file: when True, re-read from the web even if the cache file exists
    :param dict_with_stock_pages_to_read: dict whose values describe the web pages
        (each value is passed to CommonUtils.read_table_columns_from_webpage_list)
    :return: stock_data_container_list
    """
    # TODO:
    # https://de.wikipedia.org/wiki/Liste_von_Aktienindizes
    # https://de.wikipedia.org/wiki/EURO_STOXX_50#Zusammensetzung
    # Avoid the shared-mutable-default-argument pitfall: bind a fresh dict per call.
    if dict_with_stock_pages_to_read is None:
        dict_with_stock_pages_to_read = {}
    stock_data_container_list = []
    if not os.path.exists(stock_data_container_file) or reload_file:
        logger.info("Start reading tickers...")
        # Scrape all configured pages in parallel, one worker per page description.
        pool = CommonUtils.get_threading_pool()
        list_w = dict_with_stock_pages_to_read.values()
        result_list = pool.map(CommonUtils.read_table_columns_from_webpage_list, list_w)
        for result in result_list:
            stock_data_container_list.extend(result)
        # TODO: b) General Standard is not included of page:
        # http://topforeignstocks.com/stock-lists/the-list-of-listed-companies-in-germany/
        # TODO: http://www.boerse-online.de/index/liste/cdax
        #   no ticker symbols available there; column 2 contains the security (=name)
        #   (old commented-out CDAX/Wikipedia symbol-lookup experiment removed)
        with open(stock_data_container_file, "wb") as f:
            pickle.dump(stock_data_container_list, f)
    else:
        # Cache hit: load the previously pickled container list.
        with open(stock_data_container_file, "rb") as f:
            stock_data_container_list += pickle.load(f)
    return stock_data_container_list
def test_partial_data_in_dict(self):
    """
    have_dicts_same_shape() must return False when a single strategy's required
    sub-dict is compared against a dict that is still keyed by the strategy name.
    """
    stock_data_file = GlobalVariables.get_data_files_path(
    ) + "stock_data_container_file.pickle"
    req_params = StrategyFactory.get_required_parameters_with_default_parameters(
    )
    # Parameters for only one strategy, keyed by its name.
    missing_strategy_parameter_dict = {
        'W52HighTechnicalStrategy': {
            'check_days': 7,
            'min_cnt': 3,
            'min_vol_dev_fact': 1.2,
            'within52w_high_fact': 0.98,
            'data_readers': {
                'HistoricalDataReader': {
                    'weeks_delta': 52,
                    'data_source': 'iex',
                    'reload_data': False,
                    'ticker_needed': True
                }
            }
        }
    }
    # NOTE(review): other_params and all_strategy_parameters_dict are built but
    # never used by the assertion below — looks like leftover setup; confirm.
    other_params = {
        'stock_data_container_file': stock_data_file,
        'dict_with_stock_pages_to_read': {
            'SP500': {
                'websource_address':
                "http://en.wikipedia.org/wiki/List_of_S%26P_500_companies",
                'find_name': 'table',
                'class_name': 'class',
                'table_class': 'wikitable sortable',
                'ticker_column_to_read': 0,
                'name_column_to_read': 1,
                'stock_exchange': 'en'
            }
        },
        'RiskModels': {
            'FixedSizeRiskModel': {
                'OrderTarget': 'order_target_value',
                'TargetValue': 2500
            }
        }
    }
    all_strategy_parameters_dict = {
        'Strategies': missing_strategy_parameter_dict
    }
    all_strategy_parameters_dict.update({"OtherParameters": other_params})
    # The required W52 sub-dict vs. a dict keyed by the strategy name: different
    # shapes, so this must be False.
    self.assertFalse(
        CommonUtils.have_dicts_same_shape(
            req_params['Strategies']['W52HighTechnicalStrategy'],
            missing_strategy_parameter_dict))
def _read_objects_as_dict_recursive(self, params, my_col, my_row):
    """
    Reads the objects from params and returns the parameters as dictionary.

    :param params: dict of widget-like keys (supporting key["text"]) mapping to
        nested dicts, or a leaf widget supporting .get() — assumption from the
        access pattern, TODO confirm against callers
    :param my_col: current column counter
    :param my_row: current row counter
    :return: tuple (my_col, my_row, value) where value is a nested dict of
        parsed leaf values (int / float / bool / str)
    """
    my_col_2 = my_col
    all_txt = {}
    if isinstance(params, dict):
        # Branch node: recurse into every child, keyed by the widget's "text".
        for key in params.keys():
            my_col_2 = my_col
            my_row = my_row + 1
            txt_var = key["text"]
            my_col_2 = my_col_2 + 1
            my_col_2, my_row, txt = self._read_objects_as_dict_recursive(
                params[key], my_col_2, my_row)
            all_txt.update({txt_var: txt})
    else:
        # Leaf node: read the widget value and coerce it to int/float/bool/str.
        txt_entry = params.get()
        if CommonUtils.is_int(txt_entry):
            all_txt = int(txt_entry)
        elif CommonUtils.is_float(txt_entry):
            all_txt = float(txt_entry)
        elif isinstance(txt_entry, str):
            # Bug fix: the original used `txt_entry in 'True'`, a substring test
            # that matched "T", "rue" and even "" — compare for equality instead.
            if txt_entry == 'True':
                all_txt = True
            elif txt_entry == 'False':
                all_txt = False
            else:
                all_txt = txt_entry
        my_row = my_row + 1
    return my_col_2, my_row, all_txt
def accept_parameters_from_text(self, params_dict, required_parameters):
    """
    Method to accept the changes in the scrolled text for the parameters, if the
    shape and keys of the parameters dict are the same as in the required
    parameters dict.

    :param params_dict: parameters parsed from the text widget
    :param required_parameters: reference dict defining the required keys/shape
    :return: True, if parameters are valid and updated; False otherwise.
    """
    try:
        # Guard clauses replace the original nested if/else pyramid.
        if not isinstance(params_dict, dict):
            logger.error(
                "Parameters are no dict, please insert correct parameters!"
            )
            return False
        for param_key in params_dict.keys():
            # Every top-level key must be known and non-empty.
            if param_key not in required_parameters.keys() or len(
                    params_dict[param_key]) <= 0:
                logger.error(
                    "Parameter keys faulty, please insert correct parameters!"
                )
                return False
        if not CommonUtils.have_dicts_same_shape(
                required_parameters, params_dict):
            logger.error(
                "Parameter shapes are faulty, please insert correct parameters!"
            )
            return False
        # Valid: replace the model's parameters and strategy list in place.
        self.model.analysis_parameters.clear()
        self.model.analysis_parameters.update(params_dict)
        self.model.available_strategies_list.clear()
        for item in params_dict['Strategies']:
            self.model.available_strategies_list.append(item)
        logger.info("Analysis parameters Read")
    except Exception as e:
        logger.error("Exception while opening result stock: " + str(e) +
                     "\n" + str(traceback.format_exc()))
        return False
    return True
def read_tickers_from_web(stock_data_container_file, dict_with_stock_pages_to_read=None):
    """
    Read the given list of stock pages from the web and pickle the result.

    :param stock_data_container_file: pickle file the container list is dumped to
    :param dict_with_stock_pages_to_read: dict whose values describe the web pages
        (each value is passed to CommonUtils.read_table_columns_from_webpage_list)
    :return: stock_data_container_list
    """
    # Avoid the shared-mutable-default-argument pitfall: bind a fresh dict per call.
    if dict_with_stock_pages_to_read is None:
        dict_with_stock_pages_to_read = {}
    stock_data_container_list = []
    logger.info("Start reading tickers from web...")
    # Scrape all configured pages in parallel, one worker per page description.
    pool = CommonUtils.get_threading_pool()
    list_w = dict_with_stock_pages_to_read.values()
    result_list = pool.map(CommonUtils.read_table_columns_from_webpage_list, list_w)
    for result in result_list:
        stock_data_container_list.extend(result)
    # Cache the scraped container list for later runs.
    with open(stock_data_container_file, "wb") as f:
        pickle.dump(stock_data_container_list, f)
    return stock_data_container_list
# Convert the backtrader feed into a DataFrame for inspection.
df1 = convert_backtrader_to_dataframe(hist_data)

# Variable for our starting cash
startcash = 10000

# Create an instance of cerebro and register the strategy under test.
cerebro = bt.Cerebro()
cerebro.addstrategy(firstStrategy)

# Scrape the S&P 500 constituents from Wikipedia.
symbols = CommonUtils.read_table_columns_from_webpage_as_list(
    "http://en.wikipedia.org/wiki/List_of_S%26P_500_companies", "table",
    "class", "wikitable sortable", 0, 1, "en")
# symbols = ["AAPL" ,"FB","GIS","GE", "XOM"]

# Backtest window: the last 52 weeks up to now.
end = datetime.now()
start = (end - timedelta(weeks=52))
# Wall-clock timestamp of the run start (the original assigned this twice and
# also left a stray print()/pass pair — both removed).
start_time = datetime.now()

# plot_symbols = []
data_list = []
for s in symbols:
    data = bt.feeds.Quandl(dataname=s, fromdate=start, todate=end)
    cerebro.adddata(data)
def test_get_implemented_items_dict(self):
    """get_implemented_items_dict() finds exactly one matching class in this tree."""
    search_root = Path(os.path.dirname(os.path.abspath(__file__)))
    found = CommonUtils.get_implemented_items_dict(
        search_root, './*/**/**/*.py', "TestClassForUtils")
    self.assertEqual(1, len(found))
    self.assertEqual("TestClassForUtils", list(found.keys())[0])
def identify_stock_name_and_stock_ticker_and_target_price_from_news_nltk_german_classifier(
        self, single_news_to_analyze):
    """
    Identifies a stock name within a news and returns the name and ticker.

    :param single_news_to_analyze: news text itself
    :return: StockNameTickerExchangeAndTargetPrize(name, ticker, stock exchange,
        target price) or None if no stock name could be found
    :raises NotImplementedError: if single_news_to_analyze is None
    """
    if single_news_to_analyze is None:
        raise NotImplementedError
    preprocessed_news = self.optimize_text_for_german_tagger(
        single_news_to_analyze)
    # TODO: http://dsspace.wzb.eu/pyug/text_proc_feature_extraction/
    tokens = nltk.word_tokenize(preprocessed_news, language="german")
    tokens_removed_words = [
        t for t in tokens if t.lower() not in self.stopwords
    ]
    # http://dsspace.wzb.eu/pyug/text_proc_feature_extraction/
    # (old commented-out compound-token expansion experiment removed)
    tags = self.german_tagger.tag(tokens_removed_words)
    noun_tags = ""
    enable_tags = False
    for i in range(len(tags)):
        # TODO smarter: only start reading nouns after the first verb
        if tags[i][1].startswith("V"):
            enable_tags = True
        if enable_tags:
            if tags[i][1].startswith("N"):
                # Prefer "adjective noun" pairs when the previous tag is ADJ.
                # NOTE(review): `i > 1` skips the adjective check at i == 1 —
                # confirm whether `i >= 1` was intended.
                if i > 1 and tags[i - 1][1].startswith("ADJ"):
                    noun_tags = (tags[i - 1][0] + " " + tags[i][0])
                    break
                if tags[i][1].startswith("NE") or tags[i][1].startswith("NN"):
                    noun_tags = (tags[i][0])
                    break
    if noun_tags is not None and len(noun_tags) > 0:
        name_return = ""
        target_price_return = 0
        ticker_return = ""
        stock_exchange_return = ""
        stock_to_check = noun_tags  # [0] --> first tag in list
        # All cardinal-number tags; the last one is taken as the target price.
        price_tuple = [i for i in tags if i[1].startswith("CARD")]
        try:
            name_to_find = self.lookup_stock_abr_in_all_names(stock_to_check)
            idx = self._names.index(name_to_find)
            name_return = self._names[idx]
            ticker_return = self.tickers[idx]
            stock_exchange_return = self.stock_exchanges[idx]
        except Exception:
            # look up symbol in web instead of list
            try:
                name_return, ticker_return = \
                    get_symbol_and_real_name_from_abbrev_name_from_topforeignstocks(
                        stock_to_check)
            except Exception:
                # Bug fix: the original evaluated the message as a bare string
                # expression instead of logging it.
                logger.info("No STOCK found for news: " +
                            str(single_news_to_analyze))
                return None
        if len(price_tuple) > 0:
            price = price_tuple[len(price_tuple) - 1][0]
            price = price.replace(",", ".")  # replace german decimal comma
            if CommonUtils.is_float(price):
                # price_tuple: [0] --> number, [1] --> CARD tag
                target_price_return = float(price)
        ret = StockNameTickerExchangeAndTargetPrize(
            name_return, ticker_return, stock_exchange_return,
            target_price_return)
        return ret
    logger.info("No STOCK found for news: " + str(single_news_to_analyze))
    return None
def get_implemented_classes(self):
    """Return the dict of implemented classes discovered via CommonUtils."""
    return CommonUtils.get_implemented_items_dict(
        self.file_path, self.path_pattern, self.keyword)
def __init__(self):
    """Initialize the instance with the shared threading pool from CommonUtils."""
    self.pool = CommonUtils.get_threading_pool()
def test_data_in_dict(self):
    """
    have_dicts_same_shape() must accept a complete parameter dict and reject
    dicts with a renamed key or a missing strategy.
    """
    stock_data_file = GlobalVariables.get_data_files_path(
    ) + "stock_data_container_file.pickle"
    # Complete set of strategy parameters (matches the required shape).
    all_strategy_parameters_dict = {
        'SimplePatternNewsStrategy': {
            'news_threshold': 0.7,
            'german_tagger':
            GlobalVariables.get_data_files_path() +
            'nltk_german_classifier_data.pickle',
            'data_readers': {
                'TraderfoxNewsDataReader': {
                    'last_check_date_file':
                    GlobalVariables.get_data_files_path() +
                    'TestData\\last_date_time.csv',
                    'german_tagger':
                    GlobalVariables.get_data_files_path() +
                    'nltk_german_classifier_data.pickle',
                    'reload_data': True,
                    'ticker_needed': False
                },
                'HistoricalDataReader': {
                    'weeks_delta': 52,
                    'data_source': 'iex',
                    'reload_data': True,
                    'ticker_needed': False
                }
            }
        },
        'W52HighTechnicalStrategy': {
            'check_days': 7,
            'min_cnt': 3,
            'min_vol_dev_fact': 1.2,
            'within52w_high_fact': 0.98,
            'data_readers': {
                'HistoricalDataReader': {
                    'weeks_delta': 52,
                    'data_source': 'iex',
                    'reload_data': False,
                    'ticker_needed': True
                }
            }
        },
        'GapUpHighVolumeStrategy': {
            'min_gap_factor': 1.03
        }
    }
    # Non-strategy parameters (data sources, risk model, auto trading, broker).
    other_params = {
        'stock_data_container_file': stock_data_file,
        'dict_with_stock_pages_to_read': {
            'SP500': {
                'websource_address':
                "http://en.wikipedia.org/wiki/List_of_S%26P_500_companies",
                'find_name': 'table',
                'class_name': 'class',
                'table_class': 'wikitable sortable',
                'ticker_column_to_read': 0,
                'name_column_to_read': 1,
                'stock_exchange': 'en'
            },
            'DAX': {
                'websource_address':
                "http://topforeignstocks.com/stock-lists/the-list-of-listed-companies-in-germany/",
                'find_name': 'tbody',
                'class_name': 'class',
                'table_class': 'row-hover',
                'ticker_column_to_read': 2,
                'name_column_to_read': 1,
                'stock_exchange': 'de'
            }
        },
        'RiskModels': {
            'FixedSizeRiskModel': {
                'OrderTarget': 'order_target_value',
                'TargetValue': 2500
            }
        },
        'AutoTrading': {
            'RepetitiveScreeningInterval': 120,
            'MaxNumberOfDifferentStocksToBuyPerAutoTrade': 5
        },
        'Broker': {
            'Name': 'IBPyInteractiveBrokers'
        }
    }
    backtesting_parameters = {
        'BacktestingFramework': 'BacktraderWrapper',
        'initial_cash': 30000,
        'trade_commission_percent': 0.005
    }
    # Assemble the top-level dict in the shape the factory expects.
    all_strategy_parameters_dict = {
        'Strategies': all_strategy_parameters_dict
    }
    all_strategy_parameters_dict.update({"OtherParameters": other_params})
    all_strategy_parameters_dict.update(
        {"BacktestingParameters": backtesting_parameters})
    req_params = StrategyFactory.get_required_parameters_with_default_parameters(
    )
    # Complete and correctly shaped: must be accepted.
    self.assertTrue(
        CommonUtils.have_dicts_same_shape(req_params,
                                          all_strategy_parameters_dict))
    # key "news" instead of "news_threshold"
    corrupted_strategy_parameter_dict = {
        'SimplePatternNewsStrategy': {
            'news': 0.7,
            'german_tagger':
            GlobalVariables.get_data_files_path() +
            'nltk_german_classifier_data.pickle',
            'data_readers': {
                'TraderfoxNewsDataReader': {
                    'last_check_date_file':
                    GlobalVariables.get_data_files_path() +
                    'TestData\\last_date_time.csv',
                    'german_tagger':
                    GlobalVariables.get_data_files_path() +
                    'nltk_german_classifier_data.pickle',
                    'reload_data': True,
                    'ticker_needed': False
                },
                'HistoricalDataReader': {
                    'weeks_delta': 52,
                    'data_source': 'iex',
                    'reload_data': True,
                    'ticker_needed': False
                }
            }
        },
        'W52HighTechnicalStrategy': {
            'check_days': 7,
            'min_cnt': 3,
            'min_vol_dev_fact': 1.2,
            'within52w_high_fact': 0.98,
            'data_readers': {
                'HistoricalDataReader': {
                    'weeks_delta': 52,
                    'data_source': 'iex',
                    'reload_data': False,
                    'ticker_needed': True
                }
            }
        }
    }
    # Renamed key: must be rejected.
    self.assertFalse(
        CommonUtils.have_dicts_same_shape(
            req_params, corrupted_strategy_parameter_dict))
    # missing 'SimplePatternNewsStrategy'
    missing_strategy_parameter_dict = {
        'W52HighTechnicalStrategy': {
            'check_days': 7,
            'min_cnt': 3,
            'min_vol_dev_fact': 1.2,
            'within52w_high_fact': 0.98,
            'data_readers': {
                'HistoricalDataReader': {
                    'weeks_delta': 52,
                    'data_source': 'iex',
                    'reload_data': False,
                    'ticker_needed': True
                }
            }
        }
    }
    # Missing strategy: must be rejected.
    self.assertFalse(
        CommonUtils.have_dicts_same_shape(req_params,
                                          missing_strategy_parameter_dict))