def skew(other_args: List[str], s_ticker: str, s_interval: str, df_stock: pd.DataFrame):
    """Plot the rolling skewness indicator in a panel below the share price.

    Parameters
    ----------
    other_args : List[str]
        Argparse arguments
    s_ticker : str
        Ticker
    s_interval : str
        Stock time interval
    df_stock : pd.DataFrame
        Dataframe of prices
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="skew",
        description=""" Skewness is a measure of asymmetry or distortion of symmetric distribution. It measures the deviation of the given distribution of a random variable from a symmetric distribution, such as normal distribution. A normal distribution is without any skewness, as it is symmetrical on both sides. Hence, a curve is regarded as skewed if it is shifted towards the right or the left. """,
    )
    # Rolling window length used by ta.skew
    parser.add_argument(
        "-l",
        "--length",
        action="store",
        dest="n_length",
        type=check_positive,
        default=14,
        help="length",
    )
    # Shift applied to the indicator series by ta.skew
    parser.add_argument(
        "-o",
        "--offset",
        action="store",
        dest="n_offset",
        type=check_positive,
        default=0,
        help="offset",
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return

        # Daily data ("1440min") carries an adjusted close; intraday frames
        # only have a raw "Close" column.
        # Daily
        if s_interval == "1440min":
            df_ta = ta.skew(
                close=df_stock["Adj Close"],
                length=ns_parser.n_length,
                offset=ns_parser.n_offset,
            ).dropna()

        # Intraday
        else:
            df_ta = ta.skew(
                close=df_stock["Close"],
                length=ns_parser.n_length,
                offset=ns_parser.n_offset,
            ).dropna()

        # Two stacked panels: price on top, skew indicator below
        fig, axes = plt.subplots(2, 1, figsize=plot_autoscale(), dpi=PLOT_DPI)
        ax = axes[0]
        ax.set_title(f"{s_ticker} Skewness Indicator")
        if s_interval == "1440min":
            ax.plot(df_stock.index, df_stock["Adj Close"].values, "fuchsia", lw=1)
        else:
            ax.plot(df_stock.index, df_stock["Close"].values, "fuchsia", lw=1)
        ax.set_xlim(df_stock.index[0], df_stock.index[-1])
        ax.set_ylabel("Share Price ($)")
        ax.grid(b=True, which="major", color="#666666", linestyle="-")
        ax.minorticks_on()
        ax.grid(b=True, which="minor", color="#999999", linestyle="-", alpha=0.2)

        ax2 = axes[1]
        ax2.plot(df_ta.index, df_ta.values, "b", lw=2, label="skew")
        # Keep both panels aligned on the full price date range
        ax2.set_xlim(df_stock.index[0], df_stock.index[-1])
        ax2.grid(b=True, which="major", color="#666666", linestyle="-")
        ax2.minorticks_on()
        ax2.grid(b=True, which="minor", color="#999999", linestyle="-", alpha=0.2)

        if gtff.USE_ION:
            plt.ion()

        plt.gcf().autofmt_xdate()
        fig.tight_layout(pad=1)
        # NOTE(review): plt.legend() applies to the current axes (the skew
        # panel, last one plotted), which is where the only label lives.
        plt.legend()
        plt.show()
        print("")

    except Exception as e:
        print(e, "\n")
def call_view(self, other_args: List[str]):
    """Process view command"""
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="view",
        description="""View available presets under presets folder.""",
    )
    parser.add_argument(
        "-p",
        "--preset",
        action="store",
        dest="preset",
        type=str,
        help="View specific preset",
        default="",
        choices=self.preset_choices,
    )
    # Allow the preset name to be passed positionally ("view mypreset"):
    # if the first token does not start with "-", treat it as the -p value.
    if other_args and "-" not in other_args[0][0]:
        other_args.insert(0, "-p")
    ns_parser = parse_known_args_and_warn(parser, other_args)
    if ns_parser:
        if ns_parser.preset:
            # Show every non-empty filter of the requested preset, grouped
            # by its known INI sections.
            preset_filter = configparser.RawConfigParser()
            # Keep option names case-sensitive (default lowercases them)
            preset_filter.optionxform = str  # type: ignore
            preset_filter.read(presets_path + ns_parser.preset + ".ini")
            filters_headers = [
                "General",
                "Date",
                "TransactionFiling",
                "Industry",
                "InsiderTitle",
                "Others",
                "CompanyTotals",
            ]
            console.print("")
            for filter_header in filters_headers:
                console.print(f" - {filter_header} -")
                d_filters = {**preset_filter[filter_header]}
                # Drop filters with empty values
                d_filters = {k: v for k, v in d_filters.items() if v}
                if d_filters:
                    # Pad keys so the values line up in one column
                    max_len = len(max(d_filters, key=len))
                    for key, value in d_filters.items():
                        console.print(f"{key}{(max_len-len(key))*' '}: {value}")
                console.print("")
        else:
            # No preset requested: list every preset with the description
            # found above its [General] section.
            for preset in self.preset_choices:
                with open(
                    presets_path + preset + ".ini",
                    encoding="utf8",
                ) as f:
                    description = ""
                    for line in f:
                        if line.strip() == "[General]":
                            break
                        description += line.strip()
                console.print(f"\nPRESET: {preset}")
                # NOTE(review): assumes every preset header contains
                # "Description: " — raises IndexError otherwise; confirm
                # against the preset files.
                console.print(description.split("Description: ")[1].replace("#", ""))
            console.print("")
def view_signals(other_args: List[str]):
    """View list of available signals

    Prints one line per Finviz screener signal with a short description.

    Parameters
    ----------
    other_args : List[str]
        Command line arguments to be processed with argparse
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="signals",
        description=""" Prints list of available signals. [Source: Finviz] """,
    )
    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return

        # BUGFIX: print() performs no %-formatting, so "%%" was shown
        # literally; a single "%" is the intended output.
        print("top_gainers stocks with the highest % price gain today")
        print("top_losers stocks with the highest % price loss today.")
        print("new_high stocks making 52-week high today")
        print("new_low stocks making 52-week low today")
        print(
            "most_volatile stocks with the highest widest high/low trading range today"
        )
        print("most_active stocks with the highest trading volume today")
        print(
            "unusual_volume stocks with unusually high volume today - the highest relative volume ratio"
        )
        print(
            "overbought stock is becoming overvalued and may experience a pullback."
        )
        print(
            "oversold oversold stocks may represent a buying opportunity for investors."
        )
        print("downgrades stocks downgraded by analysts today")
        print("upgrades stocks upgraded by analysts today.")
        print(
            "earnings_before companies reporting earnings today, before market open"
        )
        print(
            "earnings_after companies reporting earnings today, after market close"
        )
        print("recent_insider_buying stocks with recent insider buying activity")
        print("recent_insider_selling stocks with recent insider selling activity")
        print("major_news stocks with the highest news coverage today")
        print(
            "horizontal_sr horizontal channel of price range between support and resistance trendlines"
        )
        print("tl_resistance once a rising trendline is broken")
        print("tl_support once a falling trendline is broken")
        print(
            "wedge_up upward trendline support and upward trendline resistance (reversal)"
        )
        print(
            "wedge_down downward trendline support and downward trendline resistance (reversal)"
        )
        # BUGFIX: typo "contiunation" -> "continuation"
        print(
            "wedge upward trendline support, downward trendline resistance (continuation)"
        )
        print(
            "triangle_ascending upward trendline support and horizontal trendline resistance"
        )
        print(
            "triangle_descending horizontal trendline support and downward trendline resistance"
        )
        print(
            "channel_up both support and resistance trendlines slope upward"
        )
        print(
            "channel_down both support and resistance trendlines slope downward"
        )
        print(
            "channel both support and resistance trendlines are horizontal"
        )
        print(
            "double_top stock with 'M' shape that indicates a bearish reversal in trend"
        )
        print(
            "double_bottom stock with 'W' shape that indicates a bullish reversal in trend"
        )
        print("multiple_top same as double_top hitting more highs")
        print("multiple_bottom same as double_bottom hitting more lows")
        print(
            "head_shoulders chart formation that predicts a bullish-to-bearish trend reversal"
        )
        print(
            "head_shoulders_inverse chart formation that predicts a bearish-to-bullish trend reversal"
        )
        print("")

    except Exception as e:
        print(e)
        print("")
        return
def spread(
    other_args: List[str], s_ticker: str, s_interval: str, df_stock: pd.DataFrame
):
    """Plot rolling standard deviation and variance below the share price.

    Parameters
    ----------
    other_args : List[str]
        Argparse arguments
    s_ticker : str
        Ticker
    s_interval : str
        Stock time interval
    df_stock : pd.DataFrame
        Dataframe of prices
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="spread",
        description=""" """,
    )
    # Rolling window length for both stdev and variance
    parser.add_argument(
        "-l",
        "--length",
        action="store",
        dest="n_length",
        type=check_positive,
        default=14,
        help="length",
    )
    # Shift applied to the indicator series
    parser.add_argument(
        "-o",
        "--offset",
        action="store",
        dest="n_offset",
        type=check_positive,
        default=0,
        help="offset",
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return

        # Daily data ("1440min") carries an adjusted close; intraday frames
        # only have a raw "Close" column.
        # Daily
        if s_interval == "1440min":
            df_ta = ta.stdev(
                close=df_stock["Adj Close"],
                length=ns_parser.n_length,
                offset=ns_parser.n_offset,
            ).dropna()
            df_ta_ = ta.variance(
                close=df_stock["Adj Close"],
                length=ns_parser.n_length,
                offset=ns_parser.n_offset,
            ).dropna()

        # Intraday
        else:
            df_ta = ta.stdev(
                close=df_stock["Close"],
                length=ns_parser.n_length,
                offset=ns_parser.n_offset,
            ).dropna()
            # BUGFIX: intraday data has no "Adj Close" column — the variance
            # must be computed on "Close", matching the stdev call above.
            df_ta_ = ta.variance(
                close=df_stock["Close"],
                length=ns_parser.n_length,
                offset=ns_parser.n_offset,
            ).dropna()

        # Three stacked panels: price, stdev, variance
        fig, axes = plt.subplots(3, 1, figsize=plot_autoscale(), dpi=PLOT_DPI)
        ax = axes[0]
        ax.set_title(f"{s_ticker} Spread")
        if s_interval == "1440min":
            ax.plot(df_stock.index, df_stock["Adj Close"].values, "fuchsia", lw=1)
        else:
            ax.plot(df_stock.index, df_stock["Close"].values, "fuchsia", lw=1)
        ax.set_xlim(df_stock.index[0], df_stock.index[-1])
        ax.set_ylabel("Share Price ($)")
        ax.yaxis.set_label_position("right")
        ax.grid(b=True, which="major", color="#666666", linestyle="-")
        ax.minorticks_on()
        ax.grid(b=True, which="minor", color="#999999", linestyle="-", alpha=0.2)

        ax1 = axes[1]
        ax1.plot(df_ta.index, df_ta.values, "b", lw=1, label="stdev")
        ax1.set_xlim(df_stock.index[0], df_stock.index[-1])
        ax1.set_ylabel("Stdev")
        ax1.yaxis.set_label_position("right")
        ax1.grid(b=True, which="major", color="#666666", linestyle="-")
        ax1.minorticks_on()
        ax1.grid(b=True, which="minor", color="#999999", linestyle="-", alpha=0.2)

        ax2 = axes[2]
        ax2.plot(df_ta_.index, df_ta_.values, "g", lw=1, label="variance")
        ax2.set_xlim(df_stock.index[0], df_stock.index[-1])
        ax2.set_ylabel("Variance")
        ax2.yaxis.set_label_position("right")
        ax2.grid(b=True, which="major", color="#666666", linestyle="-")
        ax2.minorticks_on()
        ax2.grid(b=True, which="minor", color="#999999", linestyle="-", alpha=0.2)

        if gtff.USE_ION:
            plt.ion()

        plt.gcf().autofmt_xdate()
        fig.tight_layout(pad=1)
        plt.show()
        print("")

    except Exception as e:
        print(e, "\n")
def _append_financials_rows(soup_financials, df_financials: pd.DataFrame):
    """Parse Market Watch table rows and append one data row per item to df_financials (in place)."""
    for financials_info in soup_financials:
        financials_row = financials_info.text.split("\n")
        # Rows with fewer cells are headers/spacers, not data rows
        if len(financials_row) > 5:
            for item in financials_row:
                # A row counts as data as soon as any cell contains a digit
                if bool(re.search(r"\d", item)):
                    a_financials_info = financials_info.text.split("\n")
                    # Cell 2 holds the item name; cells 5..-3 the yearly values
                    l_financials = [a_financials_info[2]]
                    l_financials.extend(a_financials_info[5:-2])
                    df_financials.loc[len(df_financials.index)] = l_financials
                    break


def sean_seah_warnings(other_args: List[str], ticker: str):
    """Display Sean Seah warnings

    Scrapes the Market Watch income statement and balance sheet for `ticker`,
    derives ROE, ROA and the interest coverage ratio, and prints a warning for
    each Sean Seah rule the stock violates.

    Parameters
    ----------
    other_args : List[str]
        argparse other args
    ticker : str
        Stock ticker
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="warnings",
        description=""" Sean Seah warnings. Check: Consistent historical earnings per share; Consistently high return on equity; Consistently high return on assets; 5x Net Income > Long-Term Debt; and Interest coverage ratio more than 3. See https://www.drwealth.com/gone-fishing-with-buffett-by-sean-seah/comment-page-1/ [Source: Market Watch] """,
    )
    parser.add_argument(
        "-i",
        "--info",
        action="store_true",
        default=False,
        dest="b_info",
        help="provide more information about Sean Seah warning rules.",
    )
    parser.add_argument(
        "-d",
        "--debug",
        action="store_true",
        default=False,
        dest="b_debug",
        help="print insights into warnings calculation.",
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return

        if ns_parser.b_info:
            # Echo the explanatory text shipped with the terminal
            filepath = "fundamental_analysis/info_sean_seah.txt"
            with open(filepath) as fp:
                line = fp.readline()
                while line:
                    print(f"{line.strip()}")
                    line = fp.readline()
            print("")

        # From INCOME STATEMENT, get: 'EPS (Basic)', 'Net Income', 'Interest Expense', 'EBITDA'
        url_financials = (
            f"https://www.marketwatch.com/investing/stock/{ticker}/financials/income"
        )
        text_soup_financials = BeautifulSoup(
            requests.get(url_financials, headers={"User-Agent": get_user_agent()}).text,
            "lxml",
        )

        # Define financials columns (drop the trailing trend column)
        a_financials_header = []
        for financials_header in text_soup_financials.findAll(
            "th", {"class": "overflow__heading"}
        ):
            a_financials_header.append(
                financials_header.text.strip("\n").split("\n")[0]
            )
        df_financials = pd.DataFrame(columns=a_financials_header[0:-1])

        # Collect plain and highlighted table rows, then parse them
        soup_financials = text_soup_financials.findAll(
            lambda tag: tag.name == "tr" and tag.get("class") == ["table__row"]
        )
        soup_financials += text_soup_financials.findAll(
            "tr", {"class": "table__row is-highlighted"}
        )
        _append_financials_rows(soup_financials, df_financials)

        # Set item name as index
        df_financials = df_financials.set_index("Item")
        df_sean_seah = df_financials.loc[
            ["EPS (Basic)", "Net Income", "Interest Expense", "EBITDA"]
        ]

        # From BALANCE SHEET, get: 'Liabilities & Shareholders\' Equity', 'Long-Term Debt'
        url_financials = f"https://www.marketwatch.com/investing/stock/{ticker}/financials/balance-sheet"
        text_soup_financials = BeautifulSoup(
            requests.get(url_financials, headers={"User-Agent": get_user_agent()}).text,
            "lxml",
        )

        # Define financials columns, keeping everything before the trend column
        a_financials_header = []
        for financials_header in text_soup_financials.findAll(
            "th", {"class": "overflow__heading"}
        ):
            a_financials_header.append(
                financials_header.text.strip("\n").split("\n")[0]
            )
        s_header_end_trend = "5-year trend"
        df_financials = pd.DataFrame(
            columns=a_financials_header[0 : a_financials_header.index(s_header_end_trend)]
        )

        soup_financials = text_soup_financials.findAll(
            lambda tag: tag.name == "tr" and tag.get("class") == ["table__row"]
        )
        soup_financials += text_soup_financials.findAll(
            "tr", {"class": "table__row is-highlighted"}
        )
        _append_financials_rows(soup_financials, df_financials)

        # Set item name as index
        df_financials = df_financials.set_index("Item")

        # Create dataframe to compute meaningful metrics from sean seah book
        df_sean_seah = df_sean_seah.append(
            df_financials.loc[
                [
                    "Total Shareholders' Equity",
                    "Liabilities & Shareholders' Equity",
                    "Long-Term Debt",
                ]
            ]
        )

        # Clean these metrics by parsing their values to float
        df_sean_seah = df_sean_seah.applymap(lambda x: clean_data_values_to_float(x))

        # Add additional necessary metrics
        series = (
            df_sean_seah.loc["Net Income"]
            / df_sean_seah.loc["Total Shareholders' Equity"]
        )
        series.name = "ROE"
        df_sean_seah = df_sean_seah.append(series)

        series = df_sean_seah.loc["EBITDA"] / df_sean_seah.loc["Interest Expense"]
        series.name = "Interest Coverage Ratio"
        df_sean_seah = df_sean_seah.append(series)

        series = (
            df_sean_seah.loc["Net Income"]
            / df_sean_seah.loc["Liabilities & Shareholders' Equity"]
        )
        series.name = "ROA"
        df_sean_seah = df_sean_seah.append(series)

        print(df_sean_seah.applymap(lambda x: int_or_round_float(x)).to_string())

        n_warnings = 0
        print("\nWARNINGS:")

        if np.any(df_sean_seah.loc["EPS (Basic)"].diff().dropna().values < 0):
            print("NO consistent historical earnings per share")
            n_warnings += 1
            if ns_parser.b_debug:
                sa_eps = np.array2string(
                    df_sean_seah.loc["EPS (Basic)"].values,
                    formatter={"float_kind": lambda x: int_or_round_float(x)},
                )
                print(f" EPS: {sa_eps}")
                sa_growth = np.array2string(
                    df_sean_seah.loc["EPS (Basic)"].diff().dropna().values,
                    formatter={"float_kind": lambda x: int_or_round_float(x)},
                )
                print(f" Growth: {sa_growth} < 0")

        if np.any(df_sean_seah.loc["ROE"].values < 0.15):
            print("NOT consistently high return on equity")
            n_warnings += 1
            if ns_parser.b_debug:
                sa_roe = np.array2string(
                    df_sean_seah.loc["ROE"].values,
                    formatter={"float_kind": lambda x: int_or_round_float(x)},
                )
                print(f" ROE: {sa_roe} < 0.15")

        if np.any(df_sean_seah.loc["ROA"].values < 0.07):
            print("NOT consistently high return on assets")
            n_warnings += 1
            if ns_parser.b_debug:
                sa_roa = np.array2string(
                    df_sean_seah.loc["ROA"].values,
                    formatter={"float_kind": lambda x: int_or_round_float(x)},
                )
                print(f" ROA: {sa_roa} < 0.07")

        # Rule: 5x Net Income should exceed Long-Term Debt
        if np.any(
            df_sean_seah.loc["Long-Term Debt"].values
            > 5 * df_sean_seah.loc["Net Income"].values
        ):
            print("5x Net Income < Long-Term Debt")
            n_warnings += 1
            if ns_parser.b_debug:
                # BUGFIX: the debug output multiplied Long-Term Debt by 5,
                # but the check above compares LTD against 5x *Net Income*.
                sa_5_net_income = np.array2string(
                    5 * df_sean_seah.loc["Net Income"].values,
                    formatter={"float_kind": lambda x: int_or_round_float(x)},
                )
                print(f" 5x NET Income: {sa_5_net_income}")
                sa_long_term_debt = np.array2string(
                    df_sean_seah.loc["Long-Term Debt"].values,
                    formatter={"float_kind": lambda x: int_or_round_float(x)},
                )
                print(f" lower than Long-Term Debt: {sa_long_term_debt}")

        if np.any(df_sean_seah.loc["Interest Coverage Ratio"].values < 3):
            print("Interest coverage ratio less than 3")
            n_warnings += 1
            if ns_parser.b_debug:
                # BUGFIX: removed the spurious 100x factor — the ratio is
                # compared directly against 3, not as a percentage.
                sa_interest_coverage_ratio = np.array2string(
                    df_sean_seah.loc["Interest Coverage Ratio"].values,
                    formatter={"float_kind": lambda x: int_or_round_float(x)},
                )
                print(f" Interest Coverage Ratio: {sa_interest_coverage_ratio} < 3")

        if n_warnings == 0:
            print("None. Good stonk")

        print("")

    except Exception as e:
        print(e)
        print("")
        return
def call_ema_cross(self, other_args: List[str]):
    """Call EMA Cross strategy"""
    # Build the argument parser for the long/short EMA crossover backtest
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="ema_cross",
        description="Cross between a long and a short Exponential Moving Average.",
    )
    parser.add_argument(
        "-l",
        "--long",
        dest="long",
        type=check_positive,
        default=50,
        help="Long EMA period",
    )
    parser.add_argument(
        "-s",
        "--short",
        dest="short",
        type=check_positive,
        default=20,
        help="Short EMA period",
    )
    parser.add_argument(
        "--spy",
        dest="spy",
        action="store_true",
        default=False,
        help="Flag to add spy hold comparison",
    )
    parser.add_argument(
        "--no_bench",
        dest="no_bench",
        action="store_true",
        default=False,
        help="Flag to not show buy and hold comparison",
    )
    parser.add_argument(
        "--no_short",
        dest="shortable",
        action="store_false",
        default=True,
        help="Flag that disables the short sell",
    )

    ns = parse_known_args_and_warn(
        parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if not ns:
        return

    # Warn about swapped periods but still run the backtest
    if ns.long < ns.short:
        print("Short EMA period is longer than Long EMA period\n")

    bt_view.display_ema_cross(
        ticker=self.ticker,
        df_stock=self.stock,
        short_ema=ns.short,
        long_ema=ns.long,
        spy_bt=ns.spy,
        no_bench=ns.no_bench,
        shortable=ns.shortable,
        export=ns.export,
    )
def call_chart(self, other_args):
    """Process chart command

    Builds a source-specific argument parser (CoinPaprika "cp", CoinGecko
    "cg", Binance "bin" or Coinbase "cb") and dispatches to plot_chart.
    Does nothing when no coin is loaded.

    Parameters
    ----------
    other_args : List[str]
        Command line arguments to be processed with argparse
    """
    if self.current_coin:
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="chart",
            description="""Display chart for loaded coin. You can specify currency vs which you want to show chart and also number of days to get data for.""",
        )

        if self.source == "cp":
            parser.add_argument(
                "--vs",
                default="usd",
                dest="vs",
                help="Currency to display vs coin",
                choices=["usd", "btc", "BTC", "USD"],
                type=str,
            )
            parser.add_argument(
                "-d",
                "--days",
                default=30,
                dest="days",
                help="Number of days to get data for",
                type=check_positive,
            )

        if self.source == "cg":
            parser.add_argument(
                "--vs", default="usd", dest="vs", help="Currency to display vs coin"
            )
            parser.add_argument(
                "-d",
                "--days",
                default=30,
                dest="days",
                help="Number of days to get data for",
            )

        if self.source == "bin":
            client = Client(cfg.API_BINANCE_KEY, cfg.API_BINANCE_SECRET)
            # Map user-facing interval names to Binance kline constants
            interval_map = {
                "1day": client.KLINE_INTERVAL_1DAY,
                "3day": client.KLINE_INTERVAL_3DAY,
                "1hour": client.KLINE_INTERVAL_1HOUR,
                "2hour": client.KLINE_INTERVAL_2HOUR,
                "4hour": client.KLINE_INTERVAL_4HOUR,
                "6hour": client.KLINE_INTERVAL_6HOUR,
                "8hour": client.KLINE_INTERVAL_8HOUR,
                "12hour": client.KLINE_INTERVAL_12HOUR,
                "1week": client.KLINE_INTERVAL_1WEEK,
                "1min": client.KLINE_INTERVAL_1MINUTE,
                "3min": client.KLINE_INTERVAL_3MINUTE,
                "5min": client.KLINE_INTERVAL_5MINUTE,
                "15min": client.KLINE_INTERVAL_15MINUTE,
                "30min": client.KLINE_INTERVAL_30MINUTE,
                "1month": client.KLINE_INTERVAL_1MONTH,
            }
            # NOTE(review): unlike the "cb" branch, no empty-quotes guard
            # here — confirm show_available_pairs always returns quotes.
            _, quotes = binance_model.show_available_pairs_for_given_symbol(
                self.current_coin
            )
            parser.add_argument(
                "--vs",
                help="Quote currency (what to view coin vs)",
                dest="vs",
                type=str,
                default="USDT",
                choices=quotes,
            )
            parser.add_argument(
                "-i",
                "--interval",
                help="Interval to get data",
                choices=list(interval_map.keys()),
                dest="interval",
                default="1day",
                type=str,
            )
            parser.add_argument(
                "-l",
                "--limit",
                dest="limit",
                default=100,
                help="Number to get",
                type=check_positive,
            )

        if self.source == "cb":
            # Map user-facing interval names to Coinbase granularity (seconds)
            interval_map = {
                "1min": 60,
                "5min": 300,
                "15min": 900,
                "1hour": 3600,
                "6hour": 21600,
                "24hour": 86400,
                "1day": 86400,
            }
            _, quotes = coinbase_model.show_available_pairs_for_given_symbol(
                self.current_coin
            )
            # BUGFIX: was `len(quotes) < 0`, which can never be true, so the
            # error branch was dead code; bail out when no quotes were found.
            if not quotes:
                print(
                    f"Couldn't find any quoted coins for provided symbol {self.current_coin}"
                )
                return
            parser.add_argument(
                "--vs",
                help="Quote currency (what to view coin vs)",
                dest="vs",
                type=str,
                default="USDT" if "USDT" in quotes else quotes[0],
                choices=quotes,
            )
            parser.add_argument(
                "-i",
                "--interval",
                help="Interval to get data",
                choices=list(interval_map.keys()),
                dest="interval",
                default="1day",
                type=str,
            )
            parser.add_argument(
                "-l",
                "--limit",
                dest="limit",
                default=100,
                help="Number to get",
                type=check_positive,
            )

        ns_parser = parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            # Exchange sources page by limit/interval; aggregators by days
            if self.source in ["bin", "cb"]:
                limit = ns_parser.limit
                interval = ns_parser.interval
                days = 0
            else:
                limit = 0
                interval = "1day"
                days = ns_parser.days

            plot_chart(
                coin=self.current_coin,
                limit=limit,
                interval=interval,
                days=days,
                currency=ns_parser.vs,
                source=self.source,
            )
def call_regression(self, other_args: List[str]):
    """Process linear command

    Parses regression options and dispatches to
    regression_view.display_regression on the loaded coin data.

    Parameters
    ----------
    other_args : List[str]
        Command line arguments to be processed with argparse
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="regression",
        description=""" Regression attempts to model the relationship between two variables by fitting a linear/quadratic/cubic/other equation to observed data. One variable is considered to be an explanatory variable, and the other is considered to be a dependent variable. """,
    )
    parser.add_argument(
        "-i",
        "--input",
        action="store",
        dest="n_inputs",
        type=check_positive,
        default=40,
        help="number of days to use for prediction.",
    )
    parser.add_argument(
        "-d",
        "--days",
        action="store",
        dest="n_days",
        type=check_positive,
        default=5,
        help="prediction days.",
    )
    parser.add_argument(
        "-j",
        "--jumps",
        action="store",
        dest="n_jumps",
        type=check_positive,
        default=1,
        help="number of jumps in training data.",
    )
    parser.add_argument(
        "-e",
        "--end",
        action="store",
        type=valid_date,
        dest="s_end_date",
        default=None,
        help="The end date (format YYYY-MM-DD) to select - Backtesting",
    )
    parser.add_argument(
        "-p",
        "--polynomial",
        action="store",
        dest="n_polynomial",
        type=check_positive,
        default=1,
        help="polynomial associated with regression.",
    )

    # Allow the polynomial order to be passed positionally ("regression 2").
    # BUGFIX: the original condition joined the two spellings with `or`,
    # which is true whenever only ONE of them is present — so an explicit
    # `-p 2` got a second "-p" inserted and broke parsing. Insert only when
    # NEITHER spelling was supplied.
    if (
        other_args
        and "-h" not in other_args
        and "-p" not in other_args
        and "--polynomial" not in other_args
    ):
        other_args.insert(0, "-p")

    ns_parser = parse_known_args_and_warn(
        parser, other_args, export_allowed=EXPORT_ONLY_FIGURES_ALLOWED
    )
    if ns_parser:
        # BACKTESTING CHECK — warn (but do not abort) on invalid end dates
        if ns_parser.s_end_date:
            if ns_parser.s_end_date < self.data.index[0]:
                print(
                    "Backtesting not allowed, since End Date is older than Start Date of historical data\n"
                )

            if ns_parser.s_end_date < get_next_stock_market_days(
                last_stock_day=self.data.index[0],
                n_next_days=5 + ns_parser.n_days,
            )[-1]:
                print(
                    "Backtesting not allowed, since End Date is too close to Start Date to train model\n"
                )

        regression_view.display_regression(
            dataset=self.coin,
            values=self.data[self.target],
            poly_order=ns_parser.n_polynomial,
            n_input=ns_parser.n_inputs,
            n_predict=ns_parser.n_days,
            n_jumps=ns_parser.n_jumps,
            s_end_date=ns_parser.s_end_date,
            export=ns_parser.export,
            time_res=self.resolution,
        )
def call_dpi(self, other_args: List[str]):
    """Process dpi command"""
    # Parser for the DeFi Pulse protocol table
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="dpi",
        description=""" Displays DeFi Pulse crypto protocols. [Source: https://defipulse.com/] """,
    )
    parser.add_argument(
        "-t",
        "--top",
        dest="top",
        default=15,
        type=check_positive,
        help="top N number records",
    )
    parser.add_argument(
        "-s",
        "--sort",
        dest="sortby",
        default="Rank",
        type=str,
        choices=["Rank", "Name", "Chain", "Category", "TVL", "Change_1D"],
        help="Sort by given column. Default: Rank",
    )
    parser.add_argument(
        "--descend",
        dest="descend",
        action="store_false",
        default=True,
        help="Flag to sort in descending order (lowest first)",
    )
    parser.add_argument(
        "--export",
        dest="export",
        default="",
        type=str,
        choices=["csv", "json", "xlsx"],
        help="Export dataframe data to csv,json,xlsx file",
    )

    try:
        ns = parse_known_args_and_warn(parser, other_args)
        if ns:
            defipulse_view.display_defipulse(
                top=ns.top,
                sortby=ns.sortby,
                descend=ns.descend,
                export=ns.export,
            )
    except Exception as e:
        print(e, "\n")
def call_llama(self, other_args: List[str]):
    """Process llama command

    Parses options and displays DeFi Llama protocol listings.

    Parameters
    ----------
    other_args : List[str]
        Command line arguments to be processed with argparse
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="llama",
        description=""" Display information about listed DeFi Protocols on DeFi Llama. [Source: https://docs.llama.fi/api] """,
    )
    parser.add_argument(
        "-t",
        "--top",
        dest="top",
        type=check_positive,
        help="top N number records",
        default=10,
    )
    parser.add_argument(
        "-s",
        "--sort",
        dest="sortby",
        type=str,
        help="Sort by given column. Default: tvl",
        default="tvl",
        # BUGFIX: "tvl" was listed twice in the choices
        choices=[
            "tvl",
            "symbol",
            "category",
            "chains",
            "change_1h",
            "change_1d",
            "change_7d",
        ],
    )
    # BUGFIX: both flags below used action="store_false" with default=False,
    # so passing them could never change the value — they were no-ops.
    # store_true with the same default keeps the default behavior intact
    # while making the flags functional.
    parser.add_argument(
        "--descend",
        action="store_true",
        help="Flag to sort in descending order (lowest first)",
        dest="descend",
        default=False,
    )
    parser.add_argument(
        "--desc",
        action="store_true",
        help="Flag to display description of protocol",
        dest="description",
        default=False,
    )
    parser.add_argument(
        "--export",
        choices=["csv", "json", "xlsx"],
        default="",
        type=str,
        dest="export",
        help="Export dataframe data to csv,json,xlsx file",
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return

        llama_view.display_defi_protocols(
            top=ns_parser.top,
            sortby=ns_parser.sortby,
            descend=ns_parser.descend,
            description=ns_parser.description,
            export=ns_parser.export,
        )

    except Exception as e:
        print(e, "\n")
def regression(
    other_args: List[str], s_ticker: str, df_stock: pd.DataFrame, polynomial: int
):
    """ Train a regression model

    Fits a linear (or polynomial Ridge) regression on the adjusted close,
    plots the historical series plus the prediction, and — when an end date
    is given — backtests the prediction against the held-out data.

    Parameters
    ----------
    other_args : List[str]
        Argparse arguments
    s_ticker : str
        Stock ticker
    df_stock : pd.DataFrame
        Dataframe of stock prices
    polynomial : int
        Order of polynomial (or the LINEAR / USER_INPUT sentinels)
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="regression",
        description=""" Regression attempts to model the relationship between two variables by fitting a linear/quadratic/cubic/other equation to observed data. One variable is considered to be an explanatory variable, and the other is considered to be a dependent variable. """,
    )
    parser.add_argument(
        "-i",
        "--input",
        action="store",
        dest="n_inputs",
        type=check_positive,
        default=40,
        help="number of days to use for prediction.",
    )
    parser.add_argument(
        "-d",
        "--days",
        action="store",
        dest="n_days",
        type=check_positive,
        default=5,
        help="prediction days.",
    )
    parser.add_argument(
        "-j",
        "--jumps",
        action="store",
        dest="n_jumps",
        type=check_positive,
        default=1,
        help="number of jumps in training data.",
    )
    parser.add_argument(
        "-e",
        "--end",
        action="store",
        type=valid_date,
        dest="s_end_date",
        default=None,
        help="The end date (format YYYY-MM-DD) to select - Backtesting",
    )
    # Only ask for the polynomial order when the caller did not fix it
    if polynomial == USER_INPUT:
        parser.add_argument(
            "-p",
            "--polynomial",
            action="store",
            dest="n_polynomial",
            type=check_positive,
            required=True,
            help="polynomial associated with regression.",
        )

    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return

        # BACKTESTING — validate the end date, then split the frame into
        # training data (up to end date) and held-out future data.
        if ns_parser.s_end_date:
            if ns_parser.s_end_date < df_stock.index[0]:
                print(
                    "Backtesting not allowed, since End Date is older than Start Date of historical data\n"
                )
                return

            if ns_parser.s_end_date < get_next_stock_market_days(
                last_stock_day=df_stock.index[0],
                n_next_days=ns_parser.n_inputs + ns_parser.n_days,
            )[-1]:
                print(
                    "Backtesting not allowed, since End Date is too close to Start Date to train model\n"
                )
                return

            future_index = get_next_stock_market_days(
                last_stock_day=ns_parser.s_end_date, n_next_days=ns_parser.n_days
            )

            if future_index[-1] > datetime.datetime.now():
                print(
                    "Backtesting not allowed, since End Date + Prediction days is in the future\n"
                )
                return

            df_future = df_stock[future_index[0] : future_index[-1]]
            df_stock = df_stock[: ns_parser.s_end_date]

        # Split training data into (features, targets) windows
        stock_x, stock_y = splitTrain.split_train(
            df_stock["5. adjusted close"].values,
            ns_parser.n_inputs,
            ns_parser.n_days,
            ns_parser.n_jumps,
        )

        if not stock_x:
            print("Given the model parameters more training data is needed.\n")
            return

        # Machine Learning model: plain linear, or polynomial Ridge pipeline
        if polynomial == LINEAR:
            model = linear_model.LinearRegression(n_jobs=-1)
        else:
            if polynomial == USER_INPUT:
                polynomial = ns_parser.n_polynomial
            model = pipeline.make_pipeline(
                preprocessing.PolynomialFeatures(polynomial), linear_model.Ridge()
            )

        model.fit(stock_x, stock_y)
        # Predict from the last n_inputs closes; clamp negative prices to 0
        l_predictions = [
            i if i > 0 else 0
            for i in model.predict(
                df_stock["5. adjusted close"]
                .values[-ns_parser.n_inputs :]
                .reshape(1, -1)
            )[0]
        ]

        # Prediction data indexed on upcoming market days
        l_pred_days = get_next_stock_market_days(
            last_stock_day=df_stock["5. adjusted close"].index[-1],
            n_next_days=ns_parser.n_days,
        )
        df_pred = pd.Series(l_predictions, index=l_pred_days, name="Price")

        # Plotting — history in default color, prediction in green
        plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)
        plt.plot(df_stock.index, df_stock["5. adjusted close"], lw=2)
        # BACKTESTING
        if ns_parser.s_end_date:
            plt.title(
                f"BACKTESTING: Regression (polynomial {polynomial}) on {s_ticker} - {ns_parser.n_days} days prediction"
            )
        else:
            plt.title(
                f"Regression (polynomial {polynomial}) on {s_ticker} - {ns_parser.n_days} days prediction"
            )
        plt.xlim(
            df_stock.index[0], get_next_stock_market_days(df_pred.index[-1], 1)[-1]
        )
        plt.xlabel("Time")
        plt.ylabel("Share Price ($)")
        plt.grid(b=True, which="major", color="#666666", linestyle="-")
        plt.minorticks_on()
        plt.grid(b=True, which="minor", color="#999999", linestyle="-", alpha=0.2)
        # Dashed connector from last historical point to first prediction
        plt.plot(
            [df_stock.index[-1], df_pred.index[0]],
            [df_stock["5. adjusted close"].values[-1], df_pred.values[0]],
            lw=1,
            c="tab:green",
            linestyle="--",
        )
        plt.plot(df_pred.index, df_pred, lw=2, c="tab:green")
        # Shade the prediction window
        plt.axvspan(
            df_stock.index[-1], df_pred.index[-1], facecolor="tab:orange", alpha=0.2
        )
        _, _, ymin, ymax = plt.axis()
        plt.vlines(
            df_stock.index[-1], ymin, ymax, linewidth=1, linestyle="--", color="k"
        )

        # BACKTESTING — overlay the real (held-out) future data in blue
        if ns_parser.s_end_date:
            plt.plot(
                df_future.index,
                df_future["5. adjusted close"],
                lw=2,
                c="tab:blue",
                ls="--",
            )
            plt.plot(
                [df_stock.index[-1], df_future.index[0]],
                [
                    df_stock["5. adjusted close"].values[-1],
                    df_future["5. adjusted close"].values[0],
                ],
                lw=1,
                c="tab:blue",
                linestyle="--",
            )

        if gtff.USE_ION:
            plt.ion()

        plt.show()

        # BACKTESTING — second figure: real vs prediction, and % error
        if ns_parser.s_end_date:
            plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)
            plt.subplot(211)
            plt.plot(
                df_future.index,
                df_future["5. adjusted close"],
                lw=2,
                c="tab:blue",
                ls="--",
            )
            plt.plot(df_pred.index, df_pred, lw=2, c="green")
            plt.scatter(
                df_future.index, df_future["5. adjusted close"], c="tab:blue", lw=3
            )
            plt.plot(
                [df_stock.index[-1], df_future.index[0]],
                [
                    df_stock["5. adjusted close"].values[-1],
                    df_future["5. adjusted close"].values[0],
                ],
                lw=2,
                c="tab:blue",
                ls="--",
            )
            plt.scatter(df_pred.index, df_pred, c="green", lw=3)
            plt.plot(
                [df_stock.index[-1], df_pred.index[0]],
                [df_stock["5. adjusted close"].values[-1], df_pred.values[0]],
                lw=2,
                c="green",
                ls="--",
            )
            plt.title("BACKTESTING: Real data price versus Prediction")
            plt.xlim(df_stock.index[-1], df_pred.index[-1] + datetime.timedelta(days=1))
            plt.xticks(
                [df_stock.index[-1], df_pred.index[-1] + datetime.timedelta(days=1)],
                visible=True,
            )
            plt.ylabel("Share Price ($)")
            plt.grid(b=True, which="major", color="#666666", linestyle="-")
            plt.minorticks_on()
            plt.grid(b=True, which="minor", color="#999999", linestyle="-", alpha=0.2)
            plt.legend(["Real data", "Prediction data"])
            plt.xticks([])

            # Bottom panel: percentage error of the prediction
            plt.subplot(212)
            plt.axhline(y=0, color="k", linestyle="--", linewidth=2)
            plt.plot(
                df_future.index,
                100
                * (df_pred.values - df_future["5. adjusted close"].values)
                / df_future["5. adjusted close"].values,
                lw=2,
                c="red",
            )
            plt.scatter(
                df_future.index,
                100
                * (df_pred.values - df_future["5. adjusted close"].values)
                / df_future["5. adjusted close"].values,
                c="red",
                lw=5,
            )
            plt.title("BACKTESTING: Error between Real data and Prediction [%]")
            plt.plot(
                [df_stock.index[-1], df_future.index[0]],
                [
                    0,
                    100
                    * (df_pred.values[0] - df_future["5. adjusted close"].values[0])
                    / df_future["5. adjusted close"].values[0],
                ],
                lw=2,
                ls="--",
                c="red",
            )
            plt.xlim(df_stock.index[-1], df_pred.index[-1] + datetime.timedelta(days=1))
            plt.xticks(
                [df_stock.index[-1], df_pred.index[-1] + datetime.timedelta(days=1)],
                visible=True,
            )
            plt.xlabel("Time")
            plt.ylabel("Prediction Error (%)")
            plt.grid(b=True, which="major", color="#666666", linestyle="-")
            plt.minorticks_on()
            plt.grid(b=True, which="minor", color="#999999", linestyle="-", alpha=0.2)
            plt.legend(["Real data", "Prediction data"])

            if gtff.USE_ION:
                plt.ion()

            plt.show()

            # Refactor prediction dataframe for backtesting print
            df_pred.name = "Prediction"
            df_pred = df_pred.to_frame()
            df_pred["Real"] = df_future["5. adjusted close"]

            if gtff.USE_COLOR:
                patch_pandas_text_adjustment()
                print("Time Real [$] x Prediction [$]")
                print(
                    df_pred.apply(
                        price_prediction_backtesting_color, axis=1
                    ).to_string()
                )
            else:
                print(df_pred[["Real", "Prediction"]].round(2).to_string())

            print("")
            print_prediction_kpis(df_pred["Real"].values, df_pred["Prediction"].values)

        else:
            # Print prediction data
            print_pretty_prediction(df_pred, df_stock["5. adjusted close"].values[-1])
        print("")

    except SystemExit:
        print("")
    except Exception as e:
        print(e)
        print("")
def latest_news_view(other_args: List[str]):
    """Prints the latest news article list

    Parameters
    ----------
    other_args : List[str]
        argparse other args - ["-i", "123123", "-n", "5"]
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="latest",
        description="""Latest news articles. [Source: Seeking Alpha]""",
    )
    parser.add_argument(
        "-i",
        "--id",
        action="store",
        dest="n_id",
        type=check_positive,
        default=-1,
        help="article ID number",
    )
    parser.add_argument(
        "-n",
        "--num",
        action="store",
        dest="n_num",
        type=check_positive,
        default=10,
        help="number of articles being printed",
    )
    parser.add_argument(
        "-d",
        "--date",
        action="store",
        dest="n_date",
        type=valid_date,
        default=datetime.now().strftime("%Y-%m-%d"),
        help="starting date",
    )

    # A leading bare value is treated as an article ID
    if other_args and "-" not in other_args[0]:
        other_args.insert(0, "-i")

    ns_parser = parse_known_args_and_warn(parser, other_args)
    if not ns_parser:
        return

    # A specific article was requested by its ID
    if ns_parser.n_id != -1:
        article = seeking_alpha_model.get_article_data(ns_parser.n_id)
        publish_stamp = article["publishedAt"]
        print(
            publish_stamp[: publish_stamp.rfind(":") - 3].replace("T", " "),
            " ",
            article["title"],
        )
        print(article["url"])
        print("")
        print(article["content"])
        return

    # Otherwise list the most recent articles, capped at n_num entries
    articles = seeking_alpha_model.get_article_list(ns_parser.n_date, ns_parser.n_num)
    for position, news in enumerate(articles):
        print(
            news["publishedAt"].replace("T", " ").replace("Z", ""),
            "-",
            news["id"],
            "-",
            news["title"],
        )
        print(news["url"])
        print("")
        if position >= ns_parser.n_num - 1:
            break
def screener(other_args: List[str], loaded_preset: str, data_type: str) -> List[str]:
    """Screener

    Prints Finviz screener data for companies that meet the pre-set filter
    and returns the tickers that matched.

    Parameters
    ----------
    other_args : List[str]
        Command line arguments to be processed with argparse
    loaded_preset : str
        Loaded preset filter
    data_type : str
        Data type string between: overview, valuation, financial, ownership, performance, technical

    Returns
    -------
    List[str]
        List of stocks that meet preset criteria (empty on error / no match)
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="screener",
        description="""
        Prints screener data of the companies that meet the pre-set filtering.
        The following information fields are expected: overview, valuation, financial,
        ownership, performance, technical. Note that when the signal parameter (-s)
        is specified, the preset is disregarded. [Source: Finviz]
        """,
    )
    parser.add_argument(
        "-p",
        "--preset",
        action="store",
        dest="preset",
        type=str,
        default=loaded_preset,
        help="Filter presets",
        # Every *.ini file under presets_path is a selectable preset
        choices=[
            preset.split(".")[0]
            for preset in os.listdir(presets_path)
            if preset[-4:] == ".ini"
        ],
    )
    parser.add_argument(
        "-s",
        "--signal",
        action="store",
        dest="signal",
        type=str,
        default=None,
        help="Signal",
        choices=list(d_signals.keys()),
    )
    parser.add_argument(
        "-l",
        "--limit",
        action="store",
        dest="limit",
        type=check_positive,
        default=0,
        help="Limit of stocks to print",
    )
    parser.add_argument(
        "-a",
        "--ascend",
        action="store_true",
        default=False,
        dest="ascend",
        help="Set order to Ascend, the default is Descend",
    )
    parser.add_argument(
        "-e",
        "--export",
        action="store_true",
        default=False,  # explicit False (was implicit None; same truthiness)
        dest="exportFile",
        help="Save list as a text file",
    )
    parser.add_argument(
        "-m",
        "--mill",
        action="store_true",
        default=False,  # explicit False (was implicit None; same truthiness)
        dest="mill",
        help="Run papermill on list",
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return []

        df_screen = get_screener_data(
            ns_parser.preset,
            data_type,
            ns_parser.signal,
            ns_parser.limit,
            ns_parser.ascend,
        )

        if isinstance(df_screen, pd.DataFrame):
            if df_screen.empty:
                return []

            print(df_screen.to_string())
            print("")

            if ns_parser.exportFile:
                now = datetime.now()
                # exist_ok avoids the race between the existence check and mkdir
                os.makedirs("reports/screener", exist_ok=True)
                with open(
                    f"reports/screener/{ns_parser.signal}-{now.strftime('%Y-%m-%d_%H:%M:%S')}",
                    "w",
                ) as file:
                    file.write(df_screen.to_string(index=False) + "\n")
            if ns_parser.mill:
                # One due-diligence report per screened ticker
                for i in range(len(df_screen)):
                    ticker = [df_screen.iat[i, 0]]
                    due_diligence_view.due_diligence_report(ticker)

            return list(df_screen["Ticker"].values)

        print("")
        return []

    except Exception as e:
        print(e, "\n")
        return []
def call_ets(self, other_args: List[str]):
    """Process ets command.

    Parses ``other_args`` and, on success, delegates to
    ``ets_view.display_exponential_smoothing`` for the loaded coin
    (``self.coin`` / ``self.data[self.target]``).
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="ets",
        description="""
            Exponential Smoothing, see https://otexts.com/fpp2/taxonomy.html
            Trend='N', Seasonal='N': Simple Exponential Smoothing
            Trend='N', Seasonal='A': Exponential Smoothing
            Trend='N', Seasonal='M': Exponential Smoothing
            Trend='A', Seasonal='N': Holt’s linear method
            Trend='A', Seasonal='A': Additive Holt-Winters’ method
            Trend='A', Seasonal='M': Multiplicative Holt-Winters’ method
            Trend='Ad', Seasonal='N': Additive damped trend method
            Trend='Ad', Seasonal='A': Exponential Smoothing
            Trend='Ad', Seasonal='M': Holt-Winters’ damped method
            Trend component: N: None, A: Additive, Ad: Additive Damped
            Seasonality component: N: None, A: Additive, M: Multiplicative
        """,
    )
    parser.add_argument(
        "-d",
        "--days",
        action="store",
        dest="n_days",
        type=check_positive,
        default=5,
        help="prediction days.",
    )
    parser.add_argument(
        "-t",
        "--trend",
        action="store",
        dest="trend",
        choices=ets_model.TRENDS,
        default="N",
        help="Trend component: N: None, A: Additive, Ad: Additive Damped.",
    )
    parser.add_argument(
        "-s",
        "--seasonal",
        action="store",
        dest="seasonal",
        choices=ets_model.SEASONS,
        default="N",
        help="Seasonality component: N: None, A: Additive, M: Multiplicative.",
    )
    parser.add_argument(
        "-p",
        "--periods",
        action="store",
        dest="seasonal_periods",
        type=check_positive,
        default=5,
        help="Seasonal periods.",
    )
    parser.add_argument(
        "-e",
        "--end",
        action="store",
        type=valid_date,
        dest="s_end_date",
        default=None,
        help="The end date (format YYYY-MM-DD) to select - Backtesting",
    )
    ns_parser = parse_known_args_and_warn(
        parser, other_args, export_allowed=EXPORT_ONLY_FIGURES_ALLOWED
    )
    if ns_parser:
        if ns_parser.s_end_date:
            # NOTE(review): both backtesting checks only print a warning and
            # then fall through to the forecast call anyway — confirm that
            # aborting (early return) was not the intended behavior here.
            if ns_parser.s_end_date < self.data.index[0]:
                print(
                    "Backtesting not allowed, since End Date is older than Start Date of historical data\n"
                )

            if ns_parser.s_end_date < get_next_stock_market_days(
                last_stock_day=self.data.index[0],
                n_next_days=5 + ns_parser.n_days,
            )[-1]:
                print(
                    "Backtesting not allowed, since End Date is too close to Start Date to train model\n"
                )

        ets_view.display_exponential_smoothing(
            ticker=self.coin,
            values=self.data[self.target],
            n_predict=ns_parser.n_days,
            trend=ns_parser.trend,
            seasonal=ns_parser.seasonal,
            seasonal_periods=ns_parser.seasonal_periods,
            s_end_date=ns_parser.s_end_date,
            export=ns_parser.export,
            time_res=self.resolution,
        )
def call_ta(self, other_args):
    """Process ta command.

    Builds a source-specific argument parser (cp / cg / bin / cb), loads
    price data for the current coin via ``load_ta_data`` and opens the
    technical-analysis sub-menu on the result. Prints a message and returns
    if no coin is loaded.
    """
    from gamestonk_terminal.cryptocurrency.technical_analysis import ta_controller

    # TODO: Play with this to get correct usage
    if self.current_coin:
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="ta",
            description="""Loads data for technical analysis. You can specify currency vs which you want
            to show chart and also number of days to get data for.
            By default currency: usd and days: 60.
            E.g. if you loaded in previous step Ethereum and you want to see it's price vs btc in
            last 90 days range use `ta --vs btc --days 90`""",
        )
        if self.source == "cp":
            parser.add_argument(
                "--vs",
                default="usd",
                dest="vs",
                help="Currency to display vs coin",
                choices=["usd", "btc", "BTC", "USD"],
                type=str,
            )
            parser.add_argument(
                "-d",
                "--days",
                default=60,
                dest="days",
                help="Number of days to get data for",
                type=check_positive,
            )
        if self.source == "cg":
            parser.add_argument(
                "--vs", default="usd", dest="vs", help="Currency to display vs coin"
            )
            parser.add_argument(
                "-d",
                "--days",
                default=60,
                dest="days",
                help="Number of days to get data for",
            )
        if self.source == "bin":
            client = Client(cfg.API_BINANCE_KEY, cfg.API_BINANCE_SECRET)
            interval_map = {
                "1day": client.KLINE_INTERVAL_1DAY,
                "3day": client.KLINE_INTERVAL_3DAY,
                "1hour": client.KLINE_INTERVAL_1HOUR,
                "2hour": client.KLINE_INTERVAL_2HOUR,
                "4hour": client.KLINE_INTERVAL_4HOUR,
                "6hour": client.KLINE_INTERVAL_6HOUR,
                "8hour": client.KLINE_INTERVAL_8HOUR,
                "12hour": client.KLINE_INTERVAL_12HOUR,
                "1week": client.KLINE_INTERVAL_1WEEK,
                "1min": client.KLINE_INTERVAL_1MINUTE,
                "3min": client.KLINE_INTERVAL_3MINUTE,
                "5min": client.KLINE_INTERVAL_5MINUTE,
                "15min": client.KLINE_INTERVAL_15MINUTE,
                "30min": client.KLINE_INTERVAL_30MINUTE,
                "1month": client.KLINE_INTERVAL_1MONTH,
            }
            _, quotes = binance_model.show_available_pairs_for_given_symbol(
                self.current_coin
            )
            parser.add_argument(
                "--vs",
                help="Quote currency (what to view coin vs)",
                dest="vs",
                type=str,
                default="USDT",
                choices=quotes,
            )
            parser.add_argument(
                "-i",
                "--interval",
                help="Interval to get data",
                choices=list(interval_map.keys()),
                dest="interval",
                default="1day",
                type=str,
            )
            parser.add_argument(
                "-l",
                "--limit",
                dest="limit",
                default=100,
                help="Number to get",
                type=check_positive,
            )
        if self.source == "cb":
            # Coinbase granularities in seconds
            interval_map = {
                "1min": 60,
                "5min": 300,
                "15min": 900,
                "1hour": 3600,
                "6hour": 21600,
                "24hour": 86400,
                "1day": 86400,
            }
            _, quotes = coinbase_model.show_available_pairs_for_given_symbol(
                self.current_coin
            )
            # BUGFIX: was `len(quotes) < 0`, which is never true, so an empty
            # quote list fell through to `quotes[0]` below and raised IndexError.
            if len(quotes) == 0:
                print(
                    f"Couldn't find any quoted coins for provided symbol {self.current_coin}"
                )
                return
            parser.add_argument(
                "--vs",
                help="Quote currency (what to view coin vs)",
                dest="vs",
                type=str,
                default="USDT" if "USDT" in quotes else quotes[0],
                choices=quotes,
            )
            parser.add_argument(
                "-i",
                "--interval",
                help="Interval to get data",
                choices=list(interval_map.keys()),
                dest="interval",
                default="1day",
                type=str,
            )
            parser.add_argument(
                "-l",
                "--limit",
                dest="limit",
                default=100,
                help="Number to get",
                type=check_positive,
            )
            # BUGFIX: this whole `if self.source == "cb"` block was duplicated
            # verbatim; the second pass re-registered --vs/-i/-l on the same
            # parser, raising argparse.ArgumentError for every Coinbase coin.
            # The duplicate has been removed.

        ns_parser = parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            # Exchange sources paginate by limit/interval; aggregators by days
            if self.source in ["bin", "cb"]:
                limit = ns_parser.limit
                interval = ns_parser.interval
                days = 0
            else:
                limit = 0
                interval = "1day"
                days = ns_parser.days

            self.current_df, self.current_currency = load_ta_data(
                coin=self.current_coin,
                source=self.source,
                currency=ns_parser.vs,
                days=days,
                limit=limit,
                interval=interval,
            )
            if self.current_currency != "" and not self.current_df.empty:
                self.queue = ta_controller.menu(
                    stock=self.current_df,
                    ticker=self.current_coin,
                    start=self.current_df.index[0],
                    interval="",
                    queue=self.queue,
                )
    else:
        print("No coin selected. Use 'load' to load the coin you want to look at.\n")
def call_knn(self, other_args: List[str]):
    """Process knn command"""
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="knn",
        description="""
            K nearest neighbors is a simple algorithm that stores all
            available cases and predict the numerical target based on a
            similarity measure (e.g. distance functions).
        """,
    )
    # Register options from a spec table; order matches the original parser
    arg_specs = [
        (
            ("-i", "--input"),
            dict(
                action="store",
                dest="n_inputs",
                type=check_positive,
                default=40,
                help="number of days to use as input for prediction.",
            ),
        ),
        (
            ("-d", "--days"),
            dict(
                action="store",
                dest="n_days",
                type=check_positive,
                default=5,
                help="prediction days.",
            ),
        ),
        (
            ("-j", "--jumps"),
            dict(
                action="store",
                dest="n_jumps",
                type=check_positive,
                default=1,
                help="number of jumps in training data.",
            ),
        ),
        (
            ("-n", "--neighbors"),
            dict(
                action="store",
                dest="n_neighbors",
                type=check_positive,
                default=20,
                help="number of neighbors to use on the algorithm.",
            ),
        ),
        (
            ("-e", "--end"),
            dict(
                action="store",
                type=valid_date,
                dest="s_end_date",
                default=None,
                help="The end date (format YYYY-MM-DD) to select for testing",
            ),
        ),
        (
            ("-t", "--test_size"),
            dict(
                default=0.2,
                dest="valid_split",
                type=float,
                help="Percentage of data to validate in sample",
            ),
        ),
        (
            ("--no_shuffle",),
            dict(
                action="store_false",
                dest="no_shuffle",
                default=True,
                help="Specify if shuffling validation inputs.",
            ),
        ),
    ]
    for flags, options in arg_specs:
        parser.add_argument(*flags, **options)

    ns_parser = parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_FIGURES_ALLOWED
    )
    if not ns_parser:
        return

    knn_view.display_k_nearest_neighbors(
        ticker=self.coin,
        data=self.data[self.target],
        n_neighbors=ns_parser.n_neighbors,
        n_input_days=ns_parser.n_inputs,
        n_predict_days=ns_parser.n_days,
        test_size=ns_parser.valid_split,
        end_date=ns_parser.s_end_date,
        no_shuffle=ns_parser.no_shuffle,
        time_res=self.resolution,
    )
def call_find(self, other_args):
    """Process find command"""
    parser = argparse.ArgumentParser(
        prog="find",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="""
            Find similar coin by coin name,symbol or id. If you don't remember exact name or id of the Coin at CoinGecko,
            Binance, Coinbase or CoinPaprika you can use this command to display coins with similar name, symbol or id
            to your search query.
            Example of usage: coin name is something like "polka". So I can try: find -c polka -k name -t 25
            It will search for coin that has similar name to polka and display top 25 matches.
            -c, --coin stands for coin - you provide here your search query
            -k, --key it's a searching key. You can search by symbol, id or name of coin
            -l, --limit it displays top N number of records.""",
    )
    parser.add_argument(
        "-c",
        "--coin",
        help="Symbol Name or Id of Coin",
        dest="coin",
        required="-h" not in other_args,
        type=str,
    )
    parser.add_argument(
        "-k",
        "--key",
        dest="key",
        help="Specify by which column you would like to search: symbol, name, id",
        type=str,
        choices=FIND_KEYS,
        default="symbol",
    )
    parser.add_argument(
        "-l",
        "--limit",
        default=10,
        dest="limit",
        help="Number of records to display",
        type=check_positive,
    )
    parser.add_argument(
        "--source",
        dest="source",
        choices=CRYPTO_SOURCES.keys(),
        default="cg",
        help="Source of data.",
        type=str,
    )

    # A bare leading token is interpreted as the search query
    if other_args and other_args[0][0] != "-":
        other_args.insert(0, "-c")

    parsed = parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if not parsed:
        return

    find(
        coin=parsed.coin,
        source=parsed.source,
        key=parsed.key,
        top=parsed.limit,
        export=parsed.export,
    )
def call_arima(self, other_args: List[str]):
    """Process arima command.

    Parses ``other_args`` and, on success, delegates to
    ``arima_view.display_arima`` for the loaded coin
    (``self.coin`` / ``self.data[self.target]``).
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="arima",
        description="""
            In statistics and econometrics, and in particular in time series analysis, an
            autoregressive integrated moving average (ARIMA) model is a generalization of an
            autoregressive moving average (ARMA) model. Both of these models are fitted to time
            series data either to better understand the data or to predict future points in the
            series (forecasting). ARIMA(p,d,q) where parameters p, d, and q are non-negative
            integers, p is the order (number of time lags) of the autoregressive model, d is the
            degree of differencing (the number of times the data have had past values subtracted),
            and q is the order of the moving-average model.
        """,
    )
    parser.add_argument(
        "-d",
        "--days",
        action="store",
        dest="n_days",
        type=check_positive,
        default=5,
        help="prediction days.",
    )
    parser.add_argument(
        "-i",
        "--ic",
        action="store",
        dest="s_ic",
        type=str,
        default="aic",
        choices=arima_model.ICS,
        help="information criteria.",
    )
    parser.add_argument(
        "-s",
        "--seasonal",
        action="store_true",
        default=False,
        dest="b_seasonal",
        help="Use weekly seasonal data.",
    )
    parser.add_argument(
        "-o",
        "--order",
        action="store",
        dest="s_order",
        default="",
        type=str,
        help="arima model order (p,d,q) in format: p,d,q.",
    )
    parser.add_argument(
        "-r",
        "--results",
        action="store_true",
        dest="b_results",
        default=False,
        help="results about ARIMA summary flag.",
    )
    parser.add_argument(
        "-e",
        "--end",
        action="store",
        type=valid_date,
        dest="s_end_date",
        default=None,
        help="The end date (format YYYY-MM-DD) to select - Backtesting",
    )
    ns_parser = parse_known_args_and_warn(
        parser, other_args, export_allowed=EXPORT_ONLY_FIGURES_ALLOWED
    )
    if ns_parser:
        # BACKTESTING CHECK
        if ns_parser.s_end_date:
            # NOTE(review): both checks only print a warning and then fall
            # through to display_arima anyway — confirm an early return was
            # not intended here.
            if ns_parser.s_end_date < self.data.index[0]:
                print(
                    "Backtesting not allowed, since End Date is older than Start Date of historical data\n"
                )

            if ns_parser.s_end_date < get_next_stock_market_days(
                last_stock_day=self.data.index[0],
                n_next_days=5 + ns_parser.n_days,
            )[-1]:
                print(
                    "Backtesting not allowed, since End Date is too close to Start Date to train model\n"
                )

        arima_view.display_arima(
            dataset=self.coin,
            values=self.data[self.target],
            arima_order=ns_parser.s_order,
            n_predict=ns_parser.n_days,
            seasonal=ns_parser.b_seasonal,
            ic=ns_parser.s_ic,
            results=ns_parser.b_results,
            s_end_date=ns_parser.s_end_date,
            export=ns_parser.export,
            time_res=self.resolution,
        )
def management(other_args: List[str], ticker: str):
    """Display company's managers

    Scrapes the Business Insider markets page for the ticker, extracts the
    "Management" table (names + titles), and prints each manager with a
    Google-search link and, when matched, an insider-activity link.

    Parameters
    ----------
    other_args : List[str]
        argparse other args
    ticker : str
        Stock ticker
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="mgmt",
        description="""
            Print management team. Namely: Name, Title, Information from google and
            (potentially) Insider Activity page. [Source: Business Insider]
        """,
    )
    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return

        url_market_business_insider = (
            f"https://markets.businessinsider.com/stocks/{ticker.lower()}-stock"
        )
        text_soup_market_business_insider = BeautifulSoup(
            requests.get(
                url_market_business_insider,
                headers={"User-Agent": get_user_agent()},
            ).text,
            "lxml",
        )

        # Map each underlined <h2> section header to its following table
        found_h2s = dict()
        for next_h2 in text_soup_market_business_insider.findAll(
            "h2", {"class": "header-underline"}
        ):
            next_table = next_h2.find_next_sibling("table", {"class": "table"})
            if next_table:
                found_h2s[next_h2.text] = next_table

        if found_h2s.get("Management") is None:
            print(f"No management information in Business Insider for {ticker}")
            print("")
            return

        # Titles: right-aligned cells that contain letters but not "USD"
        # (filters out numeric/compensation columns)
        l_titles = list()
        for s_title in found_h2s["Management"].findAll(
            "td", {"class": "table__td text-right"}
        ):
            if any(c.isalpha() for c in s_title.text.strip()) and (
                "USD" not in s_title.text.strip()
            ):
                l_titles.append(s_title.text.strip())

        # Names: wrapping cells of the same table
        l_names = list()
        for s_name in found_h2s["Management"].findAll(
            "td", {"class": "table__td table--allow-wrap"}
        ):
            l_names.append(s_name.text.strip())

        # Align names to titles by taking the last len(l_titles) names
        df_management = pd.DataFrame(
            {"Name": l_names[-len(l_titles):], "Title": l_titles},
            columns=["Name", "Title"],
        )
        df_management["Info"] = "-"
        df_management["Insider Activity"] = "-"
        df_management = df_management.set_index("Name")

        for s_name in df_management.index:
            # NOTE(review): chained assignment (.loc[...][...]) may trigger
            # pandas SettingWithCopyWarning — confirm the write sticks.
            df_management.loc[s_name][
                "Info"
            ] = f"http://www.google.com/search?q={s_name} {ticker.upper()}".replace(
                " ", "%20"
            )

        # Fuzzy-match insider-activity links against the manager names
        s_url_base = "https://markets.businessinsider.com"
        for insider in text_soup_market_business_insider.findAll(
            "a", {"onclick": "silentTrackPI()"}
        ):
            for s_name in df_management.index:
                if fuzz.token_set_ratio(s_name, insider.text.strip()) > 70:
                    df_management.loc[s_name]["Insider Activity"] = (
                        s_url_base + insider.attrs["href"]
                    )

        # Print each manager, padding names/titles to the widest entry
        for ind in df_management.index:
            s_name = f"{ind}{(max([len(x) for x in df_management.index])-len(ind))*' '}"
            df_mgmt_title = df_management["Title"]
            spaces = max([len(x) for x in df_mgmt_title]) - len(df_mgmt_title[ind])
            s_title = f"{df_mgmt_title[ind]}{spaces * ' '}"
            s_management = f"""{s_name} {s_title} {df_management['Info'][ind]}"""
            print(s_management)
            if df_management["Insider Activity"][ind] not in "-":
                print(f"{df_management['Insider Activity'][ind]}")
            print("")

    except Exception as e:
        print(e)
        print("")
        return
def sec_fillings(other_args: List[str], ticker: str):
    """Display SEC filings for a given stock ticker

    Scrapes the Market Watch SEC-filings page, rebuilds the filings table as
    a DataFrame and prints the latest ``-n`` rows.

    Parameters
    ----------
    other_args : List[str]
        argparse other args - ["-n", "10"]
    ticker : str
        Stock ticker
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="sec",
        description="""
            Prints SEC filings of the company. The following fields are expected: Filing Date,
            Document Date, Type, Category, Amended, and Link. [Source: Market Watch]
        """,
    )
    parser.add_argument(
        "-n",
        "--num",
        action="store",
        dest="n_num",
        type=check_positive,
        default=5,
        help="number of latest SEC filings.",
    )
    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return

        pd.set_option("display.max_colwidth", None)

        url_financials = f"https://www.marketwatch.com/investing/stock/{ticker}/financials/secfilings"
        text_soup_financials = BeautifulSoup(
            requests.get(url_financials, headers={"User-Agent": get_user_agent()}).text,
            "lxml",
        )

        # a_financials_header = list()
        df_financials = None
        # Rows before the header row are skipped; once the header row is seen
        # this flag turns on and subsequent rows are appended as data.
        b_ready_to_process_info = False
        soup_financials = text_soup_financials.findAll("tr", {"class": "table__row"})
        for financials_info in soup_financials:
            a_financials = financials_info.text.split("\n")
            # If header has been processed and dataframe created ready to populate the SEC information
            if b_ready_to_process_info:
                # Cell layout: index 2 is the filing date, 5:-1 the remaining
                # columns, plus the row's anchor href as the Link column
                l_financials_info = [a_financials[2]]
                l_financials_info.extend(a_financials[5:-1])
                l_financials_info.append(financials_info.a["href"])
                # Append data values to financials
                df_financials.loc[len(df_financials.index)] = l_financials_info  # type: ignore
            if "Filing Date" in a_financials:
                l_financials_header = [a_financials[2]]
                l_financials_header.extend(a_financials[5:-1])
                l_financials_header.append("Link")
                df_financials = pd.DataFrame(columns=l_financials_header)
                # NOTE(review): result is discarded (set_index is not in-place
                # without assignment); harmless no-op — the real set_index with
                # assignment happens after the loop.
                df_financials.set_index("Filing Date")
                b_ready_to_process_info = True

        # Set Filing Date as index
        df_financials = df_financials.set_index("Filing Date")  # type: ignore
        print(df_financials.head(n=ns_parser.n_num).to_string())
        print("")
    except Exception as e:
        print(e)
        print("")
        return
def quantile(
    other_args: List[str], s_ticker: str, s_interval: str, df_stock: pd.DataFrame
):
    """Overlay Median & Quantile

    Plots the share price together with a rolling median and a rolling
    quantile of the close price.

    Parameters
    ----------
    other_args: List[str]
        Argparse arguments
    s_ticker: str
        Ticker
    s_interval: str
        Data interval
    df_stock: pd.DataFrame
        Dataframe of dates and prices
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="quantile",
        description="""
            The quantiles are values which divide the distribution such that
            there is a given proportion of observations below the quantile.
            For example, the median is a quantile. The median is the central
            value of the distribution, such that half the points are less than
            or equal to it and half are greater than or equal to it.

            By default, q is set at 0.5, which effectively is median. Change q to
            get the desired quantile (0<q<1).
        """,
    )
    parser.add_argument(
        "-l",
        "--length",
        action="store",
        dest="n_length",
        type=check_positive,
        default=14,
        help="length",
    )
    parser.add_argument(
        "-o",
        "--offset",
        action="store",
        dest="n_offset",
        type=check_positive,
        default=0,
        help="offset",
    )
    parser.add_argument(
        "-q",
        "--quantile",
        action="store",
        dest="f_quantile",
        type=check_proportion_range,
        default=0.5,
        help="quantile",
    )
    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return

        # Daily data carries an adjusted close; intraday data only has "Close".
        # BUGFIX: the intraday branch previously computed the median on
        # df_stock["Adj Close"], which does not exist for intraday data and
        # raised a KeyError that was silently swallowed by the except below.
        if s_interval == "1440min":
            close_col = "Adj Close"
        else:
            close_col = "Close"

        fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        plt.plot(df_stock.index, df_stock[close_col].values, color="fuchsia")
        ax.set_title(f"{s_ticker} Median & Quantile")

        df_ta_ = ta.median(
            close=df_stock[close_col],
            length=ns_parser.n_length,
            offset=ns_parser.n_offset,
        ).dropna()
        df_ta = ta.quantile(
            df_stock[close_col],
            length=ns_parser.n_length,
            offset=ns_parser.n_offset,
            q=ns_parser.f_quantile,
        ).dropna()

        plt.plot(df_ta_.index, df_ta_.values, "g", lw=1, label="median")
        plt.plot(df_ta.index, df_ta.values, "b", lw=1, label="quantile")

        plt.title(f"Median & Quantile on {s_ticker}")
        plt.xlim(df_stock.index[0], df_stock.index[-1])
        plt.xlabel("Time")
        plt.ylabel(f"{s_ticker} Price ($)")

        plt.grid(b=True, which="major", color="#666666", linestyle="-")
        plt.minorticks_on()
        plt.grid(b=True, which="minor", color="#999999", linestyle="-", alpha=0.2)

        if gtff.USE_ION:
            plt.ion()

        plt.gcf().autofmt_xdate()
        fig.tight_layout(pad=1)
        plt.legend()
        plt.show()
        print("")

    except Exception as e:
        print(e, "\n")
def call_rsi(self, other_args: List[str]):
    """Call RSI Strategy

    Backtests a strategy that buys when the RSI is below the low threshold
    and shorts when it exceeds the high threshold, delegating to
    ``bt_view.display_rsi_strategy``.
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="rsi",
        description="""Strategy that buys when the stock is less than a threshold
        and shorts when it exceeds a threshold.""",
    )
    parser.add_argument(
        "-p",
        "--periods",
        dest="periods",
        help="Number of periods for RSI calculation",
        type=check_positive,
        default=14,
    )
    parser.add_argument(
        "-u",
        "--high",
        default=70,
        dest="high",
        type=check_positive,
        help="High (upper) RSI Level",
    )
    parser.add_argument(
        "-l",
        "--low",
        default=30,
        dest="low",
        type=check_positive,
        help="Low RSI Level",
    )
    parser.add_argument(
        "--spy",
        action="store_true",
        default=False,
        help="Flag to add spy hold comparison",
        dest="spy",
    )
    parser.add_argument(
        "--no_bench",
        action="store_true",
        default=False,
        help="Flag to not show buy and hold comparison",
        dest="no_bench",
    )
    parser.add_argument(
        "--no_short",
        action="store_false",
        default=True,
        dest="shortable",
        help="Flag that disables the short sell",
    )
    ns_parser = parse_known_args_and_warn(
        parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if ns_parser:
        if ns_parser.high < ns_parser.low:
            # BUGFIX: message previously read "higher than Low RSI value",
            # which compared Low against itself and confused the user.
            print("Low RSI value is higher than High RSI value\n")

        bt_view.display_rsi_strategy(
            ticker=self.ticker,
            df_stock=self.stock,
            periods=ns_parser.periods,
            low_rsi=ns_parser.low,
            high_rsi=ns_parser.high,
            spy_bt=ns_parser.spy,
            no_bench=ns_parser.no_bench,
            shortable=ns_parser.shortable,
            export=ns_parser.export,
        )
def orders(l_args):
    """Print the top stocks ordered by Fidelity customers.

    Scrapes Fidelity's top-orders page, reassembles the table cells into rows
    and prints the first ``-n`` rows (Latest News column is dropped from the
    printout). Buy/Sell Ratio and Price Change are colorized when USE_COLOR.
    """
    parser = argparse.ArgumentParser(
        prog="orders",
        description="""
            Orders by Fidelity customers. Information shown in the table below
            is based on the volume of orders entered on the "as of" date shown.
            Securities identified are not recommended or endorsed by Fidelity
            and are displayed for informational purposes only. [Source: Fidelity]
        """,
    )
    parser.add_argument(
        "-n",
        "--num",
        action="store",
        dest="n_num",
        type=check_positive,
        default=10,
        help="Number of top ordered stocks to be printed.",
    )
    # NOTE(review): ns_parser is used below without a None check — if parsing
    # fails this raises AttributeError at the final print. Confirm intended.
    ns_parser = parse_known_args_and_warn(parser, l_args)

    url_orders = (
        "https://eresearch.fidelity.com/eresearch/gotoBL/fidelityTopOrders.jhtml"
    )
    text_soup_url_orders = BeautifulSoup(
        requests.get(url_orders, headers={"User-Agent": get_user_agent()}).text,
        "lxml",
    )

    l_orders = list()
    l_orders_vals = list()
    idx = 0
    # All table cells of interest, in document order; 7 cells per stock row
    order_list = text_soup_url_orders.findAll(
        "td",
        {
            "class": [
                "second",
                "third",
                "fourth",
                "fifth",
                "sixth",
                "seventh",
                "eight",
            ]
        },
    )
    for an_order in order_list:
        # Columns 3, 4 and 6 (1-based within the row): value lives in the
        # second child node rather than the cell text
        if ((idx + 1) % 3 == 0) or ((idx + 1) % 4 == 0) or ((idx + 1) % 6 == 0):
            if not an_order:
                l_orders_vals.append("")
            else:
                l_orders_vals.append(an_order.contents[1])
        # Column 5: extract the value from the cell's title="..." attribute
        elif (idx + 1) % 5 == 0:
            s_orders = str(an_order)
            l_orders_vals.append(
                s_orders[
                    s_orders.find('title="') + len('title="') : s_orders.find('"/>')
                ]
            )
        # Remaining columns: plain cell text
        else:
            l_orders_vals.append(an_order.text.strip())

        idx += 1

        # Add value to dictionary
        # After the 7th cell (idx==7 → (idx+1)%8==0) the row is complete
        if (idx + 1) % 8 == 0:
            l_orders.append(l_orders_vals)
            l_orders_vals = list()
            idx = 0

    df_orders = pd.DataFrame(
        l_orders,
        columns=[
            "Symbol",
            "Company",
            "Price Change",
            "# Buy Orders",
            "Buy / Sell Ratio",
            "# Sell Orders",
            "Latest News",
        ],
    )
    # Reorder columns for display
    df_orders = df_orders[
        [
            "Symbol",
            "Buy / Sell Ratio",
            "Price Change",
            "Company",
            "# Buy Orders",
            "# Sell Orders",
            "Latest News",
        ]
    ]

    # Print the "as of" source line from the page as a header
    print(
        text_soup_url_orders.findAll("span", {"class": "source"})[0].text.capitalize()
        + ":"
    )

    pd.set_option("display.max_colwidth", None)

    if USE_COLOR:
        df_orders["Buy / Sell Ratio"] = df_orders["Buy / Sell Ratio"].apply(
            buy_sell_ratio_color_red_green
        )
        df_orders["Price Change"] = df_orders["Price Change"].apply(
            price_change_color_red_green
        )
        patch_pandas_text_adjustment()

    # Drop the last column (Latest News) from the printout
    print(df_orders.head(n=ns_parser.n_num).iloc[:, :-1].to_string(index=False))
    print("")