Ejemplo n.º 1
0
 def call_ford(self, other_args: List[str]):
     """Process ford command"""
     # Argument parser for the `ford` (Fidelity customer orders) command.
     ford_parser = argparse.ArgumentParser(
         add_help=False,
         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
         prog="ford",
         description="""
             Orders by Fidelity customers. Information shown in the table below
             is based on the volume of orders entered on the "as of" date shown. Securities
             identified are not recommended or endorsed by Fidelity and are displayed for
             informational purposes only. [Source: Fidelity]
         """,
     )
     ford_parser.add_argument(
         "-l",
         "--limit",
         action="store",
         dest="limit",
         type=check_int_range(1, 25),
         default=5,
         help="Limit of stocks to display.",
     )
     # A leading bare value (e.g. `ford 10`) is treated as the limit.
     if other_args and other_args[0][0] != "-":
         other_args.insert(0, "-l")
     ns_parser = parse_known_args_and_warn(
         ford_parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
     )
     if not ns_parser:
         return
     fidelity_view.orders_view(
         num=ns_parser.limit,
         export=ns_parser.export,
     )
Ejemplo n.º 2
0
 def call_infer(self, other_args: List[str]):
     """Process infer command"""
     # Argument parser for the `infer` (quick Twitter sentiment) command.
     infer_parser = argparse.ArgumentParser(
         add_help=False,
         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
         prog="infer",
         description="""
             Print quick sentiment inference from last tweets that contain the ticker.
             This model splits the text into character-level tokens and uses vader sentiment analysis.
             [Source: Twitter]
         """,
     )
     infer_parser.add_argument(
         "-l",
         "--limit",
         action="store",
         dest="limit",
         type=check_int_range(10, 100),
         default=100,
         help="limit of latest tweets to infer from.",
     )
     # A leading bare value is treated as the tweet limit.
     if other_args and other_args[0][0] != "-":
         other_args.insert(0, "-l")
     ns_parser = parse_known_args_and_warn(
         infer_parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
     )
     if not ns_parser:
         return
     # Inference requires a loaded ticker.
     if not self.ticker:
         console.print(
             "No ticker loaded. Please load using 'load <ticker>'\n")
         return
     twitter_view.display_inference(ticker=self.ticker, num=ns_parser.limit)
Ejemplo n.º 3
0
 def call_asc(self, other_args: List[str]):
     """Process asc command"""
     # Argument parser for the `asc` (aggressive small caps) command.
     asc_parser = argparse.ArgumentParser(
         add_help=False,
         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
         prog="asc",
         description="""
             Print up to 25 small cap stocks with earnings growth rates better than 25%. [Source: Yahoo Finance]
         """,
     )
     asc_parser.add_argument(
         "-l",
         "--limit",
         action="store",
         dest="limit",
         type=check_int_range(1, 25),
         default=5,
         help="Limit of stocks to display.",
     )
     # A leading bare value is treated as the limit.
     if other_args and other_args[0][0] != "-":
         other_args.insert(0, "-l")
     ns_parser = parse_known_args_and_warn(
         asc_parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
     )
     if not ns_parser:
         return
     yahoofinance_view.display_asc(
         num_stocks=ns_parser.limit,
         export=ns_parser.export,
     )
Ejemplo n.º 4
0
 def call_shorted(self, other_args: List[str]):
     """Process shorted command"""
     # Argument parser for the `shorted` (most-shorted tickers) command.
     shorted_parser = argparse.ArgumentParser(
         add_help=False,
         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
         prog="shorted",
         description=
         "Print up to 25 top ticker most shorted. [Source: Yahoo Finance]",
     )
     shorted_parser.add_argument(
         "-l",
         "--limit",
         action="store",
         dest="limit",
         type=check_int_range(1, 25),
         default=10,
         help="Limit of the most shorted stocks to retrieve.",
     )
     # A leading bare value is treated as the limit.
     if other_args and other_args[0][0] != "-":
         other_args.insert(0, "-l")
     ns_parser = parse_known_args_and_warn(
         shorted_parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
     )
     if not ns_parser:
         return
     yahoofinance_view.display_most_shorted(
         num_stocks=ns_parser.limit,
         export=ns_parser.export,
     )
Ejemplo n.º 5
0
 def call_hsi(self, other_args: List[str]):
     """Process hsi command: show top heavily-shorted stocks."""
     parser = argparse.ArgumentParser(
         add_help=False,
         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
         prog="hsi",
         description="""
             Print top stocks being more heavily shorted. HighShortInterest.com provides
             a convenient sorted database of stocks which have a short interest of over
             20 percent. Additional key data such as the float, number of outstanding shares,
             and company industry is displayed. Data is presented for the Nasdaq Stock Market,
             the New York Stock Exchange, and the American Stock Exchange. [Source: www.highshortinterest.com]
         """,
     )
     parser.add_argument(
         "-l",
         "--limit",
         action="store",
         dest="limit",
         type=check_int_range(1, 25),
         default=10,
         help="Limit of the top heavily shorted stocks to retrieve.",
     )
     # Consistency fix: like the sibling commands (ford/asc/shorted), accept
     # a leading bare number as the limit, e.g. `hsi 10`.
     if other_args and "-" not in other_args[0][0]:
         other_args.insert(0, "-l")
     ns_parser = parse_known_args_and_warn(parser, other_args,
                                           EXPORT_ONLY_RAW_DATA_ALLOWED)
     if ns_parser:
         shortinterest_view.high_short_interest(
             num=ns_parser.limit,
             export=ns_parser.export,
         )
Ejemplo n.º 6
0
    def call_infer(self, other_args: List[str]):
        """Process infer command"""
        # Argument parser for the `infer` (quick Twitter sentiment) command.
        infer_parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="infer",
            description="""
                Print quick sentiment inference from last tweets that contain the ticker.
                This model splits the text into character-level tokens and uses vader sentiment analysis.
                [Source: Twitter]
            """,
        )
        infer_parser.add_argument(
            "-n",
            "--num",
            action="store",
            dest="n_num",
            type=check_int_range(10, 100),
            default=100,
            help="num of latest tweets to infer from.",
        )

        try:
            ns_parser = parse_known_args_and_warn(infer_parser, other_args)
            if not ns_parser:
                return
            # Only run inference when a ticker has been loaded.
            if self._check_ticker():
                twitter_view.display_inference(
                    ticker=self.ticker, num=ns_parser.n_num
                )
        except Exception as e:
            print(e, "\n")
Ejemplo n.º 7
0
 def call_sentiment(self, other_args: List[str]):
     """Process sentiment command"""
     # Argument parser for the `sentiment` (in-depth Twitter sentiment) command.
     sentiment_parser = argparse.ArgumentParser(
         add_help=False,
         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
         prog="sentiment",
         description="""
             Plot in-depth sentiment predicted from tweets from last days
             that contain pre-defined ticker. [Source: Twitter]
         """,
     )
     # in reality this argument could be 100, but after testing it takes too long
     # to compute which may not be acceptable
     # TODO: use https://github.com/twintproject/twint instead of twitter API
     sentiment_parser.add_argument(
         "-l",
         "--limit",
         action="store",
         dest="limit",
         type=check_int_range(10, 62),
         default=15,
         help="limit of tweets to extract per hour.",
     )
     sentiment_parser.add_argument(
         "-d",
         "--days",
         action="store",
         dest="n_days_past",
         type=check_int_range(1, 6),
         default=6,
         help="number of days in the past to extract tweets.",
     )
     # A leading bare value is treated as the hourly tweet limit.
     if other_args and other_args[0][0] != "-":
         other_args.insert(0, "-l")
     ns_parser = parse_known_args_and_warn(
         sentiment_parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
     )
     if not ns_parser:
         return
     # The plot requires a loaded ticker.
     if not self.ticker:
         console.print(
             "No ticker loaded. Please load using 'load <ticker>'\n")
         return
     twitter_view.display_sentiment(
         ticker=self.ticker,
         n_tweets=ns_parser.limit,
         n_days_past=ns_parser.n_days_past,
         export=ns_parser.export,
     )
Ejemplo n.º 8
0
 def call_sentiment(self, other_args: List[str]):
     """Process sentiment command: plot in-depth Twitter sentiment."""
     parser = argparse.ArgumentParser(
         add_help=False,
         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
         prog="sentiment",
         description="""
             Plot in-depth sentiment predicted from tweets from last days
             that contain pre-defined ticker. [Source: Twitter]
         """,
     )
     # in reality this argument could be 100, but after testing it takes too long
     # to compute which may not be acceptable
     parser.add_argument(
         "-n",
         "--num",
         action="store",
         dest="n_tweets",
         type=check_int_range(10, 62),
         default=15,
         help="number of tweets to extract per hour.",
     )
     parser.add_argument(
         "-d",
         "--days",
         action="store",
         dest="n_days_past",
         type=check_int_range(1, 6),
         default=6,
         help="number of days in the past to extract tweets.",
     )
     # Bug fix: ns_parser.export is read below, but no --export argument was
     # ever defined, so every run raised AttributeError (silently swallowed
     # by the broad except). Define it the same way sibling commands do.
     parser.add_argument(
         "--export",
         choices=["csv", "json", "xlsx"],
         default="",
         type=str,
         dest="export",
         help="Export dataframe data to csv,json,xlsx file",
     )
     try:
         ns_parser = parse_known_args_and_warn(parser, other_args)
         if not ns_parser:
             return
         if self._check_ticker():
             twitter_view.display_sentiment(
                 ticker=self.ticker,
                 n_tweets=ns_parser.n_tweets,
                 n_days_past=ns_parser.n_days_past,
                 export=ns_parser.export,
             )
     except Exception as e:
         print(e, "\n")
Ejemplo n.º 9
0
    def call_ugs(self, other_args: List[str]):
        """Process ugs command"""
        # Argument parser for the `ugs` (undervalued growth stocks) command.
        ugs_parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="ugs",
            description="""
                Print up to 25 undervalued stocks with revenue and earnings growth in excess of 25%.
                [Source: Yahoo Finance]
            """,
        )
        ugs_parser.add_argument(
            "-n",
            "--num",
            action="store",
            dest="num",
            type=check_int_range(1, 25),
            default=5,
            help="Number of stocks to display.",
        )
        ugs_parser.add_argument(
            "--export",
            choices=["csv", "json", "xlsx"],
            default="",
            type=str,
            dest="export",
            help="Export dataframe data to csv,json,xlsx file",
        )
        try:
            # A leading bare value is treated as the number of stocks.
            if other_args and "-" not in other_args[0]:
                other_args.insert(0, "-n")

            ns_parser = parse_known_args_and_warn(ugs_parser, other_args)
            if not ns_parser:
                return

            yahoofinance_view.display_ugs(
                num_stocks=ns_parser.num,
                export=ns_parser.export,
            )
        except Exception as e:
            print(e, "\n")
Ejemplo n.º 10
0
    def call_shorted(self, other_args: List[str]):
        """Process shorted command"""
        # Argument parser for the `shorted` (most-shorted tickers) command.
        shorted_parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="shorted",
            description=
            "Print up to 25 top ticker most shorted. [Source: Yahoo Finance]",
        )
        shorted_parser.add_argument(
            "-n",
            "--num",
            action="store",
            dest="num",
            type=check_int_range(1, 25),
            default=5,
            help="Number of the most shorted stocks to retrieve.",
        )
        shorted_parser.add_argument(
            "--export",
            choices=["csv", "json", "xlsx"],
            default="",
            type=str,
            dest="export",
            help="Export dataframe data to csv,json,xlsx file",
        )
        try:
            # A leading bare value is treated as the number of stocks.
            if other_args and "-" not in other_args[0]:
                other_args.insert(0, "-n")

            ns_parser = parse_known_args_and_warn(shorted_parser, other_args)
            if not ns_parser:
                return

            yahoofinance_view.display_most_shorted(
                num_stocks=ns_parser.num,
                export=ns_parser.export,
            )
        except Exception as e:
            print(e, "\n")
Ejemplo n.º 11
0
def sentiment(other_args: List[str], s_ticker: str):
    """
    Plot sentiments from ticker
    Parameters
    ----------
    other_args: List[str]
        Argparse arguments
    s_ticker: str
        Stock to get sentiment for

    """
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="sen",
        description="""
            Plot in-depth sentiment predicted from tweets from last days
            that contain pre-defined ticker. [Source: Twitter]
        """,
    )

    # in reality this argument could be 100, but after testing it takes too long
    # to compute which may not be acceptable
    parser.add_argument(
        "-n",
        "--num",
        action="store",
        dest="n_tweets",
        type=check_int_range(10, 62),
        default=15,
        help="number of tweets to extract per hour.",
    )
    parser.add_argument(
        "-d",
        "--days",
        action="store",
        dest="n_days_past",
        type=check_int_range(1, 6),
        default=6,
        help="number of days in the past to extract tweets.",
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return

        # Date format string required by twitter
        dtformat = "%Y-%m-%dT%H:%M:%SZ"

        # Algorithm to extract: walk backwards in one-hour windows, starting
        # 20 seconds in the past, until n_days_past days have been covered.
        dt_recent = datetime.now() - timedelta(seconds=20)
        dt_old = dt_recent - timedelta(days=ns_parser.n_days_past)
        print(
            f"From {dt_recent.date()} retrieving {ns_parser.n_tweets*24} tweets ({ns_parser.n_tweets} tweets/hour)"
        )

        df_tweets = pd.DataFrame(columns=[
            "created_at",
            "text",
            "sentiment",
            "positive",
            "negative",
            "neutral",
        ])
        while True:
            # Iterate until we haven't passed the old number of days
            if dt_recent < dt_old:
                break
            # Update past datetime
            dt_past = dt_recent - timedelta(minutes=60)

            temp = load_analyze_tweets(
                s_ticker,
                ns_parser.n_tweets,
                start_time=dt_past.strftime(dtformat),
                end_time=dt_recent.strftime(dtformat),
            )

            # NOTE(review): an empty hour aborts the whole command without a
            # plot — presumably a bail-out on API failure; confirm intended.
            if temp.empty:
                return

            df_tweets = pd.concat([df_tweets, temp])

            if dt_past.day < dt_recent.day:
                print(
                    f"From {dt_past.date()} retrieving {ns_parser.n_tweets*24} tweets ({ns_parser.n_tweets} tweets/hour)"
                )

            # Update recent datetime
            dt_recent = dt_past

        # Sort tweets per date
        df_tweets.sort_index(ascending=False, inplace=True)
        df_tweets["cumulative_compound"] = df_tweets["sentiment"].cumsum()
        df_tweets["prob_sen"] = 1

        # df_tweets.to_csv(r'notebooks/tweets.csv', index=False)
        df_tweets.reset_index(inplace=True)
        df_tweets["Month"] = pd.to_datetime(
            df_tweets["created_at"]).apply(lambda x: x.month)
        df_tweets["Day"] = pd.to_datetime(
            df_tweets["created_at"]).apply(lambda x: x.day)
        df_tweets["date"] = pd.to_datetime(df_tweets["created_at"])
        df_tweets = df_tweets.sort_values(by="date")
        df_tweets["cumulative_compound"] = df_tweets["sentiment"].cumsum()
        _, ax = plt.subplots(2,
                             1,
                             figsize=plot_autoscale(),
                             dpi=cfg_plot.PLOT_DPI)
        # Top panel: cumulative VADER sentiment across the whole window.
        ax[0].plot(
            pd.to_datetime(df_tweets["created_at"]),
            df_tweets["cumulative_compound"].values,
            lw=3,
            c="cyan",
        )
        ax[0].set_ylabel("Cumulative VADER Sentiment")
        xlocations = []
        xlabels = []
        # Per-day cumulative curves, plus one x-tick per day.
        for _, day_df in df_tweets.groupby(by="Day"):
            day_df["time"] = pd.to_datetime(day_df["created_at"])
            day_df = day_df.sort_values(by="time")
            ax[0].plot(day_df["time"],
                       day_df["sentiment"].cumsum(),
                       c="tab:blue")
            xlocations.append(day_df.time.values[0])
            xlabels.append(
                day_df["time"].apply(lambda x: x.strftime("%m-%d")).values[0])

        # Bug fix: this positive-bar plot used to live inside the loop above,
        # redrawing the entire series once per day group. Draw it once, like
        # the negative-bar plot below.
        ax[1].bar(df_tweets["date"],
                  df_tweets["positive"],
                  color="green",
                  width=0.02)
        ax[1].bar(df_tweets["date"],
                  -1 * df_tweets["negative"],
                  color="red",
                  width=0.02)
        ax[0].grid(b=True,
                   which="major",
                   color="#666666",
                   linestyle="-",
                   lw=1.5,
                   alpha=0.5)
        ax[0].minorticks_on()
        ax[0].grid(b=True,
                   which="minor",
                   color="#999999",
                   linestyle="-",
                   alpha=0.2)
        ax[0].set_xticks(xlocations)
        ax[0].set_xticklabels(xlabels)

        ax[1].grid(b=True,
                   which="major",
                   color="#666666",
                   linestyle="-",
                   lw=1.5,
                   alpha=0.5)
        ax[1].minorticks_on()
        ax[1].grid(b=True,
                   which="minor",
                   color="#999999",
                   linestyle="-",
                   alpha=0.2)
        ax[1].set_ylabel("VADER Polarity Scores")
        ax[1].set_xticks(xlocations)
        ax[1].set_xticklabels(xlabels)
        plt.suptitle(
            f"Twitter's {s_ticker} total compound sentiment over time is {np.sum(df_tweets['sentiment'])}"
        )
        if gtff.USE_ION:
            plt.ion()
        plt.show()
        print("")

    except Exception as e:
        print(e, "\n")
Ejemplo n.º 12
0
def inference(other_args: List[str], s_ticker: str):
    """
    Infer sentiment from past n tweets
    Parameters
    ----------
    other_args: List[str]
        Arguments for argparse
    s_ticker: str
        Stock ticker

    """
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="infer",
        description="""
            Print quick sentiment inference from last tweets that contain the ticker.
            This model splits the text into character-level tokens and uses vader sentiment analysis.
            [Source: Twitter]
        """,
    )
    parser.add_argument(
        "-n",
        "--num",
        action="store",
        dest="n_num",
        type=check_int_range(10, 100),
        default=100,
        help="num of latest tweets to infer from.",
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return

        df_tweets = load_analyze_tweets(s_ticker, ns_parser.n_num)
        if df_tweets.empty:
            return

        # Time span covered by the analyzed tweets (rows are newest-first).
        dt_from = dparse.parse(df_tweets["created_at"].values[-1])
        dt_to = dparse.parse(df_tweets["created_at"].values[0])
        print(f"From: {dt_from.strftime('%Y-%m-%d %H:%M:%S')}")
        print(f"To:   {dt_to.strftime('%Y-%m-%d %H:%M:%S')}")
        print(f"{len(df_tweets)} tweets were analyzed.")

        # Average number of seconds between consecutive tweets.
        n_freq = (dt_to - dt_from).total_seconds() / len(df_tweets)
        print(f"Frequency of approx 1 tweet every {round(n_freq)} seconds.")

        positive = df_tweets["positive"]
        negative = df_tweets["negative"]
        # Fraction of tweets where one polarity strictly dominates the other.
        percent_pos = len(np.where(positive > negative)[0]) / len(df_tweets)
        percent_neg = len(np.where(positive < negative)[0]) / len(df_tweets)
        total_sent = np.round(np.sum(df_tweets["sentiment"]), 2)
        mean_sent = np.round(np.mean(df_tweets["sentiment"]), 2)
        print(f"The summed compound sentiment of {s_ticker} is: {total_sent}")
        print(f"The average compound sentiment of {s_ticker} is: {mean_sent}")
        print(
            f"Of the last {len(df_tweets)} tweets, {100*percent_pos:.2f} % had a higher positive sentiment"
        )
        print(
            f"Of the last {len(df_tweets)} tweets, {100*percent_neg:.2f} % had a higher negative sentiment"
        )
        print("")

    except Exception as e:
        print(e, "\n")
Ejemplo n.º 13
0
    def call_whales(self, other_args: List[str]):
        """Process whales command"""
        # Argument parser for the `whales` (large crypto transactions) command.
        whales_parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="whales",
            description="""
                Display crypto whales transactions.
                [Source: https://docs.whale-alert.io/]
            """,
        )
        whales_parser.add_argument(
            "-m",
            "--min",
            dest="min",
            type=check_int_range(500000, 100**7),
            help="Minimum value of transactions.",
            default=1000000,
        )
        whales_parser.add_argument(
            "-l",
            "--limit",
            dest="limit",
            type=check_positive,
            help="display N number records",
            default=10,
        )
        whales_parser.add_argument(
            "-s",
            "--sort",
            dest="sortby",
            type=str,
            help="Sort by given column. Default: date",
            default="date",
            choices=whale_alert_model.FILTERS,
        )
        whales_parser.add_argument(
            "--descend",
            action="store_false",
            help="Flag to sort in descending order (lowest first)",
            dest="descend",
            default=True,
        )
        whales_parser.add_argument(
            "-a",
            "--address",
            dest="address",
            action="store_true",
            help="Flag to show addresses of transaction",
            default=False,
        )

        ns_parser = parse_known_args_and_warn(
            whales_parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if not ns_parser:
            return
        whale_alert_view.display_whales_transactions(
            min_value=ns_parser.min,
            top=ns_parser.limit,
            sortby=ns_parser.sortby,
            descend=ns_parser.descend,
            show_address=ns_parser.address,
            export=ns_parser.export,
        )
    def call_whales(self, other_args: List[str]):
        """Process whales command: display large crypto transactions."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="whales",
            description="""
                Display crypto whales transactions.
                [Source: https://docs.whale-alert.io/]
            """,
        )

        parser.add_argument(
            "-m",
            "--min",
            dest="min",
            type=check_int_range(500000, 100**7),
            help="Minimum value of transactions.",
            default=1000000,
        )

        parser.add_argument(
            "-t",
            "--top",
            dest="top",
            type=check_positive,
            help="top N number records",
            default=10,
        )

        parser.add_argument(
            "-s",
            "--sort",
            dest="sortby",
            type=str,
            help="Sort by given column. Default: date",
            default="date",
            choices=[
                "date",
                "symbol",
                "blockchain",
                "amount",
                "amount_usd",
                "from",
                "to",
            ],
        )
        parser.add_argument(
            "--descend",
            action="store_false",
            help="Flag to sort in descending order (lowest first)",
            dest="descend",
            default=True,
        )

        # Bug fix: this flag stored to dest="balance" while the call below
        # read ns_parser.address, so using the command always raised
        # AttributeError (swallowed by the broad except). Align the dest
        # with the attribute actually read; flag names are unchanged.
        parser.add_argument(
            "-a",
            "--balance",
            dest="address",
            action="store_true",
            help="Flag to show addresses of transaction",
            default=False,
        )

        parser.add_argument(
            "--export",
            choices=["csv", "json", "xlsx"],
            default="",
            type=str,
            dest="export",
            help="Export dataframe data to csv,json,xlsx file",
        )

        try:
            ns_parser = parse_known_args_and_warn(parser, other_args)

            if not ns_parser:
                return

            whale_alert_view.display_whales_transactions(
                min_value=ns_parser.min,
                top=ns_parser.top,
                sortby=ns_parser.sortby,
                descend=ns_parser.descend,
                show_address=ns_parser.address,
                export=ns_parser.export,
            )

        except Exception as e:
            print(e)