Example #1
def export(l_args, df_stock):
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="export",
        description="Exports the historical data from this ticker to a file or stdout.",
    )
    parser.add_argument(
        "-f",
        "--filename",
        type=str,
        dest="s_filename",
        default=stdout,
        help="Name of file to save the historical data exported (stdout if unspecified)",
    )
    parser.add_argument(
        "-F",
        "--format",
        dest="s_format",
        type=str,
        default="csv",
        help="Export historical data in one of the following formats: csv, json, excel, clipboard",
    )
    try:
        ns_parser = parse_known_args_and_warn(parser, l_args)
        if not ns_parser:
            return

    except SystemExit:
        print("")
        return

    if df_stock.empty:
        print("No data loaded yet to export.")
        return

    if ns_parser.s_format == "csv":
        df_stock.to_csv(ns_parser.s_filename)

    elif ns_parser.s_format == "json":
        df_stock.to_json(ns_parser.s_filename)

    elif ns_parser.s_format == "excel":
        df_stock.to_excel(ns_parser.s_filename)

    elif ns_parser.s_format == "clipboard":
        df_stock.to_clipboard()

    print("")
Example #2
def wsb_community(l_args):
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="wsb",
        description="""Print what WSB gang are up to in subreddit wallstreetbets. [Source: Reddit]""",
    )
    parser.add_argument(
        "-l",
        "--limit",
        action="store",
        dest="n_limit",
        type=check_positive,
        default=10,
        help="limit of posts to print.",
    )
    parser.add_argument(
        "-n",
        "--new",
        action="store_true",
        default=False,
        dest="b_new",
        help="new flag, if true the posts retrieved are ordered by recency rather than by score.",
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, l_args)
        if not ns_parser:
            return

        praw_api = praw.Reddit(
            client_id=cfg.API_REDDIT_CLIENT_ID,
            client_secret=cfg.API_REDDIT_CLIENT_SECRET,
            username=cfg.API_REDDIT_USERNAME,
            user_agent=cfg.API_REDDIT_USER_AGENT,
            password=cfg.API_REDDIT_PASSWORD,
        )

        d_submission = {}
        l_watchlist_links = list()

        # psaw_api = PushshiftAPI()

        if ns_parser.b_new:
            submissions = praw_api.subreddit("wallstreetbets").new(
                limit=ns_parser.n_limit)
        else:
            submissions = praw_api.subreddit("wallstreetbets").hot(
                limit=ns_parser.n_limit)

        while True:
            try:
                submission = next(submissions, None)
                if submission:
                    # Get more information about post using PRAW api
                    submission = praw_api.submission(id=submission.id)

                    # Ensure that the post hasn't been removed by a moderator
                    # in the meantime
                    if not submission.removed_by_category:

                        l_watchlist_links.append(
                            f"https://old.reddit.com{submission.permalink}")

                        print_and_record_reddit_post(d_submission, submission)

                # Stop when the subreddit listing has no more posts
                else:
                    break
            except Exception as e:
                print(e)
                print("")
            print("")
    except Exception as e:
        print(e)
        print("")
Example #3
def spac(l_args):
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="spac",
        description=""" Show other users SPACs announcement [Reddit] """,
    )
    parser.add_argument(
        "-l",
        "--limit",
        action="store",
        dest="n_limit",
        type=check_positive,
        default=5,
        help="limit of posts with SPACs retrieved.",
    )
    parser.add_argument(
        "-d",
        "--days",
        action="store",
        dest="n_days",
        type=check_positive,
        default=5,
        help="look for the tickers from those n past days.",
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, l_args)
        if not ns_parser:
            return

        praw_api = praw.Reddit(
            client_id=cfg.API_REDDIT_CLIENT_ID,
            client_secret=cfg.API_REDDIT_CLIENT_SECRET,
            username=cfg.API_REDDIT_USERNAME,
            user_agent=cfg.API_REDDIT_USER_AGENT,
            password=cfg.API_REDDIT_PASSWORD,
        )

        d_submission = {}
        d_watchlist_tickers = {}
        l_watchlist_links = list()
        l_watchlist_author = list()

        n_ts_after = int(
            (datetime.today() - timedelta(days=ns_parser.n_days)).timestamp())
        l_sub_reddits = [
            "pennystocks",
            "RobinHoodPennyStocks",
            "Daytrading",
            "StockMarket",
            "stocks",
            "investing",
            "wallstreetbets",
        ]

        warnings.filterwarnings("ignore")  # To avoid printing the warning
        psaw_api = PushshiftAPI()
        submissions = psaw_api.search_submissions(
            after=n_ts_after,
            subreddit=l_sub_reddits,
            q="SPAC|Spac|spac|Spacs|spacs",
            filter=["id"],
        )
        n_flair_posts_found = 0
        while True:
            try:
                submission = next(submissions, None)
                if submission:
                    # Get more information about post using PRAW api
                    submission = praw_api.submission(id=submission.id)

                    # Ensure that the post hasn't been removed by a moderator in the meantime,
                    # that there is a description and it's not just an image, that the flair is
                    # meaningful, and that we aren't re-considering the same author's watchlist
                    if (not submission.removed_by_category
                            and submission.selftext and
                            submission.link_flair_text not in ["Yolo", "Meme"]
                            and submission.author.name
                            not in l_watchlist_author):
                        l_tickers_found = find_tickers(submission)

                        if l_tickers_found:
                            # Add another author's name to the parsed watchlists
                            l_watchlist_author.append(submission.author.name)

                            # Lookup stock tickers within a watchlist
                            for key in l_tickers_found:
                                if key in d_watchlist_tickers:
                                    # Increment stock ticker found
                                    d_watchlist_tickers[key] += 1
                                else:
                                    # Initialize stock ticker found
                                    d_watchlist_tickers[key] = 1

                            l_watchlist_links.append(
                                f"https://old.reddit.com{submission.permalink}"
                            )

                            print_and_record_reddit_post(
                                d_submission, submission)

                            # Increment count of valid posts found
                            n_flair_posts_found += 1

                    # Check if number of wanted posts found has been reached
                    if n_flair_posts_found > ns_parser.n_limit - 1:
                        break

                # Check if search_submissions didn't return any more posts
                else:
                    break
            except Exception as e:
                print(e)
                print("")

        if n_flair_posts_found:
            lt_watchlist_sorted = sorted(d_watchlist_tickers.items(),
                                         key=lambda item: item[1],
                                         reverse=True)
            s_watchlist_tickers = ""
            n_tickers = 0
            for t_ticker in lt_watchlist_sorted:
                try:
                    # If finviz.get_stock doesn't raise an exception, the ticker
                    # exists on Finviz, so we can report it.
                    finviz.get_stock(t_ticker[0])
                    if int(t_ticker[1]) > 1:
                        s_watchlist_tickers += f"{t_ticker[1]} {t_ticker[0]}, "
                    n_tickers += 1
                except Exception as e:
                    print(e)
                    # pass
            if n_tickers:
                print(
                    "The following stock tickers have been mentioned more than once across the previous SPACs:"
                )
                print(s_watchlist_tickers[:-2])
        print("")

    except Exception as e:
        print(e)
        print("")
Example #4
def spac_community(l_args):
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="spac_c",
        description="""Print other users' SPAC announcements from the 'SPACs' subreddit. [Source: Reddit]""",
    )
    parser.add_argument(
        "-l",
        "--limit",
        action="store",
        dest="n_limit",
        type=check_positive,
        default=10,
        help="limit of posts with SPACs retrieved",
    )
    parser.add_argument(
        "-p",
        "--popular",
        action="store_true",
        default=False,
        dest="b_popular",
        help="popular flag, if true the posts retrieved are based on score rather than time",
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, l_args)
        if not ns_parser:
            return

        praw_api = praw.Reddit(
            client_id=cfg.API_REDDIT_CLIENT_ID,
            client_secret=cfg.API_REDDIT_CLIENT_SECRET,
            username=cfg.API_REDDIT_USERNAME,
            user_agent=cfg.API_REDDIT_USER_AGENT,
            password=cfg.API_REDDIT_PASSWORD,
        )

        d_submission = {}
        d_watchlist_tickers = {}
        l_watchlist_links = list()
        l_watchlist_author = list()

        # psaw_api = PushshiftAPI()

        if ns_parser.b_popular:
            submissions = praw_api.subreddit("SPACs").hot(
                limit=ns_parser.n_limit)
        else:
            submissions = praw_api.subreddit("SPACs").new(
                limit=ns_parser.n_limit)

        while True:
            try:
                submission = next(submissions, None)
                if submission:
                    # Get more information about post using PRAW api
                    submission = praw_api.submission(id=submission.id)

                    # Ensure that the post hasn't been removed by a moderator in the meantime,
                    # that there is a description and it's not just an image, that the flair is
                    # meaningful, and that we aren't re-considering the same author's watchlist
                    if (not submission.removed_by_category
                            and submission.selftext and
                            submission.link_flair_text not in ["Yolo", "Meme"]
                            and submission.author.name
                            not in l_watchlist_author):
                        l_tickers_found = find_tickers(submission)

                        if l_tickers_found:
                            # Add another author's name to the parsed watchlists
                            l_watchlist_author.append(submission.author.name)

                            # Lookup stock tickers within a watchlist
                            for key in l_tickers_found:
                                if key in d_watchlist_tickers:
                                    # Increment stock ticker found
                                    d_watchlist_tickers[key] += 1
                                else:
                                    # Initialize stock ticker found
                                    d_watchlist_tickers[key] = 1

                            l_watchlist_links.append(
                                f"https://old.reddit.com{submission.permalink}"
                            )

                            print_and_record_reddit_post(
                                d_submission, submission)

                # Stop when the subreddit listing has no more posts
                else:
                    break
            except Exception as e:
                print(e)
                print("")

        if d_watchlist_tickers:
            lt_watchlist_sorted = sorted(d_watchlist_tickers.items(),
                                         key=lambda item: item[1],
                                         reverse=True)
            s_watchlist_tickers = ""
            n_tickers = 0
            for t_ticker in lt_watchlist_sorted:
                try:
                    # If finviz.get_stock doesn't raise an exception, the ticker
                    # exists on Finviz, so we can report it.
                    finviz.get_stock(t_ticker[0])
                    if int(t_ticker[1]) > 1:
                        s_watchlist_tickers += f"{t_ticker[1]} {t_ticker[0]}, "
                    n_tickers += 1
                except Exception as e:
                    print(e)
                    # pass
            if n_tickers:
                print(
                    "The following stock tickers have been mentioned more than once across the previous SPACs:"
                )
                print(s_watchlist_tickers[:-2])
        print("")

    except Exception as e:
        print(e)
        print("")
Example #5
def popular_tickers(l_args):
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="popular",
        description="""Print latest popular tickers. [Source: Reddit] """,
    )
    parser.add_argument(
        "-n",
        "--number",
        action="store",
        dest="n_top",
        type=check_positive,
        default=10,
        help="display top N tickers",
    )
    parser.add_argument(
        "-l",
        "--limit",
        action="store",
        dest="n_limit",
        type=check_positive,
        default=50,
        help="limit of posts retrieved per sub reddit.",
    )
    parser.add_argument(
        "-s",
        "--sub",
        action="store",
        dest="s_subreddit",
        type=str,
        help="""
            subreddits to look for tickers, e.g. pennystocks,stocks.
            Default: pennystocks, RobinHoodPennyStocks, Daytrading, StockMarket, stocks, investing,
            wallstreetbets
        """,
    )
    parser.add_argument(
        "-d",
        "--days",
        action="store",
        dest="n_days",
        type=check_positive,
        default=1,
        help="look for the tickers from those n past days.",
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, l_args)
        if not ns_parser:
            return

        n_ts_after = int(
            (datetime.today() - timedelta(days=ns_parser.n_days)).timestamp())

        if ns_parser.s_subreddit:
            if "," in ns_parser.s_subreddit:
                l_sub_reddits = ns_parser.s_subreddit.split(",")
            else:
                l_sub_reddits = [ns_parser.s_subreddit]
        else:
            l_sub_reddits = [
                "pennystocks",
                "RobinHoodPennyStocks",
                "Daytrading",
                "StockMarket",
                "stocks",
                "investing",
                "wallstreetbets",
            ]

        # d_submission = {}
        d_watchlist_tickers = {}
        # l_watchlist_links = list()
        l_watchlist_author = list()

        praw_api = praw.Reddit(
            client_id=cfg.API_REDDIT_CLIENT_ID,
            client_secret=cfg.API_REDDIT_CLIENT_SECRET,
            username=cfg.API_REDDIT_USERNAME,
            user_agent=cfg.API_REDDIT_USER_AGENT,
            password=cfg.API_REDDIT_PASSWORD,
        )

        psaw_api = PushshiftAPI()

        for s_sub_reddit in l_sub_reddits:
            print(
                f"Searching for the latest tickers in the last {ns_parser.n_limit} '{s_sub_reddit}' posts"
            )
            submissions = psaw_api.search_submissions(
                after=int(n_ts_after),
                subreddit=s_sub_reddit,
                limit=ns_parser.n_limit,
                filter=["id"],
            )

            n_tickers = 0
            while True:
                try:
                    submission = next(submissions, None)
                    if submission:
                        # Get more information about post using PRAW api
                        submission = praw_api.submission(id=submission.id)

                        # Ensure that the post hasn't been removed by a moderator in the meantime,
                        # that it has some text or at least a title, and that we aren't
                        # re-considering the same author's content
                        if (not submission.removed_by_category
                                and (submission.selftext or submission.title)
                                and submission.author.name
                                not in l_watchlist_author):
                            l_tickers_found = find_tickers(submission)

                            if l_tickers_found:
                                n_tickers += len(l_tickers_found)

                                # Add another author's name to the parsed watchlists
                                l_watchlist_author.append(
                                    submission.author.name)

                                # Lookup stock tickers within a watchlist
                                for key in l_tickers_found:
                                    if key in d_watchlist_tickers:
                                        # Increment stock ticker found
                                        d_watchlist_tickers[key] += 1
                                    else:
                                        # Initialize stock ticker found
                                        d_watchlist_tickers[key] = 1

                    # Check if search_submissions didn't return any more posts
                    else:
                        break
                except Exception as e:
                    print(e)
                    print("")

            print(f"  {n_tickers} potential tickers found.")

        lt_watchlist_sorted = sorted(d_watchlist_tickers.items(),
                                     key=lambda item: item[1],
                                     reverse=True)

        if lt_watchlist_sorted:
            n_top_stocks = 0
            # pylint: disable=redefined-outer-name
            popular_tickers = []
            for t_ticker in lt_watchlist_sorted:
                # Stop once the requested number of tickers has been collected
                if n_top_stocks >= ns_parser.n_top:
                    break
                try:
                    # If finviz.get_stock doesn't raise an exception, the ticker
                    # exists on Finviz, so we can report it.
                    stock_info = finviz.get_stock(t_ticker[0])
                    popular_tickers.append((
                        t_ticker[1],
                        t_ticker[0],
                        stock_info["Company"],
                        stock_info["Sector"],
                        stock_info["Price"],
                        stock_info["Change"],
                        stock_info["Perf Month"],
                        f"https://finviz.com/quote.ashx?t={t_ticker[0]}",
                    ))
                    n_top_stocks += 1
                except HTTPError as e:
                    if e.response.status_code != 404:
                        print(f"Unexpected exception from Finviz: {e}")
                except Exception as e:
                    print(e)

            popular_tickers_df = pd.DataFrame(
                popular_tickers,
                columns=[
                    "Mentions",
                    "Ticker",
                    "Company",
                    "Sector",
                    "Price",
                    "Change",
                    "Perf Month",
                    "URL",
                ],
            )

            print(
                f"\nThe following TOP {ns_parser.n_top} tickers have been mentioned in the last {ns_parser.n_days} days:"
            )

            print(popular_tickers_df)
            print("")
        else:
            print("No tickers found")

        print("")

    except ResponseException as e:
        if e.response.status_code == 401:
            print(
                "Received a response from Reddit with an authorization error. Check your token."
            )
            print("")

    except Exception as e:
        print(e)
        print("")
Example #6
def inference(l_args, s_ticker):
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="infer",
        description="""
            Print quick sentiment inference from last tweets that contain the ticker.
            This model splits the text into character-level tokens and uses the DistilBERT
            model to make predictions. DistilBERT is a distilled version of the powerful
            BERT transformer model. Not only time period of these, but also frequency.
            [Source: Twitter]
        """,
    )

    parser.add_argument(
        "-n",
        "--num",
        action="store",
        dest="n_num",
        type=int,
        default=100,
        choices=range(10, 101),
        help="num of latest tweets to infer from.",
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, l_args)
        if not ns_parser:
            return

        # Get tweets using Twitter API
        params = {
            "q": "$" + s_ticker,
            "tweet_mode": "extended",
            "lang": "en",
            "count": str(ns_parser.n_num),
        }

        # Request Twitter API
        response = requests.get(
            "https://api.twitter.com/1.1/search/tweets.json",
            params=params,
            headers={
                "authorization": "Bearer " + cfg.API_TWITTER_BEARER_TOKEN
            },
        )

        # Create dataframe
        df_tweets = pd.DataFrame()

        # Check that the API response was successful
        if response.status_code == 200:
            for tweet in response.json()["statuses"]:
                row = get_data(tweet)
                df_tweets = df_tweets.append(row, ignore_index=True)

        # Load sentiment model
        sentiment_model = flair.models.TextClassifier.load("en-sentiment")
        print("")

        # We will append probability/sentiment preds later
        probs = []
        sentiments = []
        for s_tweet in df_tweets["text"].to_list():
            tweet = clean_tweet(s_tweet, s_ticker)

            # Make sentiment prediction
            sentence = flair.data.Sentence(tweet)
            sentiment_model.predict(sentence)

            # Extract sentiment prediction (POSITIVE/NEGATIVE) and confidence (0-1)
            probs.append(sentence.labels[0].score)
            sentiments.append(sentence.labels[0].value)

        # Add probability and sentiment predictions to tweets dataframe
        df_tweets["probability"] = probs
        df_tweets["sentiment"] = sentiments

        # Cumulative signed sentiment score: +probability for POSITIVE tweets,
        # -probability for NEGATIVE tweets, summed over all tweets so far
        df_tweets["sentiment_estimation"] = df_tweets.apply(
            lambda row: row["probability"] *
            (-1, 1)[row["sentiment"] == "POSITIVE"],
            axis=1,
        ).cumsum()
        # Per-tweet signed sentiment score (not cumulative)
        df_tweets["prob_sen"] = df_tweets.apply(
            lambda row: row["probability"] *
            (-1, 1)[row["sentiment"] == "POSITIVE"],
            axis=1,
        )

        # Percentage of confidence
        if df_tweets["sentiment_estimation"].values[-1] > 0:
            n_pos = df_tweets[df_tweets["prob_sen"] > 0]["prob_sen"].sum()
            # pylint: disable=unused-variable
            n_pct = round(100 * n_pos / df_tweets["probability"].sum())
        else:
            n_neg = abs(df_tweets[df_tweets["prob_sen"] < 0]["prob_sen"].sum())
            n_pct = round(100 * n_neg /
                          df_tweets["probability"].sum())  # noqa: F841

        # Parse tweets
        dt_from = dateutil.parser.parse(df_tweets["created_at"].values[-1])
        dt_to = dateutil.parser.parse(df_tweets["created_at"].values[0])
        print(f"From: {dt_from.strftime('%Y-%m-%d %H:%M:%S')}")
        print(f"To:   {dt_to.strftime('%Y-%m-%d %H:%M:%S')}")

        print(f"{len(df_tweets)} tweets were analyzed.")
        dt_delta = dt_to - dt_from
        n_freq = dt_delta.total_seconds() / len(df_tweets)
        print(f"Frequency of approx 1 tweet every {round(n_freq)} seconds.")

        s_sen = f"{('NEGATIVE', 'POSITIVE')[int(df_tweets['sentiment_estimation'].values[-1] > 0)]}"
        print(f"The sentiment of {s_ticker} is: {s_sen} ({n_pct} %)")
        print("")

    except Exception as e:
        print(e)
        print("")
Example #7
def sentiment(l_args, s_ticker):
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="sen",
        description="""
            Plot in-depth sentiment predicted from tweets from last days
            that contain pre-defined ticker. This model splits the text into character-level
            tokens and uses the DistilBERT model to make predictions. DistilBERT is a distilled
            version of the powerful BERT transformer model. Note that a big num of tweets
            extracted per hour in conjunction with a high number of days in the past, will make the
            algorithm take a long period of time to estimate sentiment. [Source: Twitter]
        """,
    )

    # In reality this argument could go up to 100, but testing showed that larger
    # values take too long to compute, which may not be acceptable
    parser.add_argument(
        "-n",
        "--num",
        action="store",
        dest="n_tweets",
        type=int,
        default=10,
        choices=range(10, 61),
        help="num of tweets to extract per hour.",
    )
    parser.add_argument(
        "-d",
        "--days",
        action="store",
        dest="n_days_past",
        type=int,
        default=7,
        choices=range(1, 8),
        help="num of days in the past to extract tweets.",
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, l_args)
        if not ns_parser:
            return

        # Setup API request params and headers
        headers = {"authorization": f"Bearer {cfg.API_TWITTER_BEARER_TOKEN}"}
        params = {
            "query": f"({s_ticker}) (lang:en)",
            "max_results": str(ns_parser.n_tweets),
            "tweet.fields": "created_at,lang",
        }

        # Date format string required by twitter
        dtformat = "%Y-%m-%dT%H:%M:%SZ"

        # Algorithm to extract
        dt_recent = datetime.now() - timedelta(seconds=20)
        dt_old = dt_recent - timedelta(days=ns_parser.n_days_past)
        print(
            f"From {dt_recent.date()} retrieving {ns_parser.n_tweets*24} tweets ({ns_parser.n_tweets} tweets/hour)"
        )

        df_tweets = pd.DataFrame()
        while True:
            # Stop once the look-back window has gone past the oldest requested day
            if dt_recent < dt_old:
                break

            # Update past datetime
            dt_past = dt_recent - timedelta(minutes=60)

            if dt_past.day < dt_recent.day:
                print(
                    f"From {dt_past.date()} retrieving {ns_parser.n_tweets*24} tweets ({ns_parser.n_tweets} tweets/hour)"
                )

            # Assign from and to datetime parameters for the API
            params["start_time"] = dt_past.strftime(dtformat)
            params["end_time"] = dt_recent.strftime(dtformat)

            # Send API request
            response = requests.get(
                "https://api.twitter.com/2/tweets/search/recent",
                params=params,
                headers=headers,
            )

            # Update recent datetime
            dt_recent = dt_past

            # If response from API request is a success
            if response.status_code == 200:

                # Iteratively append our tweet data to our dataframe
                for tweet in response.json()["data"]:
                    row = get_data(tweet)
                    df_tweets = df_tweets.append(row, ignore_index=True)

        # Load sentiment model
        print("")
        sentiment_model = flair.models.TextClassifier.load("en-sentiment")
        print("")

        # Append probability and sentiment preds later
        probs = []
        sentiments = []
        for s_tweet in df_tweets["text"].to_list():
            tweet = clean_tweet(s_tweet, s_ticker)

            # Make sentiment prediction
            sentence = flair.data.Sentence(tweet)
            sentiment_model.predict(sentence)

            # Extract sentiment prediction (POSITIVE/NEGATIVE) and confidence (0-1)
            probs.append(sentence.labels[0].score)
            sentiments.append(sentence.labels[0].value)

        # Add probability and sentiment predictions to tweets dataframe
        df_tweets["probability"] = probs
        df_tweets["sentiment"] = sentiments

        # Sort tweets per date
        df_tweets.sort_index(ascending=False, inplace=True)

        # Cumulative signed sentiment score: +probability for POSITIVE tweets, -probability for NEGATIVE tweets
        df_tweets["sentiment_estimation"] = df_tweets.apply(
            lambda row: row["probability"] *
            (-1, 1)[row["sentiment"] == "POSITIVE"],
            axis=1,
        ).cumsum()
        # Per-tweet signed sentiment score (not cumulative)
        df_tweets["prob_sen"] = df_tweets.apply(
            lambda row: row["probability"] *
            (-1, 1)[row["sentiment"] == "POSITIVE"],
            axis=1,
        )

        # Percentage of confidence
        if df_tweets["sentiment_estimation"].values[-1] > 0:
            n_pos = df_tweets[df_tweets["prob_sen"] > 0]["prob_sen"].sum()
            n_pct = round(100 * n_pos / df_tweets["probability"].sum())
        else:
            n_neg = abs(df_tweets[df_tweets["prob_sen"] < 0]["prob_sen"].sum())
            n_pct = round(100 * n_neg / df_tweets["probability"].sum())
        s_sen = f"{('NEGATIVE', 'POSITIVE')[int(df_tweets['sentiment_estimation'].values[-1] > 0)]}"

        # df_tweets.to_csv(r'notebooks/tweets.csv', index=False)
        df_tweets.reset_index(inplace=True)

        # Plotting
        plt.subplot(211)
        plt.title(
            f"Twitter's {s_ticker} sentiment over time is {s_sen} ({n_pct} %)")
        plt.plot(df_tweets.index,
                 df_tweets["sentiment_estimation"].values,
                 lw=3,
                 c="cyan")
        plt.xlim(df_tweets.index[0], df_tweets.index[-1])
        plt.grid(b=True,
                 which="major",
                 color="#666666",
                 linestyle="-",
                 lw=1.5,
                 alpha=0.5)
        plt.minorticks_on()
        plt.grid(b=True,
                 which="minor",
                 color="#999999",
                 linestyle="-",
                 alpha=0.2)
        plt.ylabel("Cumulative Sentiment")
        l_xticks = list()
        l_xlabels = list()
        l_xticks.append(0)
        l_xlabels.append(df_tweets["created_at"].values[0].split(" ")[0])
        n_day = datetime.strptime(df_tweets["created_at"].values[0],
                                  "%Y-%m-%d %H:%M:%S").day
        n_idx = 0
        n_next_idx = 0
        for n_next_idx, dt_created in enumerate(df_tweets["created_at"]):
            if datetime.strptime(dt_created, "%Y-%m-%d %H:%M:%S").day > n_day:
                l_xticks.append(n_next_idx)
                l_xlabels.append(
                    df_tweets["created_at"].values[n_next_idx].split(" ")[0])
                l_val_days = (
                    df_tweets["sentiment_estimation"].values[n_idx:n_next_idx]
                    - df_tweets["sentiment_estimation"].values[n_idx])
                plt.plot(range(n_idx, n_next_idx),
                         l_val_days,
                         lw=3,
                         c="tab:blue")
                n_day_avg = np.mean(l_val_days)
                if n_day_avg > 0:
                    plt.hlines(
                        n_day_avg,
                        n_idx,
                        n_next_idx,
                        linewidth=2.5,
                        linestyle="--",
                        color="green",
                        lw=3,
                    )
                else:
                    plt.hlines(
                        n_day_avg,
                        n_idx,
                        n_next_idx,
                        linewidth=2.5,
                        linestyle="--",
                        color="red",
                        lw=3,
                    )
                n_idx = n_next_idx
                n_day += 1
        l_val_days = (df_tweets["sentiment_estimation"].values[n_idx:] -
                      df_tweets["sentiment_estimation"].values[n_idx])
        plt.plot(range(n_idx, len(df_tweets)), l_val_days, lw=3, c="tab:blue")
        n_day_avg = np.mean(l_val_days)
        if n_day_avg > 0:
            plt.hlines(
                n_day_avg,
                n_idx,
                len(df_tweets),
                linewidth=2.5,
                linestyle="--",
                color="green",
                lw=3,
            )
        else:
            plt.hlines(
                n_day_avg,
                n_idx,
                len(df_tweets),
                linewidth=2.5,
                linestyle="--",
                color="red",
                lw=3,
            )
        l_xticks.append(len(df_tweets))
        # (unused?) datetime.strptime(dt_created, "%Y-%m-%d %H:%M:%S") + timedelta(days=1)
        l_xlabels.append(
            datetime.strftime(
                datetime.strptime(
                    df_tweets["created_at"].values[len(df_tweets) - 1],
                    "%Y-%m-%d %H:%M:%S",
                ) + timedelta(days=1),
                "%Y-%m-%d",
            ))
        plt.xticks(l_xticks, l_xlabels)
        plt.axhspan(plt.gca().get_ylim()[0], 0, facecolor="r", alpha=0.1)
        plt.axhspan(0, plt.gca().get_ylim()[1], facecolor="g", alpha=0.1)

        plt.subplot(212)
        plt.bar(
            df_tweets[df_tweets["prob_sen"] > 0].index,
            df_tweets[df_tweets["prob_sen"] > 0]["prob_sen"].values,
            color="green",
        )
        plt.bar(
            df_tweets[df_tweets["prob_sen"] < 0].index,
            df_tweets[df_tweets["prob_sen"] < 0]["prob_sen"].values,
            color="red",
        )
        for l_x in l_xticks[1:]:
            plt.vlines(l_x,
                       -1,
                       1,
                       linewidth=2,
                       linestyle="--",
                       color="k",
                       lw=3)
        plt.xlim(df_tweets.index[0], df_tweets.index[-1])
        plt.xticks(l_xticks, l_xlabels)
        plt.grid(b=True, which="major", color="#666666", linestyle="-")
        plt.ylabel("Sentiment")
        plt.xlabel("Time")
        plt.show()

    except Exception as e:
        print(e)
        print("")