def get_news_markdown(ticker_symbol, count):
    """
    Return a markdown string listing the `count` most recent news items.

    Parameters
    ----------

    ticker_symbol: str
        Stock Ticker Symbol Value

    count: int
        Number of news items to return

    Returns
    -------

    joined_str: str
        String containing a numbered markdown list of "[title](url)" links,
        one per line.

    """
    # finviz.get_news returns (title, url) pairs, newest first; keep the
    # first `count` items in their original order.
    news = finviz.get_news(ticker_symbol)[:count]
    # Build with join + enumerate instead of a manual counter and `+=`
    # (repeated string concatenation is quadratic in the worst case).
    return ''.join(
        f'{i}. [{title}]({url})\n'
        for i, (title, url) in enumerate(news, start=1)
    )
def news(l_args, s_ticker):
    """Print the latest Finviz news for a ticker: title plus web link.

    Parameters
    ----------
    l_args : list
        Raw CLI arguments forwarded to argparse (e.g. ["-n", "10"]).
    s_ticker : str
        Stock ticker symbol.
    """
    parser = argparse.ArgumentParser(
        prog='news',
        description=
        """Prints latest news about company, including title and web link.
                                     [Source: Finviz]""")

    parser.add_argument('-n',
                        "--num",
                        action="store",
                        dest="n_num",
                        type=check_positive,
                        default=5,
                        help='Number of latest news being printed.')

    (ns_parser, l_unknown_args) = parser.parse_known_args(l_args)

    if l_unknown_args:
        print(
            f"The following args couldn't be interpreted: {l_unknown_args}\n")
        return

    d_finviz_news = finviz.get_news(s_ticker)
    # BUG FIX: the original iterated over `{*d_finviz_news}`, which converts
    # the ordered list of (title, link) pairs into a set — destroying the
    # "latest first" ordering and making which items are printed
    # nondeterministic. Iterate the list directly instead.
    for i, (s_news_title, s_news_link) in enumerate(d_finviz_news):
        if i >= ns_parser.n_num:
            break
        print(f"-> {s_news_title}")
        print(f"{s_news_link}\n")

    print("")
# Esempio n. 3 (Example 3 — scraped separator)
def scrape(request):
    """Django view: look up a ticker by company name, build a price chart,
    and render the user home page.

    Pipeline:
      1. Resolve the company name (``request.GET['company']``) to a ticker
         via the MarketWatch symbol-lookup page.
      2. Download full price history with yfinance and dump it to
         ``hist.csv`` / ``hist.json`` in the working directory.
      3. Scrape summary divs from Yahoo Finance for display.
      4. Plot close/open/high/low/volume plus a 30-day moving average and
         embed the figure as HTML.
      5. Save the Finviz news feed to ``News.csv`` for later sentiment work.

    NOTE(review): raises KeyError if 'company' is missing, and AttributeError
    if any of the scraped page structures change (no error handling here).
    """
    # scraping for ticker name
    company = request.GET['company']
    r = requests.get(
        'https://www.marketwatch.com/tools/quotes/lookup.asp?siteID=mktw&Lookup='
        + company + '&Country=All&Type=All')
    soup = bs4.BeautifulSoup(r.text, "lxml")
    table = soup.find('div', class_='results')
    # First result cell is assumed to hold the ticker symbol — TODO confirm
    # the lookup page still uses the 'bottomborder' cell layout.
    rows = table.find_all('td', class_='bottomborder')
    use = rows[0].text
    ticker = yf.Ticker(use)
    # converting into csv and json files
    # NOTE(review): writes to the process CWD; concurrent requests would
    # clobber each other's hist.csv/hist.json.
    hist = ticker.history(period="max")
    hist.to_csv('hist.csv')
    hist.to_json('hist.json')
    # Scraping details from Yahoo finance
    l = requests.get('https://in.finance.yahoo.com/quote/' + use + '?p=' +
                     use + '&.tsrc=fin-srch')
    soup2 = bs4.BeautifulSoup(l.text, "lxml")
    # These class strings are Yahoo's generated atomic-CSS names — extremely
    # brittle; expect them to break when Yahoo redeploys.
    div1 = soup2.find('div', class_='Mt(15px)').text
    div1_1 = soup2.find(
        'div',
        class_=
        'quote-header-section Cf Pos(r) Mb(5px) Maw($maxModuleWidth) Miw($minGridWidth) smartphone_Miw(ini) Miw(ini)!--tab768 Miw(ini)!--tab1024 Mstart(a) Mend(a) Px(20px) smartphone_Pb(0px) smartphone_Mb(0px)'
    )
    div2_1 = div1_1.find('div',
                         class_='My(6px) Pos(r) smartphone_Mt(6px)').text
    # plotting graph of close price
    # Re-read the history just written above so dates come back as a column.
    df = pd.read_csv('hist.csv', )
    # 30-day simple moving average of the close price.
    df['movavg'] = df['Close'].rolling(window=30).mean()
    # NOTE(review): `plt.line(...)` matches the plotly.express API, so `plt`
    # is presumably `plotly.express` imported under that alias — confirm at
    # the file's import block (it is not matplotlib.pyplot).
    fig = plt.line(df,
                   x='Date',
                   y='Close',
                   title='Detailed Graph',
                   hover_name='Date')
    fig.add_scatter(x=df['Date'],
                    y=df['movavg'],
                    mode='lines',
                    name='Moving Average')
    fig.add_scatter(x=df['Date'], y=df['Open'], mode='lines', name='Open')
    fig.add_scatter(x=df['Date'], y=df['High'], mode='lines', name='High')
    fig.add_scatter(x=df['Date'], y=df['Low'], mode='lines', name='Low')
    # Volume shares the price y-axis, so hide it by default (legend toggle).
    fig.add_scatter(x=df['Date'],
                    y=df['Volume'],
                    mode='lines',
                    name='Volume',
                    visible='legendonly')
    graph = fig.to_html(full_html=False)

    #Scraping for sentiment analysis
    # data preprocessing
    news = finviz.get_news(use)
    df = pd.DataFrame(news, columns=['text', 'link'])
    df.to_csv("News.csv")  # news data scraped and saved to csv file
    return render(request, "userhome.html", {
        'div1': div1,
        'div2_1': div2_1,
        'graph': graph
    })
def get_news(ticker):
    """Fetch Finviz news for `ticker` as a DataFrame indexed by headline.

    Returns the DataFrame on success; on any failure returns the exception
    object itself (NOTE: callers must type-check the result — returning the
    exception rather than raising is this function's established contract).
    """
    try:
        headlines = pd.DataFrame(
            finviz.get_news(ticker), columns=['Headline', 'Link']
        )
        return headlines.set_index('Headline')
    except Exception as err:
        return err
# Esempio n. 5 (Example 5 — scraped separator)
def get_news(ticker: str) -> List[Any]:
    """Fetch the latest news items for a stock from Finviz.

    Parameters
    ----------
    ticker : str
        Stock ticker

    Returns
    -------
    List[Any]
        News
    """
    items = finviz.get_news(ticker)
    return items
def get_news(ticker: str) -> Dict:
    """Retrieve company news from Finviz.

    Parameters
    ----------
    ticker : str
        Stock ticker

    Returns
    -------
    Dict
        News
    """
    news_data = finviz.get_news(ticker)
    return news_data
# Esempio n. 7 (Example 7 — scraped separator)
def news(other_args: List[str], ticker: str):
    """Display news for a given stock ticker

    Parameters
    ----------
    other_args : List[str]
        argparse other args - ["-n", "10"]
    ticker : str
        Stock ticker
    """

    parser = argparse.ArgumentParser(
        add_help=False,
        prog="news",
        description="""
            Prints latest news about company, including title and web link. [Source: Finviz]
        """,
    )

    parser.add_argument(
        "-n",
        "--num",
        action="store",
        dest="n_num",
        type=check_positive,
        default=5,
        help="Number of latest news being printed.",
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return

        d_finviz_news = finviz.get_news(ticker)
        # BUG FIX: the original looped over `{*d_finviz_news}` — a set built
        # from the (title, link) pairs — which discards the feed's
        # "latest first" ordering and makes the printed selection
        # nondeterministic. Iterate the list directly.
        for i, (s_news_title, s_news_link) in enumerate(d_finviz_news):
            if i >= ns_parser.n_num:
                break
            print(f"-> {s_news_title}")
            print(f"{s_news_link}\n")

        print("")

    except Exception as e:
        # Broad catch is this CLI's established best-effort style: report
        # and return instead of crashing the shell.
        print(e)
        print("")
        return
# Esempio n. 8 (Example 8 — scraped separator)
def news(l_args, s_ticker):
    """Print the latest Finviz news (title + link) for a ticker.

    Parameters
    ----------
    l_args : list
        Raw CLI arguments forwarded to argparse (e.g. ["-n", "10"]).
    s_ticker : str
        Stock ticker symbol.
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="news",
        description="""
            Prints latest news about company, including title and web link. [Source: Finviz]
        """,
    )

    parser.add_argument(
        "-n",
        "--num",
        action="store",
        dest="n_num",
        type=check_positive,
        default=5,
        help="Number of latest news being printed.",
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, l_args)
        if not ns_parser:
            return

        d_finviz_news = finviz.get_news(s_ticker)
        # BUG FIX: iterating `{*d_finviz_news}` turned the ordered news list
        # into a set, losing "latest first" ordering and making the output
        # nondeterministic. Iterate the list directly instead.
        for i, (s_news_title, s_news_link) in enumerate(d_finviz_news):
            if i >= ns_parser.n_num:
                break
            print(f"-> {s_news_title}")
            print(f"{s_news_link}\n")

        print("")

    except Exception as e:
        # Best-effort CLI behavior: report the error and return.
        print(e)
        print("")
        return
# Esempio n. 9 (Example 9 — scraped separator)
async def get_stock_news(stock: str):
    """Return the five most recent Finviz news items for `stock`.

    On success: {"success": True, "news": [...]} with at most 5 items.
    On an HTTP error from Finviz (e.g. unknown ticker):
    {"success": False, "message": "Not Found"}.
    """
    try:
        items = finviz.get_news(stock)
    except requests.exceptions.HTTPError:
        return {"success": False, "message": "Not Found"}
    return {"success": True, "news": items[:5]}
# Esempio n. 10 (Example 10 — scraped separator)
 def response():
     # Fetch the Finviz news feed for `ticker` — a name captured from the
     # enclosing scope, which is not visible in this fragment (TODO confirm
     # where `ticker` is bound before relying on this).
     return finviz.get_news(ticker)
# Esempio n. 11 (Example 11 — scraped separator)
import sys

# Make the vendored `finviz` checkout importable ahead of anything else on
# the path.
sys.path.insert(0, 'finviz')
import finviz

# Usage: python script.py TICKER — prints every news item (title, link)
# returned by Finviz for the given ticker, one per line.
result = finviz.get_news(sys.argv[1])
for item in result:
    # Iterate the list directly instead of indexing via range(len(...)).
    print(item)