Example #1
def about():
    newsapi = NewsApiClient(api_key="b0f75ce660c0466a9a98c2478f8abb62")
    topheadlines = newsapi.get_top_headlines(sources="al-jazeera-english")

    articles = topheadlines['articles']

    desc = []
    news = []
    img = []

    for i in range(len(articles)):
        myarticles = articles[i]

        news.append(myarticles['title'])
        desc.append(myarticles['description'])
        img.append(myarticles['urlToImage'])

    mylist = zip(news, desc, img)

    return render_template('about.html', context=mylist)
Example #2
def news(request):
    try:
        queries = News.objects.all()
        args = {"queries": queries}
        api = NewsApiClient(api_key='728bb1f02da34d37b4a5da9f67b87fbe')
        headlines = api.get_top_headlines(sources='techcrunch')
        # Seed the table from the API on the first request.
        if len(queries) == 0:
            for item in headlines["articles"]:
                temp = News(author=item["author"],
                            title=item["title"],
                            description=item["description"],
                            url=item["url"],
                            urlToImage=item["urlToImage"])
                temp.save()
            queries = News.objects.all()
        if headlines["status"] == "ok":
            args = {"queries": dicToQueries(headlines, queries)}
        else:
            args = {"queries": queries}
        return render(request, 'cosmos/news.html', args)
    except (ConnectionError, ConnectTimeout):
        # On network errors, render whatever is already cached.
        return render(request, 'cosmos/news.html', args)
Example #3
def bbc(request):
    if request.method == 'POST':
        keyword_to_search = request.POST['searchbar']
        print(keyword_to_search)
        if 'email' in request.session.keys():
            logged_user = User.objects.get(email=request.session['email'])
            searched_keyword = logged_user.searches.filter(
                keyword=keyword_to_search)
            print(searched_keyword)
            if len(searched_keyword) == 0:
                dk = Search.objects.create(keyword=keyword_to_search)
                dk.user.add(logged_user)
        else:
            logged_user = '******'
        newsapi = NewsApiClient(api_key="959759915d66465fbff7c7ec6993daf7")
        topheadlines = newsapi.get_top_headlines(sources='bbc-news',
                                                 q=keyword_to_search,
                                                 language='en')
        articles = topheadlines['articles']
        desc = []
        news = []
        img = []
        content = []
        url = []
        for i in range(len(articles)):
            # Optional extra filter: keyword in articles[i]['title'] or keyword in articles[i]['description']
            myarticles = articles[i]
            news.append(myarticles['title'])
            desc.append(myarticles['description'])
            img.append(myarticles['urlToImage'])
            content.append(myarticles['content'])
            url.append(myarticles['url'])
        mylist = zip(news, desc, img, content, url)
        context = {"mylist": mylist, "curr_user": logged_user}
        return render(request, 'bbc.html', context)
Example #4
def blog():
    page = request.args.get('page', 1, type=int)
    posts = Post.query.order_by(Post.date_posted.desc()).paginate(page=page,
                                                                  per_page=10)

    newsapi = NewsApiClient(api_key="12692f50f97a481cb516c2d540b6dc15")
    topheadlines = newsapi.get_top_headlines(
        sources=
        "google-news-in,the-verge,bbc-news,wired,business-insider,google-news",
        page_size=10)

    articles = topheadlines['articles']

    desc = []
    news = []
    img = []

    for i in range(len(articles)):
        myarticles = articles[i]

        img.append(myarticles['urlToImage'])
        news.append(myarticles['title'])
        desc.append(myarticles['description'])

    mylist = zip(news, desc, img)

    return render_template('blog.html', context=mylist, posts=posts)
Example #5
    def get_top_headlines(cls):
        newsapi = NewsApiClient(api_key=settings.NEWS_API_KEY)
        top_headlines = newsapi.get_top_headlines(language='en',
                                                  country='us',
                                                  page_size=100)
        articles = top_headlines.get('articles', [])
        news_headlines = []
        batch_size = top_headlines.get('totalResults', 20)

        for article in articles:
            source = article['source']
            source_id = source['id']
            source_name = source['name']
            author = article['author']
            title = article['title']
            description = article['description']
            url = article['url']
            url_to_image = article['urlToImage']
            published_at = datetime.datetime.strptime(article['publishedAt'],
                                                      '%Y-%m-%dT%H:%M:%SZ')
            print(article)
            news_article = cls(
                source_id=source_id,
                source_name=source_name,
                author=author,
                title=title,
                description=description,
                url=url,
                url_to_image=url_to_image,
                published_at=published_at,
            )

            news_headlines.append(news_article)

        return cls.objects.bulk_create(news_headlines, batch_size)
Example #6
def news(request):
    # news API to show the latest news
    newsapi = NewsApiClient(api_key='1aab8f2e782a4a588fc28a3292a57979')
    top = newsapi.get_top_headlines(sources='cnn')

    l = top['articles']
    desc = []
    news = []
    img = []
    urllink = []

    for i in range(len(l)):
        f = l[i]
        news.append(f['title'])
        desc.append(f['description'])
        img.append(f['urlToImage'])
        urllink.append(f['url'])

    mylist = list(zip(news, desc, img, urllink))
    user = get_user_type(request)
    args = {
        'mylist': mylist,
        'obj': user['obj'],
        'user_type': user['user_type']
    }

    return render(request, 'news.html', args)
Example #7
def bbc():
    newsapi = NewsApiClient(api_key)
    topheadlines = newsapi.get_top_headlines(
        sources=
        "abc-news, al-jazeera-english, associated-press, axios, bbc-news, bloomberg, business-insider, buzzfeed, cbs-news, cnn, independent, msnbc, mtv-news, national-geographic, nbc-news, newsweek, new-york-magazine, politico, the-hill, the-huffington-post, the-wall-street-journal,the-washington-post, time, usa-today, vice-news"
    )

    articles = topheadlines['articles']

    news = []
    img = []
    desc = []
    url = []
    source = []

    for i in range(len(articles)):
        myarticles = articles[i]
        news.append(myarticles['title'])
        img.append(myarticles['urlToImage'])
        desc.append(myarticles['description'])
        url.append(myarticles['url'])
        source.append(myarticles['source'])

    mylist = zip(news, desc, img, url, source)
    return render_template('bbc.html',
                           title="Starbucks Sipping Hall Monitors",
                           context=mylist)
Example #8
def busin(request):

    newsapi = NewsApiClient(api_key='f776a61cf60744cf9f5473701caea44b')
    top = newsapi.get_top_headlines(
        sources='business-insider,cnbc,financial-post,fortune')

    l = top['articles']
    desc = []
    news = []
    img = []
    link = []
    published = []
    author = []

    for i in range(len(l)):
        f = l[i]
        news.append(f['title'])
        desc.append(f['description'])
        img.append(f['urlToImage'])
        link.append(f['url'])
        published.append(f['publishedAt'])
        author.append(f['author'])

    mylist = zip(news, desc, img, link, published, author)

    return render(request, 'busin.html', context={"mylist": mylist})
Example #9
def science_news(request):
    newsapi = NewsApiClient(api_key='e45eba9d14604d7dabbfbe7f3db2db90')
    science_headlines = newsapi.get_top_headlines(category='science',
                                                  language='en',
                                                  country='in')
    context = {'articles': science_headlines.get('articles')}
    return render(request, 'topnews/topnews.html', context)
Example #10
def world(request):

    newsapi = NewsApiClient(api_key='f776a61cf60744cf9f5473701caea44b')
    top = newsapi.get_top_headlines(
        sources='bbc-news,google-news,vice-news,al-jazeera-english')

    l = top['articles']
    desc = []
    news = []
    img = []
    link = []
    published = []
    author = []

    for i in range(len(l)):
        f = l[i]
        news.append(f['title'])
        desc.append(f['description'])
        img.append(f['urlToImage'])
        link.append(f['url'])
        published.append(f['publishedAt'])
        author.append(f['author'])

    mylist = zip(news, desc, img, link, published, author)

    return render(request, 'world.html', context={"mylist": mylist})
Example #11
def sports(request):

    newsapi = NewsApiClient(api_key='f776a61cf60744cf9f5473701caea44b')
    top = newsapi.get_top_headlines(
        sources='espn,talksport,bbc-sport,espn-cric-info')

    l = top['articles']
    desc = []
    news = []
    img = []
    link = []
    published = []
    author = []

    for i in range(len(l)):
        f = l[i]
        news.append(f['title'])
        desc.append(f['description'])
        img.append(f['urlToImage'])
        link.append(f['url'])
        published.append(f['publishedAt'])
        author.append(f['author'])

    mylist = zip(news, desc, img, link, published, author)

    return render(request, 'sports.html', context={"mylist": mylist})
Example #12
def ente(request):

    newsapi = NewsApiClient(api_key='f776a61cf60744cf9f5473701caea44b')
    top = newsapi.get_top_headlines(
        sources='buzzfeed,entertainment-weekly,ign,mashable,mtv-news')

    l = top['articles']
    desc = []
    news = []
    img = []
    link = []
    published = []
    author = []

    for i in range(len(l)):
        f = l[i]
        news.append(f['title'])
        desc.append(f['description'])
        img.append(f['urlToImage'])
        link.append(f['url'])
        published.append(f['publishedAt'])
        author.append(f['author'])

    mylist = zip(news, desc, img, link, published, author)

    return render(request, 'ente.html', context={"mylist": mylist})
Example #13
def updateLDA():
    api_file = "./newsapi.key"
    categories = ['business', 'entertainment', 'general', 'health', 'science',
                  'sports', 'technology']

    with open(api_file, "r") as apikey:
        newsapi = NewsApiClient(api_key=apikey.read().strip())

    headlines = {cat: newsapi.get_top_headlines(category=cat, language='en', country='in')
                 for cat in categories}
    pp_docs = []

    for category in headlines:
        for article in headlines[category]['articles']:
            #print(lemma_pp(article['title']))
            pp_docs.append(lemma_pp(article['title']))

    if os.path.exists(MODEL_DIR + "corpus_dict.model"):
        corp_d = Dictionary.load(MODEL_DIR + "corpus_dict.model")
        corp_d.add_documents(pp_docs)
    else:
        corp_d = Dictionary(pp_docs)
        corp_d.filter_extremes(no_below=2, no_above=0.5)

    dtm = [corp_d.doc2bow(doc) for doc in pp_docs]

    tfidf = TfidfModel(dtm)
    corp_tfidf = tfidf[dtm]

    lda = LdaMulticore(corp_tfidf, num_topics=5, id2word=corp_d, passes=60, workers=3)
    print(lda.print_topics(num_topics=5, num_words=5))
    checkdir(MODEL_DIR)
    corp_d.save(MODEL_DIR + "corpus_dict.model")
    #corp_tfidf.save(MODEL_DIR+"corpus_tfidf.model")
    lda.save(MODEL_DIR + "lda.model")
Example #14
def wordcloud():

    newsapi = NewsApiClient(api_key='fd4c0e4b873343a3a0a7b50168a89e9a')
    top_headlines = newsapi.get_top_headlines(language='en', page_size=60)
    articles = top_headlines["articles"]

    # Load stop words, one per line.
    sw = set()
    with open('myfile.txt', 'r') as file1:
        for line in file1:
            sw.add(line.strip())

    # Count how often each lower-cased, alphabetic, non-stop word appears in the headlines.
    mydic = collections.defaultdict(int)
    for article in articles:
        headline = article["title"]
        words = headline.split()
        for word in words:
            word = word.lower()
            if word.isalpha() and word not in sw:
                mydic[word] += 1

    # Keep the 30 most frequent words for the cloud.
    mywords = []
    mykeys = sorted(mydic, key=lambda k: mydic[k], reverse=True)
    for key in mykeys:
        e = dict()
        e["word"] = key
        e["size"] = mydic[key]
        mywords.append(e)
    mywords = mywords[:30]
    return jsonify(cloudwords=mywords)
Example #15
def index(request):

    newsapi = NewsApiClient(api_key='93ec2510842647889c3913445b754c83')
    # /v2/top-headlines
    top_headlines = newsapi.get_top_headlines(sources='bbc-news,the-verge')
    # category='health',
    # language='en',
    # country='us')

    all_articles = top_headlines['articles']

    desc = []
    news = []
    img = []
    url = []

    for i in range(len(all_articles)):
        my_articles = all_articles[i]

        news.append(my_articles['title'])
        desc.append(my_articles['description'])
        url.append(my_articles['url'])
        img.append(my_articles['urlToImage'])

    mylist = zip(news, desc, url, img)

    return render(request, 'index.html', context={'mylist': mylist})
Example #16
def search_top_publication_by_keywords(q, country, language, page_size, page):
    """
    Returns the latest top headlines matching the given keywords.
    """
    try:
        page = int(page)
    except ValueError:
        print('The page number cannot be empty or a non-numeric string')
        page = int(input('Enter the page number again: '))
    except TypeError:
        print('The page number cannot be a string')
        page = int(input('Enter the page number again: '))

    try:
        page_size = int(page_size)
    except ValueError:
        print('The page size cannot be empty, greater than 25, or a non-numeric string')
        page_size = int(input('Enter the page size again: '))
    except TypeError:
        print('The page size cannot be a string')
        page_size = int(input('Enter the page size again: '))

    res = NewsApiClient(api_key='5ec9e95dabaa4e25a0877794212d9b55')

    top_headlines = res.get_top_headlines(q=q,
                                          country=country,
                                          language=language,
                                          page_size=int(page_size),
                                          page=int(page))

    print(top_headlines)
Example #17
def search(request):
    query = request.GET['query']
    newsapi = NewsApiClient(api_key='f2d21efaf1e4456db37ccaefbb14a4c2')
    top = newsapi.get_top_headlines(q=str(query), language='en')

    l = top['articles']

    for i in range(len(l)):
        f = l[i]
        f['publishedAt'] = f['publishedAt'][0:10]
        if not newslist.objects.filter(title=f['title']).exists():
            newslist(title=f['title'],
                     description=f['description'],
                     image=f['urlToImage'],
                     author=f['author'],
                     url=f['url'],
                     publishedAt=f['publishedAt'],
                     content=f['content']).save()
    if len(query) > 100:
        allPosts = []
    else:
        allPosts = (newslist.objects.filter(
            title__icontains=query).order_by('-post_id')
                    or Newscountrywise.objects.filter(
                        title__icontains=query).order_by('-post_id')
                    or Newscatwise.objects.filter(
                        title__icontains=query).order_by('-post_id'))
    if not allPosts:
        messages.error(request, "Please enter valid search")
    params = {'allPosts': allPosts, 'query': query}
    return render(request, 'newsapp/search.html', params)
Example #18
    def run(self, dispatcher, tracker, domain):
        from newsapi import NewsApiClient

        # Init
        newsapi = NewsApiClient(api_key='f908755783e34e738776e64eeacfbd17')

        print(tracker.get_slot('country'))

        country = tracker.get_slot('country')

        # coco: the country_converter package, assumed imported at module level
        country_iso2 = coco.convert(names=country, to='ISO2', not_found=None)

        print(country_iso2)

        country_iso2_low = country_iso2.lower()

        # /v2/top-headlines
        top_headlines = newsapi.get_top_headlines(  # q=country,
                                                    # sources='bbc-news,the-verge',
                                                    country=country_iso2_low)

        # json round-trip (json assumed imported at module level); yields a plain-dict copy
        news_json = json.dumps(top_headlines)
        list_articles = json.loads(news_json)

        for articles in list_articles['articles']:
            print('Author: ' + str(articles['author']))
            print('Source: ' + str(articles['source']['name']))
            print('Title: ' + str(articles['title']))
            print('URL: ' + str(articles['url']) + "\n")
            print("############################################################")
            dispatcher.utter_message('**Source:** ' + str(articles['source']['name']) +
                                     '\n **Title:** ' + str(articles['title']) +
                                     '\n **URL:** ' + str(articles['url']) + '\n')
Example #19
def news(news_source: str = 'fox') -> None:
    """Says news around the user's location.

    Args:
        news_source: Source from where the news has to be fetched. Defaults to ``fox``.
    """
    if not env.news_api:
        logger.warning("News apikey not found.")
        support.no_env_vars()
        return

    sys.stdout.write(f'\rGetting news from {news_source} news.')
    news_client = NewsApiClient(api_key=env.news_api)
    try:
        all_articles = news_client.get_top_headlines(
            sources=f'{news_source}-news')
    except newsapi_exception.NewsAPIException:
        speaker.speak(
            text=f"I wasn't able to get the news {env.title}! "
            "I think the News API broke, you may try after sometime.")
        return

    speaker.speak(text="News around you!")
    speaker.speak(text=' '.join(
        [article['title'] for article in all_articles['articles']]))
    if shared.called_by_offline:
        return

    if shared.called['report'] or shared.called['time_travel']:
        speaker.speak(run=True)
Example #20
def sport():
    newsapi = NewsApiClient(api_key='b92cb162f8064f81964d4268970377c1')

    top_headlines = newsapi.get_top_headlines(category='sports',
                                              language='en',
                                              country='in')

    articles = top_headlines['articles']

    print(len(articles))

    desc = []
    news = []
    img = []
    link = []

    for i in range(len(articles)):
        myarticle = articles[i]
        news.append(myarticle['title'])
        desc.append(myarticle['description'])
        img.append(myarticle['urlToImage'])
        link.append(myarticle['url'])

    mylist = zip(news, desc, img, link)

    return render_template('sports.html', context=mylist)
Example #21
def rightwing():
    newsapi = NewsApiClient(api_key)
    rightwing = newsapi.get_top_headlines(
        sources=
        "breitbart-news, fox-news, national-review,  the-american-conservative, the-hill, the-jerusalem-post,  the-washington-times"
    )

    articles = rightwing['articles']

    news = []
    img = []
    desc = []
    url = []
    source = []

    for i in range(len(articles)):
        myarticles = articles[i]
        news.append(myarticles['title'])
        img.append(myarticles['urlToImage'])
        desc.append(myarticles['description'])
        url.append(myarticles['url'])
        source.append(myarticles['source'])

    mylist = zip(news, desc, img, url, source)
    return render_template('right-wing-news.html',
                           title="Mountain Dew Lovin' Far Right Extremists",
                           context=mylist)
Example #22
    def download_basic_news(self):
        """

        :return:
        """
        columns = [
            'id_source', 'name', 'author', 'title', 'description', 'url',
            'urlToImage', 'publishedAt', 'content'
        ]
        request_number = self.NUM_NEWS_TO_SCRAP

        # DATA PREPARATION
        # Building and executing the request to newsapi.org
        newsapi = NewsApiClient(api_key='a11cabb5333f4ade87a27e20f28bb568')
        all_articles = newsapi.get_top_headlines(sources=self.m_source_name,
                                                 language='en',
                                                 page_size=request_number)

        # DATA FORMATTING
        data = pd.DataFrame.from_dict(all_articles)
        data = data['articles'].apply(pd.Series)
        new_news = pd.concat(
            [data.drop(['source'], axis=1), data['source'].apply(pd.Series)],
            axis=1)
        new_news = new_news[[
            'id', 'name', 'author', 'title', 'description', 'url',
            'urlToImage', 'publishedAt'
        ]]
        new_news.rename(columns={'id': 'id_source'}, inplace=True)

        return new_news
Example #23
def index_page():
    global is_auth, current_user_logged_id
    if request.method == 'POST':
        if is_auth == 0:
            print(str(current_user_logged_id))
            return redirect(url_for('user_login'))
        if 'player' in request.form:
            print('Goes to All music Here with ' + str(is_auth) +
                  " and user id is " + str(current_user_logged_id))
            return redirect(url_for('all_music'))
        if 'profile' in request.form:
            return redirect(url_for('profile', u_id=current_user_logged_id))
        if 'logout' in request.form:
            return redirect(url_for('log_out'))
    news_client = NAC_client(api_key='f8fd249bda5f49c1b1ed79d08e99440d')
    try:
        all_news = news_client.get_top_headlines(q='music')
    except Exception:
        all_news = None
    all_users = User.query.all()
    all_songs = Music.query.all()
    all_albums = Album.query.all()
    all_artists = Artist.query.all()
    dashboard_data = dashboard_wrapper(all_users, all_songs, all_albums,
                                       all_artists)
    news_list = []
    if all_news is None:
        news_list = 'No Internet Error Encountered while Fetching News'
    else:
        for news_obj in all_news['articles']:
            news_list.append(news_obj)
    return render_template("index.html",
                           news_data=news_list,
                           dash_data=dashboard_data,
                           is_auth=is_auth)
Example #24
def Index(request):
    newsapi = NewsApiClient(api_key="3261097a702947809aad72c0ab3c2291")
    topheadlines = newsapi.get_top_headlines(country="in")
    articles = topheadlines['articles']

    desc = []
    news = []
    img = []

    for i in range(len(articles)):
        myarticles = articles[i]

        news.append(myarticles['title'])
        desc.append(myarticles['description'])
        img.append(myarticles['urlToImage'])

    mylist = zip(news, desc, img)

    return render(request, 'mainpage.html', context={"mylist": mylist})
Example #25
def getNews():
    # Init
    newsapi = NewsApiClient(api_key='input your key')

    top_headlines = newsapi.get_top_headlines(language='en')  # country='us'
    articles = top_headlines['articles']
    return articles
Example #26
def newsapi_client(stock, company):
    """
    Using NewsApiClient object -
    Note that NewsApiClient uses underscores in parameter names:
    exclude_domains, sort_by, page_size, api_key
    """

    # Init
    newsapi = NewsApiClient(api_key=API_KEY)

    # /v2/top-headlines
    top_headlines = newsapi.get_top_headlines(q=stock,
                                              sources='bbc-news,the-verge',
                                              language='en')
    save_data.save_json(top_headlines, "top_headlines")

    # /v2/everything
    everything = newsapi.get_everything(q=company,
                                        from_param='2021-01-01',
                                        to='2021-01-07',
                                        language='en',
                                        sort_by='relevancy',
                                        page_size=3,
                                        page=1)
    save_data.save_json(everything, "everything")

    # /v2/sources
    sources = newsapi.get_sources(language="en")
    save_data.save_json(sources, "sources")
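For comparison: the underlying REST endpoint spells these same parameters in camelCase (pageSize, sortBy, excludeDomains, apiKey), and the client translates its snake_case keyword arguments for you. A minimal sketch of the equivalent raw /v2/everything request using the requests package, with a placeholder API key and hypothetical query values:

import requests

# Raw call to /v2/everything; the camelCase query parameters below are what
# NewsApiClient exposes as snake_case keyword arguments (sort_by, page_size, api_key).
resp = requests.get(
    "https://newsapi.org/v2/everything",
    params={
        "q": "example company",    # hypothetical search term
        "from": "2021-01-01",
        "to": "2021-01-07",
        "language": "en",
        "sortBy": "relevancy",     # sort_by in the client
        "pageSize": 3,             # page_size in the client
        "page": 1,
        "apiKey": "YOUR_API_KEY",  # placeholder
    },
)
everything = resp.json()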
Example #27
def technology_news():

    connect = MongoClient()
    db = connect.newsapi
    collection = db.newsapi_data

    headers = {'Authorization': '7cb11b15c84a4ff1af515df4c4dbaf47'}

    top_headlines_url = 'https://newsapi.org/v2/top-headlines'
    everything_news_url = 'https://newsapi.org/v2/everything'
    sources_url = 'https://newsapi.org/v2/sources'

    newsapi = NewsApiClient(api_key="7cb11b15c84a4ff1af515df4c4dbaf47")
    topheadlines = newsapi.get_top_headlines(country="us",
                                             category="technology")

    articles = topheadlines['articles']
    desc = []
    news = []
    img = []

    for i in range(len(articles)):
        myarticles = articles[i]
        news.append(myarticles['title'])
        desc.append(myarticles['description'])
        img.append(myarticles['urlToImage'])

    document = {'title': news, 'description': desc, 'urlToImage': img}
    collection.insert_one(document)

    print('data has been entered')
Example #28
def newsJutsu():
    newsapi = NewsApiClient(api_key=NEWSAPI_KEY)

    top_headlines = newsapi.get_top_headlines(language='en')
    articles = top_headlines['articles']

    # Rate of speech
    rate = engine.getProperty('rate')
    engine.setProperty('rate', 220)

    for i in range(0, len(articles)):
        if i > 5:
            speak("Hey, are you still listening?")
            response = "None"
            while response == "None":
                response = takeCommand(mode="inside_skill")
            if "yes" in response:
                pass
            else:
                return "Alright"

        if articles[i]['title'] is not None and articles[i]['description'] is not None:
            # Either title or description
            speak(articles[i]['description'])
            # description = articles[i]['description'].replace("\n"," ")
            speak("Coming up next...")

    # Reset rate of speech
    rate = engine.getProperty('rate')
    engine.setProperty('rate', 195)

    return "That's all for now"
Example #29
    def getNews(self):
        # Creating News API object
        newsapi = NewsApiClient(api_key=self.news_api_key)
        top_headlines = newsapi.get_top_headlines(
            sources='bbc-news,the-verge,the-wall-street-journal,bloomberg,the-economist,wired'
        )
        articles = top_headlines['articles']
        if len(articles) > 10:
            articles = articles[0:10]
        '''
        count = 1
        for article in articles:
            print(article['title'])
            print(article['url'])
            print(article['publishedAt'])
            print(article['source']['name'])
            print('-'*60)
            command = 'say "Number %s: %s writes %s"' % (str(count), article['source']['name'].encode('utf-8'), article['title'].encode('utf-8'))
            count += 1
            try:
                os.system(command)
            except KeyboardInterrupt:
                print("Interrupted! How rude!")
            finally:
                sys.exit()
        '''
        return articles
Example #30
def espn():
    '''
    View function that returns the ESPN headlines page.
    '''

    newsapi = NewsApiClient(api_key='e7f56e932284432c89095aa5928310c7')

    topheadlines = newsapi.get_top_headlines(sources="espn")

    articles = topheadlines['articles']

    desc = []
    news = []
    date = []
    link = []

    for i in range(len(articles)):
        myarticles = articles[i]

        news.append(myarticles['title'])
        desc.append(myarticles['description'])
        date.append(myarticles['publishedAt'])
        link.append(myarticles['url'])

    mylist = zip(news, desc, date, link)

    title = 'Home - Welcome to newshub'
    return render_template('espn.html', title=title, context=mylist)