Example #1
from flask import Flask, render_template
import requests
import wikipedia
from newsapi import NewsApiClient

app = Flask(__name__)
wikipedia.set_lang('simple')
newsapi = NewsApiClient(api_key='466f16c5dc2445eabe6a30991514a281')


@app.route('/', methods=['GET'])
def main():
    articles = []
    tech_article = newsapi.get_top_headlines(sources='techcrunch',
                                             page_size=1).get('articles')
    science_article = newsapi.get_top_headlines(sources='new-scientist',
                                                page_size=1).get('articles')
    math_article = newsapi.get_everything(q='math',
                                          sources='new-scientist',
                                          page_size=1).get('articles')
    politics_article = newsapi.get_everything(q='politics',
                                              sources='bbc-news',
                                              page_size=1).get('articles')
    articles.append(math_article)
    articles.append(politics_article)
    articles.append(tech_article)
    articles.append(science_article)
    tag_list = ['Math', 'Politics', 'Tech', 'Science']
    return render_template('index.html', articles_list=articles, tags=tag_list)
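
The four request/append pairs above differ only in the tag and the endpoint; a table-driven sketch of the same view, assuming the same app, newsapi client, and index.html template as in Example #1:

# A consolidation sketch of Example #1 (same client and template assumed);
# the (tag, fetch) table replaces the four hand-written request blocks.
FEEDS = [
    ('Math', lambda: newsapi.get_everything(q='math', sources='new-scientist', page_size=1)),
    ('Politics', lambda: newsapi.get_everything(q='politics', sources='bbc-news', page_size=1)),
    ('Tech', lambda: newsapi.get_top_headlines(sources='techcrunch', page_size=1)),
    ('Science', lambda: newsapi.get_top_headlines(sources='new-scientist', page_size=1)),
]

@app.route('/', methods=['GET'])
def main():
    tags = [tag for tag, _ in FEEDS]
    articles = [fetch().get('articles') for _, fetch in FEEDS]
    return render_template('index.html', articles_list=articles, tags=tags)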

Example #2
                                      sources='bbc-news,the-verge',
                                      domains='bbc.co.uk,techcrunch.com',
                                      from_param='2017-12-01',
                                      to='2017-12-12',
                                      language='en',
                                      sort_by='relevancy',
                                      page=2)

# /v2/sources
sources = newsapi.get_sources()
"""

from newsapi import NewsApiClient

# Init
newsapi = NewsApiClient(api_key='deb16b6cdde14e63be41830980b7d40e')

# /v2/top-headlines -- note: 'sources' cannot be mixed with 'country' or
# 'category'; newsapi-python raises ValueError if you combine them
top_headlines = newsapi.get_top_headlines(q='bitcoin',
                                          category='business',
                                          language='en',
                                          country='us')

# /v2/everything
all_articles = newsapi.get_everything(q='bitcoin',
                                      sources='bbc-news,the-verge',
                                      domains='bbc.co.uk,techcrunch.com',
                                      from_param='2017-12-01',
                                      to='2017-12-12',
                                      language='en',
                                      sort_by='relevancy',
                                      page=2)
Example #3
import httplib2
from django.conf import settings
from django.http import HttpResponseRedirect
from django.shortcuts import render
from googleapiclient.discovery import build
from newsapi import NewsApiClient
from oauth2client import xsrfutil
from oauth2client.contrib.django_util.storage import DjangoORMStorage
from random import shuffle

# FLOW and CredentialsModel are app-level objects defined elsewhere.


def dashboard(request):
    storage = DjangoORMStorage(CredentialsModel, 'id', request.user, 'credential')
    credential = storage.get()
    if credential is None or credential.invalid:
        FLOW.params['state'] = xsrfutil.generate_token(settings.SECRET_KEY,
                                                       request.user)
        authorize_url = FLOW.step1_get_authorize_url()
        return HttpResponseRedirect(authorize_url)
    else:
        http = httplib2.Http()
        http = credential.authorize(http)
        service = build('gmail', 'v1', http=http)
        print('GMAIL_AUTHENTICATE PARTIAL EXECUTION')
        print('access_token = ', credential.access_token)
        status = True
        access_token = credential.access_token
        senders = []
        messages = service.users().messages().list(userId='me', maxResults=10).execute().get('messages', [])
        for message in messages:
            mdata = service.users().messages().get(userId='me', id=message['id']).execute()
            for smalldicts in mdata['payload']['headers']:
                if smalldicts['name'] == 'From':
                    sender = smalldicts['value'].split('<')[0].strip()
                    senders.append(sender)
        
        queries = []
        for sender in senders:
            if sender not in queries:
                queries.append(sender)
        print(queries)
        
        articleslist = []
        newsapi = NewsApiClient(api_key='513c2df2888d486aa67cd3835954f80d')

        for query in queries:
            all_articles = newsapi.get_everything(q=query,
                                      from_param='2019-10-04',
                                      to='2019-10-05',
                                      language='en',
                                      sort_by='relevancy',)
            #print(all_articles)
            if (all_articles['totalResults'] > 0):
                for newsarticle in all_articles['articles']:
                    article = {}
                    article['query'] = query
                    article['source'] = newsarticle['source']['name']
                    article['title'] = newsarticle['title']
                    article['author'] = newsarticle['author']
                    article['content'] = newsarticle['content']
                    article['description'] = newsarticle['description']
                    article['url'] = newsarticle['url']
                    article['imgurl'] = newsarticle['urlToImage']
                    article['date'] = ( (newsarticle['publishedAt']).split('T') )[0]
                    articleslist.append(article)
                #print(articleslist, end='\n\n')
        dashboardwant = True
        shuffle(articleslist)
        return render(request, 'mainapp/homepage.html', {'status': status, 
                                                         'dashboardwant': dashboardwant,
                                                         'access_token': access_token,
                                                         'articleslist' : articleslist,})
Example #4
from dash.dependencies import Input, Output
import chart_studio
# Login: group13Yes
# Password: group13HelloWorld
chart_studio.tools.set_credentials_file(username='******',
                                        api_key='OspkIgNNW6CTIM7110px')
import pandas as pd
from datetime import datetime
# Sql
from sqlConnector import get_history, get_all_stocks
# News API
# Login = [email protected]
# Password = group13HelloWorld
from newsapi import NewsApiClient

newsapi = NewsApiClient(api_key='0f58067ab2ad447ba8e4af81ecea25c5')


@app.route("/")
@app.route("/home")
def home():
    return render_template('home.html')


@app.route("/register", methods=['GET', 'POST'])
def register():
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(first_name=form.first_name.data,
                    last_name=form.last_name.data,
                    username=form.username.data,
Example #5
from newsapi import NewsApiClient

# Init
newsapi = NewsApiClient(api_key='793110ecda064a68846a1510d0e7d0ae')
all_articles = newsapi.get_everything(
    q='bitcoin',
    sort_by='relevancy',
)
print(all_articles)
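
The printed response is a plain dict; a short follow-up sketch of reading the usual fields out of it ('totalResults' and 'articles' are the keys these examples rely on throughout):

# Sketch: iterate the parsed response instead of printing the raw dict.
print('total results:', all_articles['totalResults'])
for article in all_articles['articles']:
    print(article['title'], '-', article['url'])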
Example #6
from urllib.request import urlopen as Ureq
from bs4 import BeautifulSoup as soup
from newsapi import NewsApiClient
# pre-processor
my_url = 'http://topforeignstocks.com/stock-lists/the-complete-list-of-biotech-stocks-trading-on-nasdaq/'  # List of biotech companies
uClient = Ureq(my_url)  #downloads webpage
page_html = uClient.read()
page_soup = soup(page_html, "html.parser")
# print(page_soup.tbody.td)
bio_tech_companies = page_soup.findAll("td", {"class": "column-2"})
for i in range(1):
    query = str(bio_tech_companies[i].text.strip())
print(query)
newsapi = NewsApiClient(api_key='42eab217e53348febe920e907f524b0f')
top_headlines = newsapi.get_top_headlines(q='biotech', language='en')
print(top_headlines)
uClient.close()
Example #7
json is part of the Python standard library; no pip install is needed.

"""


def speak(text):
    from win32com.client import Dispatch
    speaker = Dispatch("SAPI.SpVoice")
    speaker.Speak(text)


if __name__ == '__main__':
    from newsapi import NewsApiClient
    speak("welcome sir - This is BBC News. Created By Mister HACKER")

    newsapi = NewsApiClient(api_key='7067179f741445b0b6fd8ebdbfda452e')

    news_source = newsapi.get_sources()
    for source in news_source['sources']:
        print(
            'News Channel Name:',
            source['name'],
        )
        # speak(source['name'])

    print("\n\n")
    print("\t\t\t======= DAILY LATEST NEWS PAPER =======")
    print("\n\n")
    top_headline = newsapi.get_top_headlines(q='world war', language='en')

    for article in top_headline['articles']:
Example #8
def process_text(input, intent, request=None):
    try:
        if intent == 'word':
            open_application('word')
        if intent == 'firefox':
            open_application('firefox')
        if intent == 'youtube':
            assistant_speaks('Sure. Tell me what do you want to search for')
            ans = get_audio()
            result = parse_stopwords(ans)
            search_web('youtube ' + result)
        if intent == 'knowledge':
            if 'about' in input:
                parse_text(input, 'about')
            if 'what is' in input:
                parse_text(input, 'is')
            if 'who was' in input:
                parse_text(input, 'was')
        if intent == 'web_search':
            if 'about' in input:
                ans = input
                indx = ans.split().index('about')
                query = ans.split()[indx + 1:]
                string_query = ' '.join(query)
                result = parse_stopwords(string_query)
                search_web('google ' + result)
            if 'for' in input:
                ans = input
                indx = ans.split().index('for')
                query = ans.split()[indx + 1:]
                string_query = ' '.join(query)
                result = parse_stopwords(string_query)
                search_web('google ' + result)
            assistant_speaks('Going back to the main interface')
        movie_list_intents = ['movie', 'horror', 'action', 'comedy', 'popular', 'thriller']
        if intent in movie_list_intents:
            from tmdbv3api import Movie, Discover

            movie = Movie()
            discover = Discover()
            if intent == 'popular':
                pop_movie = discover.discover_movies({
                    'sort_by': 'popularity.desc'
                })
                assistant_speaks("The most popular 5 movies are the following")
                pop_movies = ", ".join(str(x) for x in pop_movie[0:5])
                assistant_speaks(pop_movies)
            if intent == 'horror':
                parse_movies(27, discover, movie, 'horror')
            if intent == 'action':
                parse_movies(28, discover, movie, 'action')
            if intent == 'comedy':
                parse_movies(35, discover, movie, 'comedy')
            if intent == 'thriller':
                parse_movies(53, discover, movie, 'thriller')
            if intent == 'movie':
                assistant_speaks('Do you want a movie recommendation based on your favorite movie?')
                ans = get_audio()
                if 'yes' in ans:
                    try:
                        pacient = Pacient.objects.get(user=request.user)
                    except Pacient.DoesNotExist:
                        assistant_speaks('it looks like you have not discussed that with me. '
                                         'please enter discussion module first')
                        return
                    pac_details = PacientDetails.objects.get(pacient=pacient)
                    fav_movie = pac_details.fav_movie
                    search_movie = movie.search(fav_movie)
                    assistant_speaks('I will read top three recommended movies based on your favorite movie')
                    res = search_movie[0]
                    recommendations = movie.recommendations(movie_id=res.id)
                    cnt = 0
                    for recommendation in recommendations:
                        if cnt >= 3:
                            break
                        else:
                            assistant_speaks(recommendation.title)
                            assistant_speaks(recommendation.overview)
                        cnt += 1

                    assistant_speaks('Exiting movie module')
                else:
                    assistant_speaks(
                        'I can give you the top movies based on a genre. Just tell me what are you looking for')
                    ans = get_audio()
                    res, ints = chatbot_response(ans)
                    process_text(ans, ints)

        pacient = Pacient.objects.get(user=request.user)
        pac_pars = PacientParsing.objects.get(pacient=pacient)
        if intent == 'event':
            from googleapiclient.discovery import build
            from google_auth_oauthlib.flow import InstalledAppFlow
            scopes = ['https://www.googleapis.com/auth/calendar']
            flow = InstalledAppFlow.from_client_secrets_file("client_secret.json", scopes=scopes)
            credentials = flow.run_console()
            pickle.dump(credentials, open("token.pkl", "wb"))
            credentials = pickle.load(open("token.pkl", "rb"))
            service = build("calendar", "v3", credentials=credentials)

            calendarlist = service.calendarList().list().execute()
            calendar_id = calendarlist['items'][0]['id']
            result = service.events().list(calendarId=calendar_id, timeZone="Europe/Bucharest").execute()

            timp_event = get_audio()

            assistant_speaks("What about the name of the event?")
            name = get_audio()

            assistant_speaks("would you like to add a description?")
            ans = get_audio()
            sub_resp, sub_intent = chatbot_response(ans)
            if sub_intent == 'yes':
                assistant_speaks("please tell me the description")
                desc = get_audio()
                assistant_speaks("should i add a location too?")
                ans = get_audio()
                sub_resp, sub_intent = chatbot_response(ans)
                if sub_intent == 'yes':
                    assistant_speaks("Go ahead, tell me the location")
                    location = get_audio()
                    create_event(service, timp_event, name, 1, desc, location)
                elif sub_intent == 'no':
                    create_event(service, timp_event, name, 1, desc)
            elif sub_intent == 'no':
                create_event(service, timp_event, name)
            assistant_speaks('Event ' + name + ' created.')
            assistant_speaks('Exiting event module')
        if intent == 'web':
            ans = get_audio()
            result = parse_stopwords(ans)
            search_web('google ' + result)
            assistant_speaks('Exiting web module')
        if intent == 'discussion':
            assistant_speaks('Is there a certain topic you would like to discuss?')
            ans = get_audio()
            # print(ans)
            sub_resp, sub_intent = chatbot_response(ans)
            # print(sub_intent)
            if sub_intent == 'no':
                assistant_speaks('Then how about you tell me more about yourself?')
                try:
                    pac_details = PacientDetails.objects.get(pacient=pacient)
                except PacientDetails.DoesNotExist:
                    pac_details = PacientDetails.objects.create(pacient=pacient)
                if pac_details.fav_activity == '':
                    assistant_speaks('Tell me your favorite activity')
                    ans = get_audio()
                    if "don't" not in ans or 'no' not in ans:
                        ans = parse_details(ans)
                        # print(ans)
                        pac_details.fav_activity = ans
                        pac_details.save()
                if pac_details.fav_movie == '':
                    assistant_speaks('what about your favorite movie?')
                    ans = get_audio()
                    if "don't" not in ans or 'no' not in ans:
                        ans = parse_details(ans)
                        pac_details.fav_movie = ans
                        pac_details.save()
                if pac_details.fav_game == '':
                    assistant_speaks('Tell me your favorite game')
                    ans = get_audio()
                    if "don't" not in ans or 'no' not in ans:
                        ans = parse_details(ans)
                        pac_details.fav_game = ans
                        pac_details.save()
                if pac_details.fav_passion == '':
                    assistant_speaks('Do you have a favorite passion?')
                    ans = get_audio()
                    if "don't" not in ans or 'no' not in ans:
                        ans = parse_details(ans)
                        pac_details.fav_passion = ans
                        pac_details.save()
                if pac_details.fav_song == '':
                    assistant_speaks('What is your favorite song?')
                    ans = get_audio()
                    if "don't" not in ans or 'no' not in ans:
                        ans = parse_details(ans)
                        pac_details.fav_song = ans
                        pac_details.save()
                if pac_details.fav_book == '':
                    assistant_speaks('And your favorite book is?')
                    ans = get_audio()
                    if "don't" not in ans or 'no' not in ans:
                        ans = parse_details(ans)
                        pac_details.fav_book = ans
                        pac_details.save()
                assistant_speaks("How was your day so far? When you have finished talking, please say that's it")
                r = sr.Recognizer()
                problems = []
                happy_list = ['That sounds great !', 'Wow, I am glad for you', 'Good job!', 'This sounds awesome']
                neutral_list = ['Okay, continue', 'Understood', 'What next?', 'Is there something more?']
                sad_list = ['What is the specific reason that made you feel this way? Please keep it short',
                            'Can you please tell me what is the root of the problem? Please keep it short',
                            'what disturbed you that much? Please keep it short']
                with sr.Microphone() as source:
                    while True:
                        try:
                            print('Listening to your day: ')
                            audio = r.listen(source)
                            text = r.recognize_google(audio, language='en-US')
                            blob1 = TextBlob(text)
                            blob1 = blob1.correct()
                            text = text.lower()
                            print(format(blob1.sentiment))
                            if "that's it" in text:
                                break
                            if blob1.polarity < 0:
                                assistant_speaks(random.choice(sad_list))
                                motiv = get_audio_simple()
                                pac_pars._negative_problems += motiv + '\n'
                                pac_pars.contor_mesaje += 1
                                if pac_pars.contor_mesaje % 3 == 0:
                                    pac_pars.contor_mesaje = 0
                                    send_mail(
                                        'Mesaj informare pacient ' + str(pacient),
                                        'Urmatoarele probleme par sa-l afecteze pe pacient: ' + pac_pars._negative_problems,
                                        '*****@*****.**',
                                        ['*****@*****.**'],
                                        fail_silently=False,
                                    )
                                problems.append(motiv)
                                pac_pars.save()
                                assistant_speaks('Sorry to hear that, please continue')
                            if blob1.polarity > 0.5:
                                assistant_speaks(random.choice(happy_list))
                            elif 0 <= blob1.polarity <= 0.5:
                                assistant_speaks(random.choice(neutral_list))
                        except:
                            continue

                motiv = random.choice(problems)
                research_later = "what+to+do+when+" + motiv
                ua = UserAgent()
                google_url = "https://www.google.com/search?q=" + research_later
                response = requests.get(google_url, {"User-Agent": ua.random})
                soup = BeautifulSoup(response.text, "html.parser")
                result_div = soup.find_all('div', attrs={'class': 'ZINbbc'})

                links = []
                titles = []
                descriptions = []
                for r in result_div:
                    # Checks if each element is present, else, raise exception
                    try:
                        link = r.find('a', href=True)
                        title = r.find('div', attrs={'class': 'vvjwJb'}).get_text()
                        description = r.find('div', attrs={'class': 's3v9rd'}).get_text()

                        # Check to make sure everything is present before appending
                        if link != '' and title != '' and description != '':
                            links.append(link['href'])
                            titles.append(title)
                            descriptions.append(description)
                    # Next loop if one element is not present
                    except:
                        continue
                to_remove = []
                clean_links = []
                for i, l in enumerate(links):
                    clean = re.search(r'/url\?q=(.*)&sa', l)

                    # Anything that doesn't fit the above pattern will be removed
                    if clean is None:
                        to_remove.append(i)
                        continue
                    clean_links.append(clean.group(1))
                # Remove the corresponding titles & descriptions
                # for x in to_remove:
                #     print(titles[x])
                #     print(descriptions[x])
                #     del titles[x]
                #     del descriptions[x]
                random_seed = random.randint(0, 3)
                print('rand_seed: ')
                print(random_seed)
                print('titles: ' + str(len(titles)))
                print('links: ' + str(len(clean_links)))
                assistant_speaks("I have found something regarding the problems you have just told me")
                assistant_speaks("The article title is called")
                assistant_speaks(titles[random_seed])
                assistant_speaks("Do you want me to open the link for you?")
                ans = get_audio()
                sub_resp, sub_intent = chatbot_response(ans)
                if sub_intent == 'yes':
                    driver = webdriver.Firefox()
                    driver.implicitly_wait(1)
                    driver.maximize_window()
                    driver.get(clean_links[random_seed])
                    assistant_speaks('I have opened the browser for you. Exiting discussion module')
                else:
                    assistant_speaks('Exiting discussion module')
                    return
            elif sub_intent == 'yes':
                assistant_speaks('Tell me what do you want to discuss.')
                ans = get_audio()
                res, intent = chatbot_response(ans)
                process_text(ans, intent, request)

        if intent == 'news':
            assistant_speaks("Would you like the news on a specific subject?")
            ans = get_audio()
            newsapi = NewsApiClient(api_key='b71542370a6247d493860e6b01d0d713')
            sub_resp, sub_intent = chatbot_response(ans)
            if sub_intent == 'yes':
                assistant_speaks('What would you like me to search for? Please be as specific as you can.')
                ans = get_audio()
                data = newsapi.get_everything(q=ans, language='en', page_size=5)
                articles = data['articles']
                assistant_speaks(
                    'You could choose one article by saying stop when i finish reading the headline. To continue '
                    'reading the headlines, just say continue. I found the following articles: ')
                for article in articles:
                    title = article['title']
                    url = article['url']
                    content = article['content']
                    assistant_speaks(title)
                    ans = get_audio()
                    if 'continue' in ans:
                        continue
                    elif 'stop' in ans:
                        assistant_speaks('I will read the article content')
                        assistant_speaks(content)
                        assistant_speaks(
                            'I can open the webpage which contains the article source. Do you want me to do that? ')
                        ans = get_audio()
                        sub_resp, sub_intent = chatbot_response(ans)
                        if sub_intent == 'yes':
                            driver = webdriver.Firefox()
                            driver.implicitly_wait(1)
                            driver.maximize_window()
                            driver.get(url)
                            r = sr.Recognizer()
                            assistant_speaks(
                                'I have opened your browser. To resume the articles read, just say resume. '
                                'If you want me to stop, just say stop')
                            with sr.Microphone() as source:
                                while True:
                                    print('Listening ...')
                                    audio = r.listen(source)
                                    try:
                                        text = r.recognize_google(audio, language='en-US')
                                        if 'resume' in text:
                                            break
                                        elif 'stop' in text:
                                            return
                                    except:
                                        continue
                        elif sub_intent == 'no':
                            assistant_speaks('would you like me to continue reading the next articles?')
                            ans = get_audio()
                            sub_resp, sub_intent = chatbot_response(ans)
                            if sub_intent == 'yes':
                                continue
                            elif sub_intent == 'no':
                                assistant_speaks('If you want to find out more, just let me know. Exiting news module')
                                break
            elif sub_intent == 'no':
                assistant_speaks('Alright, i am going to search for the top headlines')
                url = ('http://newsapi.org/v2/top-headlines?'
                       'country=us&'
                       'apiKey=b71542370a6247d493860e6b01d0d713')
                response = requests.get(url).json()
                articles = response['articles']
                assistant_speaks(
                    'Say stop after I finish reading the headline to tell you its content. To continue reading '
                    'the headlines, just say continue. I found the following articles: ')
                for article in articles:
                    title = article['title']
                    url = article['url']
                    content = article['content']
                    assistant_speaks(title)
                    ans = get_audio()
                    if 'continue' in ans:
                        continue
                    elif 'stop' in ans:
                        assistant_speaks('I will read the article content')
                        assistant_speaks(content)
                        assistant_speaks('I can open the webpage which contains the article source. Do you want me'
                                         ' to do that? ')
                        ans = get_audio()
                        sub_resp, sub_intent = chatbot_response(ans)
                        if sub_intent == 'yes':
                            driver = webdriver.Firefox()
                            driver.implicitly_wait(1)
                            driver.maximize_window()
                            driver.get(url)
                        elif sub_intent == 'no':
                            assistant_speaks('would you like me to continue reading the next articles?')
                            ans = get_audio()
                            sub_resp, sub_intent = chatbot_response(ans)
                            if sub_intent == 'yes':
                                return
                            elif sub_intent == 'no':
                                assistant_speaks('If you want to find out more, just let me know. Exiting news module')
                                break
                    elif 'exit' in ans:
                        break
    except Exception as e:
        print(e)
        assistant_speaks("I don't understand, Can you please repeat?")
        ans = get_audio()
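
The six favorite-activity/movie/game/passion/song/book prompts in the discussion branch above repeat one pattern; a refactoring sketch that factors it out, reusing the assistant_speaks/get_audio/parse_details helpers and PacientDetails fields the example assumes (the helper name is hypothetical, not in the source):

# Refactoring sketch, not the original code.
def ask_favorite(pac_details, field, prompt):
    if getattr(pac_details, field) == '':
        assistant_speaks(prompt)
        ans = get_audio()
        if "don't" not in ans and 'no' not in ans:
            setattr(pac_details, field, parse_details(ans))
            pac_details.save()

# Usage sketch:
# ask_favorite(pac_details, 'fav_movie', 'what about your favorite movie?')
# ask_favorite(pac_details, 'fav_song', 'What is your favorite song?')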
Example #9
from django.shortcuts import render
from newsapi import NewsApiClient
# Initialize the API client
newsapi = NewsApiClient(api_key='64f09c3d3cec421abc22451aabd11c5a')
all_articles = newsapi.get_everything(
    q='bitcoin',
    sources='google,the-verge,cnbc,bloomberg',
    language='en',
    sort_by='relevancy')

# get_everything already returns a parsed dict, so the JSON round-trip
# (dumps followed by loads) is unnecessary; use the response directly.
data = all_articles


def index(request):
    return render(request, 'index.html', {'data': data})
Example #10
import json
import os, re
from flask import Flask, jsonify, request
from newsapi import NewsApiClient

app = Flask(__name__)
app._static_folder = os.path.abspath("static/")

# Defined all API calls and initializations
# News API declare
newsapis = NewsApiClient(api_key="52adbcd73de3426da771cdcaecfac96c")

# Get top headlines (Used in slide_top_headlines() and word_cloud())
topheadlines = newsapis.get_top_headlines(language='en', page_size=30)
top_headlines_articles = topheadlines['articles']


# Index Page
def nonempty(data):
    return data is not None


def filter_json(json_file, number):
    count = 0
    result = []
    for i in range(len(json_file)):
        headlines = json_file[i]
        if nonempty(headlines['source']['name']) and nonempty(headlines['author']) and nonempty(headlines['title']) and \
Example #11
api = tweepy.API(auth, wait_on_rate_limit=True)
#
sp = spacy.load('en_core_web_sm')
tweet_list=[]
#
# client = MongoClient('mongodb://*****:*****@3.19.213.209/cool_db')
# db = client['cool_db']
# tweetDataDocument = db.tweetData
#
client = MongoClient(port=27017)
db = client['tweetDB']
tweetDataDocument = db.tweetData
searchable_list=["Canada","University","Dalhousie University","Halifax","Canada Education"]
for search_item in searchable_list:
    newsapi = NewsApiClient(api_key='0e1a8832c2b240e8a16234661e9f4847')
    pageSize=1
    while pageSize < 20:
        try:
            all_articles = newsapi.get_everything(q=search_item,page=pageSize)
            for each_article in all_articles["articles"]:
                try:
                    textVal = each_article["content"]
                    if textVal is not None:
                        #print(textVal)
                        resultObj = getOriginalText(textVal)
                        textVal = resultObj["text"]
                        extraChars=None
                        if 'extraChars' in resultObj.keys():
                            extraChars = resultObj['extraChars']
                        textVal=getTextFromURL(each_article["url"],textVal,extraChars)
Example #12
from newsapi import NewsApiClient

newsapi = NewsApiClient(api_key='692f7d1a9a4743918f99bc95c73a28ed')


class news_articles:
    def __init__(self, title, url):
        self.title = title
        self.url = url


# headlines = newsapi.get_everything(q = 'apple',
#                                         sources = 'bloomberg',
#                                         language = 'en',
#                                         sort_by = 'relevancy',
#                                         page = 1)

# results = []
# desc = []
# articles = headlines['articles']
# for ar in articles:
#     results.append(ar["title"])
#     desc.append(ar["description"])

# for i in range(len(results)):
#     print(i + 1, results[i])
#     print(desc[i])


def get_news(topic):
    news = newsapi.get_everything(
Example #13
import pandas as pd
import json
import time
import sys
import os
import csv
from newsapi import NewsApiClient

with open(
    '/Users/psehgal/dev/Sentiment.Analysis/preeti_final/sentiment_program_variables_ps.json'
) as variablefile:
    df_var = json.load(variablefile)

if df_var["runnewsapiflag"] != 'Y':
    sys.exit()
# this will avoid this api to run everytime we test our data pipeline

# Init
newsapi = NewsApiClient(api_key=df_var["newsapikey"])

# Delete CSV to overwrite
if os.path.exists('headlines.csv'):
    os.remove('headlines.csv')

# Create new CSV with headers
with open('headlines.csv', 'w', newline='') as f:
    w = csv.writer(f)
    w.writerow(['date', 'source', 'headline'])

# Obtain article count per day for date range
#start = '2020-04-14'
#end = '2020-05-14'
start = df_var["newsapistartdate"]
end = df_var["newsapienddate"]
Example #14
def news(request):
    newsapi = NewsApiClient(api_key='7184697691164311aaca455ed36c0b68')
    top_headlines = newsapi.get_top_headlines(sources='ign')
    return render(request, 'wegame/news.html', {
        'articles': top_headlines['articles']
    })
Example #15
def extract_news_articles(nlp, output_csv, output_lemmas):
    """ Extracts all tokens from title and descriptions of German news article providers.

    Note: this text corpus was not used

    :param nlp: spacy model
    :param output_csv: output path for normal token.text
    :param output_lemmas: output path for token.lemma_
    """
    # initialize newsapi
    api_key = '2f4a6aa461194cce948ded99f35fae6f'
    newsapi = NewsApiClient(api_key)

    start_time = timer()

    # get German news providers
    sources = [
        src['id'] for src in newsapi.get_sources(language='de')['sources']
    ]
    sources_string = ','.join(sources)

    # get articles
    articles = newsapi.get_everything(language='de',
                                      sources=sources_string,
                                      page_size=100)
    print('Total results: {}'.format(articles['totalResults']))

    news_articles = []
    news_articles_lemmas = []

    # TODO: add param to specify number of articles

    for article in articles['articles']:
        # get article content
        content = article['title'] + '. ' + article['description']
        if article['content'] is not None:
            content += '. ' + article['content']

        # extract words and lemmas from article
        word_dict, lemma_dict = get_tokens(nlp, content)
        # sort dictionaries descending by appearance of tokens
        word_dict = {
            k: v
            for k, v in sorted(
                word_dict.items(), key=lambda item: item[1], reverse=True)
        }
        lemma_dict = {
            k: v
            for k, v in sorted(
                lemma_dict.items(), key=lambda item: item[1], reverse=True)
        }
        # add tokens of pages to list
        if len(word_dict) > 0:
            news_articles.append(word_dict)
        if len(lemma_dict) > 0:
            news_articles_lemmas.append(lemma_dict)

    # export lists to csv files
    export_docs(news_articles, output_csv)
    export_docs(news_articles_lemmas, output_lemmas)

    end_time = timer()
    print('Done after {}s'.format(end_time - start_time))
Example #16
from gpiozero import LED
from time import sleep
from signal import pause
import datetime
import telepot
from telepot.loop import MessageLoop
from geopy.geocoders import Nominatim
from newsapi import NewsApiClient
import sys
import re
from twx.botapi import TelegramBot, ReplyKeyboardMarkup
import traceback
from pyowm import OWM

apiKey = '895dd3e71642adda4c6a2a1c9ad09b54'
owm = OWM(apiKey)  # use the imported OWM class; the 'pyowm' module itself was never imported
newsapi = NewsApiClient(api_key='b0ac87d0931e4cdda08c441d97d9daf8')
now = datetime.datetime.now()
TOKEN = '862774896:AAEA56RsA0Od0rxBA49vGr5zoASSD-NQySQ'
OWMKEY = '895dd3e71642adda4c6a2a1c9ad09b54'
bot = telepot.Bot(TOKEN)
broom = LED(18)
lroom = LED(23)
groom = LED(17)
ol = LED(27)


def process_message(msg):
    keyboard = [['Get Weather']]
    reply_markup = ReplyKeyboardMarkup.create(keyboard)

    chat_id = msg['chat']['id']
Example #17
    def handle(self, *args, **options):
        # First, clear any existing data from the model
        Topics.objects.all().delete()
        c_list = ["kr", "us", "gb", "it", "fr", "jp", "de"]
        newsapi = NewsApiClient(api_key=settings.NEWSAPI)
        for name in c_list:
            if name == "jp":
                context = newsapi.get_everything(q="コロナ", page_size=100, page=1, sort_by="popularity")
                for i in context["articles"]:
                    try:
                        if i["description"] is not None:
                            Topics.objects.create(title=i["title"], published_date=i["publishedAt"], description=i["description"],
                                                  author=i["author"], image_url=i["urlToImage"], topic_url=i["url"],
                                                  domain_tags=name,)
                    except json.decoder.JSONDecodeError:
                        pass
                context = newsapi.get_top_headlines(q="コロナ", country=name, page_size=100)
                for i in context["articles"]:
                    try:
                        if i["description"] is not None:
                            Topics.objects.create(title=i["title"], published_date=i["publishedAt"], description=i["description"],
                                                  author=i["author"], image_url=i["urlToImage"], topic_url=i["url"],
                                                  top_news="top", domain_tags=name,)
                    except json.decoder.JSONDecodeError:
                        pass
            elif name == "kr":
                  context = newsapi.get_everything(q=translate("コロナウイルス", "ja", name), page_size=100, page=1,)
                  for i in context["articles"]:
                      try:
                          if i["description"] is not None:
                             Topics.objects.create(title=translate(i["title"], name, "ja"),
                                                  published_date=i["publishedAt"],
                                                  description=translate(i["description"], name, "ja"),
                                                  author=i["author"],
                                                  image_url=i["urlToImage"], topic_url=i["url"], domain_tags=name)
                      except json.decoder.JSONDecodeError:
                          pass

                  context = newsapi.get_top_headlines(q=translate("コロナウイルス", "ja", name), country=name, page_size=100)
                  for i in context["articles"]:
                      try:
                          if i["description"] is not None:
                             Topics.objects.create(title=translate(i["title"], name, "ja"),
                                                  published_date=i["publishedAt"],
                                                  description=translate(i["description"], name, "ja"),
                                                  author=i["author"], image_url=i["urlToImage"], topic_url=i["url"],
                                                  top_news="top", domain_tags=name, )
                      except json.decoder.JSONDecodeError:
                          pass
            elif name == "gb":
                context = newsapi.get_everything(q=translate("コロナウイルス", "ja", name), page_size=100, page=1, domains="bbc.co.uk",)
                for i in context["articles"]:
                    try:
                        if i["description"] is not None:
                            Topics.objects.create(title=translate(i["title"], name, "ja"),
                                                  published_date=i["publishedAt"],
                                                  description=translate(i["description"], name, "ja"),
                                                  author=i["author"],
                                                  image_url=i["urlToImage"], topic_url=i["url"], domain_tags=name)
                    except json.decoder.JSONDecodeError:
                        pass

                context = newsapi.get_top_headlines(q=translate("コロナウイルス", "ja", name), country=name, page_size=100)
                for i in context["articles"]:
                    try:
                        if i["description"] is not None:
                            Topics.objects.create(title=translate(i["title"], name, "ja"),
                                                  published_date=i["publishedAt"],
                                                  description=translate(i["description"], name, "ja"),
                                                  author=i["author"], image_url=i["urlToImage"], topic_url=i["url"],
                                                  top_news="top", domain_tags=name, )
                    except json.decoder.JSONDecodeError:
                        pass

            else:
                if name == "us":
                    context = newsapi.get_everything(q=translate("コロナウイルス", "ja", name), page_size=100, page=1,
                                                     language='en')
                else:
                    context = newsapi.get_everything(q=translate("コロナウイルス", "ja", name), page_size=100, page=1,
                                                     language=name)
                for i in context["articles"]:
                    try:
                        if i["description"]or i["title"] is not None:
                            Topics.objects.create(title=translate(i["title"], name, "ja"), published_date=i["publishedAt"],
                                                  description=translate(i["description"], name, "ja"), author=i["author"],
                                                  image_url=i["urlToImage"], topic_url=i["url"], domain_tags=name)
                    except json.decoder.JSONDecodeError:
                        pass
                context = newsapi.get_top_headlines(q=translate("コロナウイルス", "ja", name), country=name, page_size=100)
                for i in context["articles"]:
                    try:
                        if i["description"] or i["title"] is not None:
                            Topics.objects.create(title=translate(i["title"], name, "ja"), published_date=i["publishedAt"],
                                                  description=translate(i["description"], name, "ja"),
                                                  author=i["author"], image_url=i["urlToImage"], topic_url=i["url"],
                                                  top_news="top", domain_tags=name, )
                    except json.decoder.JSONDecodeError:
                        pass

        print('test command')
Example #18
from newsapi import NewsApiClient
from bs4 import BeautifulSoup
import urllib
import requests
import json

newsapi = NewsApiClient(api_key='6e1a2d3c4b0b449d9e76fb7b297c25d4')

cat = "business"
categoryTitle = (cat[0].upper() + cat[1:]) + "Titles.txt"
categoryCollection = (cat[0].upper() + cat[1:]) + "Collection.txt"

collectionFile = open(categoryCollection, "a", encoding='utf-8')

titles = {}
titleFile = open(categoryTitle, "r", encoding='utf-8')
for line in titleFile:
    titles[line] = 0
titleFile.close()

titleFile = open(categoryTitle, "a", encoding='utf-8')

top_headlines = newsapi.get_top_headlines(country='us',
                                          category=cat,
                                          language='en')

articles = top_headlines["articles"]

for article in articles:
    title = article["title"]
    print(title)
Example #19
#!/usr/bin/python
import sys
import signal
import epd2in13_V2
import epdconfig
import time
from PIL import Image, ImageDraw, ImageFont
import traceback
from newsapi import NewsApiClient
import json
import requests
from datetime import date

api = NewsApiClient(api_key='0233006c7dc448ffaffea5cdfd337976')

top_headlines = api.get_top_headlines(sources='die-zeit', language='de')
top_headlines_2 = api.get_top_headlines(sources='bbc-news', language='en')

#Checks if python 3+ is being used, otherwise an exception is thrown
if sys.version_info[0] < 3:
    raise Exception("Must be using Python 3.0 or greater!")

#Main function


def main():
    # The client already returns parsed dicts; no JSON round-trip is needed.
    x = top_headlines
    xx = top_headlines_2
    source_name = x['articles'][0]['source']['name']
Example #20
    def __init__(self):
        Article.objects.all().delete()
        self.newsapi = NewsApiClient(
            api_key='28b3c6db134d4309ae66b07ce9e448c6')
Example #21
import dash
import dash_table as dt
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import pandas as pd
from newsapi import NewsApiClient
from textblob import TextBlob
import plotly.graph_objs as go
import numpy as np
from plotly.tools import FigureFactory as FF
import matplotlib.pyplot as plt

app = dash.Dash()
df = pd.read_csv("/Users/viraj/Desktop/Avant/App/companylist.csv")
newsapi = NewsApiClient(api_key='19566a6cc96747eeadf90d82c464467d')

companies = df[(df['Sector'] == 'Technology')
               & (df['MarketCap'] >= '$1.00B')]['Symbol']


def generate_table(dataframe, max_rows=10):
    return html.Table(
        # Header
        [html.Tr([html.Th(col) for col in dataframe.columns])] +

        # Body
        [
            html.Tr(
                [html.Td(dataframe.iloc[i][col]) for col in dataframe.columns])
            for i in range(min(len(dataframe), max_rows))
Example #22
            name = takecommand()
            os.remove(name)
            speak("File removed succesfully")

        elif 'weather update' in query:
            speak("Which city sir??")
            info = takecommand()
            city = info
            api_address = 'http://api.openweathermap.org/data/2.5/weather?appid=YOUR_OPENWEATHERMAP_API_KEY&q='
            url = api_address + city
            json_data = requests.get(url).json()
            print(json_data)
            speak(json_data)

        elif 'news update' in query:
            newsapi = NewsApiClient(api_key='Your NEWS API KEY')
            speak("Please specify any topic")
            src = takecommand()
            speak(f"The news headlines of {src} are")
            headlines = newsapi.get_top_headlines(q=src, sources='the-times-of-india,the-hindu', language='en')
            print(headlines)
            speak(headlines)
            sources = newsapi.get_sources()
            speak("Should I read the whole news? yes or no")
            say = takecommand()
            if say == 'yes':
                speak(f"The news update about {src} are")
                article = newsapi.get_everything(q=src, sources='the-times-of-india,the-hindu', domains='timesofindia.com,thehindu.com', language='en', sort_by='relevancy')
                print(article)
                speak(article)
                sources = newsapi.get_sources()
Example #23
    def swap_api_key(self):
        self._current_key += 1
        if self._current_key == len(self._api_keys):
            self._current_key = 0
        self._news_api = NewsApiClient(
            api_key=self._api_keys[self._current_key])
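
The fragment above shows only swap_api_key; a minimal self-contained sketch of the surrounding class it implies (the class name and constructor are assumptions, not shown in the source):

from newsapi import NewsApiClient

class RotatingNewsApi:
    # Hypothetical scaffolding around the swap_api_key fragment above.
    def __init__(self, api_keys):
        self._api_keys = list(api_keys)
        self._current_key = 0
        self._news_api = NewsApiClient(api_key=self._api_keys[0])

    def swap_api_key(self):
        self._current_key += 1
        if self._current_key == len(self._api_keys):
            self._current_key = 0
        self._news_api = NewsApiClient(
            api_key=self._api_keys[self._current_key])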
Example #24
elif hours == 9:
    news_api_key_index = 5
elif 10 <= hours <= 11:
    news_api_key_index = 6
elif 12 <= hours <= 13:
    news_api_key_index = 7
elif 14 <= hours <= 15:
    news_api_key_index = 8
elif 16 <= hours <= 17:
    news_api_key_index = 9
elif 18 <= hours <= 20:
    news_api_key_index = 10
else:
    news_api_key_index = 11

news_api = NewsApiClient(api_key=config.NEWS_TOKEN[news_api_key_index])
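
The elif ladder above maps the current hour to a key index; a table-driven sketch with bisect expresses the same schedule more compactly, covering only the branches visible in this fragment (the source is truncated before hour 9, so behavior below 9 is unknown):

import bisect

# Upper bound of each visible hour band, and the key index for each band;
# the trailing 11 is the else-branch fallback.
_BOUNDS = [9, 11, 13, 15, 17, 20]
_INDICES = [5, 6, 7, 8, 9, 10, 11]

def key_index_for_hour(hours):
    return _INDICES[bisect.bisect_left(_BOUNDS, hours)]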


def get_weather(city):
    """Работает с pyowm API, возвращает готовое сообщение с погодой"""
    observation = owm.weather_at_place(city)
    w = observation.get_weather()
    detailed_status = w.get_detailed_status()
    temp = str(w.get_temperature("celsius")["temp"])
    humidity = str(w.get_humidity())
    wind = str(w.get_wind()["speed"])

    if detailed_status in private_tmp_msg.weather_emoji:
        message = f'In <b>{city}</b> the weather will soon be <b>{detailed_status}</b>' \
                  f'{private_tmp_msg.weather_emoji[detailed_status]}'
    else:
Example #25
    def __init__(self):
        # Use NEWS_API_KEY from the environment when set, else fall back.
        api_key = os.environ.get("NEWS_API_KEY") or get_api_key()
        self.news_client = NewsApiClient(api_key=api_key)
        self.category_map = get_category_maps()
Example #26
import json
import pymysql

pymysql.install_as_MySQLdb()
import MySQLdb
from newsapi import NewsApiClient
from time import sleep
from datetime import datetime

newsapi = NewsApiClient(api_key='4b4233b0e7c243ea8bdd9abf5a19bbbd')
sources = 'the-hindu,bbc-news,fox-news,the-times-of-india,cnn,espn'


def getArticles():
    db = MySQLdb.connect('127.0.0.1', 'root', '', 'dlm')
    cursor = db.cursor()
    page, limit = 1, 5
    count = 0
    while page <= limit:
        result = newsapi.get_everything(sources=sources,
                                        language='en',
                                        page=page)
        articles = result['articles']
        for article in articles:
            if not article['title'] or not article['description']:
                continue
            values = [
                article['source']['name'], article['author'],
                json.dumps(article['title']),
                json.dumps(article['description'])
            ]
Example #27
def insert_news(news):
    print("Hello")
    n = 1
    l = []
    articles = news['articles']
    for x in articles:
        head = x['title']
        news_sno = n
        timestamp = x['publishedAt']
        n += 1
        news_head = {'sno': news_sno, 'news': head, 'created': timestamp}
        l.append(news_head)
    return l

db = connection.news
newsapi = NewsApiClient(api_key='54fc5717f29942ada37bc72e28e335fb')

i = 0
j = 0
key1 = []
l = []
while not l:
    top_headlines = newsapi.get_top_headlines(q=key[i], language='en')
    l = insert_news(top_headlines)
    i += 1
key1.append(key[i-1])
db.news_key1.insert_many(l)

l = []
while not l:
    top_headlines = newsapi.get_top_headlines(q=key[i], language='en')
Example #28
from newsapi import NewsApiClient
import praw


newsapi = NewsApiClient("6fc8fbebbb8d48859431e23e20ef264b")

reddit = praw.Reddit(client_id='ea4HN792BzUdOQ',
                     client_secret='zH-1JdgfVEv1Xg1KctjAi9KvG28',
                     user_agent='news-aggregator')

def getPostsFromNewsAPI(query=None):
    """Fetches results from NewsAPI based on the query in parameters.

    Connects to the NewsAPI, fetches the articles. If the query is empty then it fetches the top results, otherwise it fetches the results based on the query.

    :param query: The query to fetch the results for, defaults to None
    :type query: str, optional
    :return: A list of fetched results based on the query in dictionary format
    :rtype: list
    """
    cleanedArticles = []
    try:
        top_headlines = newsapi.get_top_headlines(q=query, category='general')
        for i in top_headlines["articles"]:
            news = {"headline":i["title"],
                    "link": i["url"],
                    "source":"newsapi"
                    }
            cleanedArticles.append(news)
        print("Fetched Articles from NewsAPI")
from flask import Flask, request, abort
from flask import jsonify
from newsapi import NewsApiClient
import json
import datetime
import string
from collections import Counter as cnt
application = Flask(__name__, static_url_path='')

newsapi = NewsApiClient(api_key='cd8354b9683141f1a67cd5eb340f27bb')


@application.route('/index')
def hello_world():
    return application.send_static_file("hw6.html")


# @application.route("/data")
# def getdata():
#     return jsonify({"error":"Invalid email"})


@application.route("/getAllSources")
def getAllSources():
    mylist1 = []
    mydict1 = {}
    myid = ''
    myname = ''
    # cid = request.args.get('category')
    sources = newsapi.get_sources(language='en', country='us')
    for x in sources['sources']:
Example #30
def companyProfile(request):

    # job news for tech---------------------------------------------------------------
    newsapi = NewsApiClient(api_key="16c7fedbd9e44985909000cfc198020a")

    # Intel news
    intel_news = newsapi.get_everything(
        q='intel',
        sources='bbc-news,the-verge',
        domains='bbc.co.uk,techcrunch.com',
        language='en',
    )

    intel_articles = intel_news['articles']

    intel_desc = []
    intel_news = []
    intel_url = []
    intel_time = []

    for i in range(3):
        myarticles = intel_articles[i]

        intel_news.append(myarticles['title'])
        intel_desc.append(myarticles['description'])
        intel_url.append(myarticles['url'])
        published_time = myarticles['publishedAt']
        p = published_time.replace('T', ' ').replace('Z', ' ')
        intel_time.append(p)

    intel_list = zip(intel_news, intel_desc, intel_url, intel_time)

    # Microsoft news
    microsoft_news = newsapi.get_everything(
        q='microsoft',
        sources='bbc-news,the-verge',
        domains='bbc.co.uk,techcrunch.com',
        language='en',
    )

    microsoft_articles = microsoft_news['articles']

    microsoft_desc = []
    microsoft_news = []
    microsoft_url = []
    microsoft_time = []

    for i in range(3):
        myarticles = microsoft_articles[i]

        microsoft_news.append(myarticles['title'])
        microsoft_desc.append(myarticles['description'])
        microsoft_url.append(myarticles['url'])
        published_time = myarticles['publishedAt']
        p = published_time.replace('T', ' ').replace('Z', ' ')
        microsoft_time.append(p)

    microsoft_list = zip(microsoft_news, microsoft_desc, microsoft_url,
                         microsoft_time)

    # Google news
    google_news = newsapi.get_everything(
        q='google',
        sources='bbc-news,the-verge',
        domains='bbc.co.uk,techcrunch.com',
        language='en',
    )

    google_articles = google_news['articles']

    google_desc = []
    google_news = []
    google_url = []
    google_time = []

    for i in range(3):
        myarticles = google_articles[i]

        google_news.append(myarticles['title'])
        google_desc.append(myarticles['description'])
        google_url.append(myarticles['url'])
        published_time = myarticles['publishedAt']
        p = published_time.replace('T', ' ').replace('Z', ' ')
        google_time.append(p)

    google_list = zip(google_news, google_desc, google_url, google_time)

    # IBM news
    IBM_news = newsapi.get_everything(
        q='IBM',
        sources='bbc-news,the-verge',
        domains='bbc.co.uk,techcrunch.com',
        language='en',
    )

    IBM_articles = IBM_news['articles']

    IBM_desc = []
    IBM_news = []
    IBM_url = []
    IBM_time = []

    for i in range(3):
        myarticles = IBM_articles[i]

        IBM_news.append(myarticles['title'])
        IBM_desc.append(myarticles['description'])
        IBM_url.append(myarticles['url'])
        published_time = myarticles['publishedAt']
        p = published_time.replace('T', ' ').replace('Z', ' ')
        IBM_time.append(p)

    IBM_list = zip(IBM_news, IBM_desc, IBM_url, IBM_time)

    # TSMC news
    TSMC_news = newsapi.get_everything(
        q='TSMC',
        sources='bbc-news,the-verge',
        domains='bbc.co.uk,techcrunch.com',
        language='en',
    )

    TSMC_articles = TSMC_news['articles']

    TSMC_desc = []
    TSMC_news = []
    TSMC_url = []
    TSMC_time = []

    # temporarily no news
    for i in range(1):
        myarticles = TSMC_articles[i]
        TSMC_news.append(myarticles['title'])
        TSMC_desc.append(myarticles['description'])
        TSMC_url.append(myarticles['url'])
        published_time = myarticles['publishedAt']
        p = published_time.replace('T', ' ').replace('Z', ' ')
        TSMC_time.append(p)

    TSMC_list = zip(TSMC_news, TSMC_desc, TSMC_url, TSMC_time)

    # morgan news
    morgan_news = newsapi.get_everything(
        q='morgan',
        sources='bbc-news,the-verge',
        domains='bbc.co.uk,techcrunch.com',
        language='en',
    )

    morgan_articles = morgan_news['articles']

    morgan_desc = []
    morgan_news = []
    morgan_url = []
    morgan_time = []

    for i in range(3):
        myarticles = morgan_articles[i]

        morgan_news.append(myarticles['title'])
        morgan_desc.append(myarticles['description'])
        morgan_url.append(myarticles['url'])
        published_time = myarticles['publishedAt']
        p = published_time.replace('T', ' ').replace('Z', ' ')
        morgan_time.append(p)

    morgan_list = zip(morgan_news, morgan_desc, morgan_url, morgan_time)

    # hsbc news
    hsbc_news = newsapi.get_everything(
        q='hsbc',
        sources='bbc-news,the-verge',
        domains='bbc.co.uk,techcrunch.com',
        language='en',
    )

    hsbc_articles = hsbc_news['articles']

    hsbc_desc = []
    hsbc_news = []
    hsbc_url = []
    hsbc_time = []

    for i in range(3):
        myarticles = hsbc_articles[i]

        hsbc_news.append(myarticles['title'])
        hsbc_desc.append(myarticles['description'])
        hsbc_url.append(myarticles['url'])
        published_time = myarticles['publishedAt']
        p = published_time.replace('T', ' ').replace('Z', ' ')
        hsbc_time.append(p)

    hsbc_list = zip(hsbc_news, hsbc_desc, hsbc_url, hsbc_time)

    # Citigroup news
    # citigroup_news = newsapi.get_everything(q='Citi',
    #                                   sources='bbc-news,the-verge, ',
    #                                   domains='bbc.co.uk,techcrunch.com',
    #                                   language='en',)

    # citigroup_articles = citigroup_news['articles']

    # citigroup_desc = []
    # citigroup_news = []
    # citigroup_url = []
    # citigroup_time = []

    # for i in range(3):
    #     myarticles = citigroup_articles[i]

    #     citigroup_news.append(myarticles['title'])
    #     citigroup_desc.append(myarticles['description'])
    #     citigroup_url.append(myarticles['url'])
    #     published_time = myarticles['publishedAt']
    #     p = published_time.replace('T', ' ').replace('Z', ' ')
    #     citigroup_time.append(p)

    #citigroup_list = zip(citigroup_news, citigroup_desc, citigroup_url, citigroup_time)

    # Goldman news
    Goldman_news = newsapi.get_everything(
        q='Goldman',
        sources='bbc-news,the-verge',
        domains='bbc.co.uk,techcrunch.com',
        language='en',
    )

    Goldman_articles = Goldman_news['articles']

    Goldman_desc = []
    Goldman_news = []
    Goldman_url = []
    Goldman_time = []

    for i in range(3):
        myarticles = Goldman_articles[i]

        Goldman_news.append(myarticles['title'])
        Goldman_desc.append(myarticles['description'])
        Goldman_url.append(myarticles['url'])
        published_time = myarticles['publishedAt']
        p = published_time.replace('T', ' ').replace('Z', ' ')
        Goldman_time.append(p)

    Goldman_list = zip(Goldman_news, Goldman_desc, Goldman_url, Goldman_time)

    return render(request, 'companyProfile.html', locals())
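
The eight company blocks in Example #30 differ only in the query string and the number of articles kept; a consolidation sketch under that observation (same newsapi client and template context assumed):

# Consolidation sketch for companyProfile's repeated blocks.
def fetch_company_news(newsapi, query, count=3):
    result = newsapi.get_everything(
        q=query,
        sources='bbc-news,the-verge',
        domains='bbc.co.uk,techcrunch.com',
        language='en',
    )
    titles, descs, urls, times = [], [], [], []
    for article in result['articles'][:count]:
        titles.append(article['title'])
        descs.append(article['description'])
        urls.append(article['url'])
        times.append(article['publishedAt'].replace('T', ' ').replace('Z', ' '))
    return zip(titles, descs, urls, times)

# Usage sketch:
#   intel_list = fetch_company_news(newsapi, 'intel')
#   TSMC_list = fetch_company_news(newsapi, 'TSMC', count=1)

Slicing with [:count] also sidesteps the IndexError that the fixed range(3) loops risk when a query returns fewer articles.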