def nytimes_articles():
    """Flask endpoint: search NYT articles for the query sent in the request body.

    If the query is a key in ``symbol_map`` (presumably a ticker symbol —
    TODO confirm), the mapped name is appended to widen the search. Each
    matching article document is printed; nothing is returned.
    """
    query = request.get_data()
    # dict.has_key() was removed in Python 3; `in` works on both 2 and 3.
    if query in symbol_map:
        query = query + ' ' + symbol_map[query]
    print(query)
    search_obj = nytimes.get_article_search_obj(nytimes_api_key)
    result = search_obj.article_search(
        q=query,
        sort="newest",
        fl="headline,pub_date,lead_paragraph,web_url",
    )
    # The original wrapped this in `except Exception, e: raise e`, which
    # caught everything only to re-raise it (losing the traceback in Py2).
    # Let a malformed response propagate naturally instead.
    docs = result['response']['docs']
    for doc in docs:
        print(jsonify(doc))
def nytimes_sentiment(query, api_key='2f5b350f0e83494221d16b3671f89af9:5:74041247'):
    """Tally headline sentiment for the newest NYT articles matching *query*.

    Returns a two-element list ``[positive_count, negative_count]``.

    api_key: NYT article-search API key. NOTE(review): the default is the
    credential that was hard-coded in the original — kept for backward
    compatibility, but it should be moved to configuration and rotated.
    """
    search = nytimes.get_article_search_obj(api_key)
    result = search.article_search(
        q=query,
        sort='newest',
        fl='headline,pub_date,lead_paragraph,web_url',
    )
    articles = result['response']['docs']
    pos_headlines = 0
    neg_headlines = 0
    # Single pass over the articles (the original built an intermediate
    # headline list and looped a second time).
    for article in articles:
        headline = article['headline']['main']
        blob = TextBlob(headline)
        # NOTE: a polarity of exactly 0 (neutral) is counted as negative,
        # matching the original behavior.
        if blob.sentiment.polarity > 0:
            pos_headlines += 1
        else:
            neg_headlines += 1
    return [pos_headlines, neg_headlines]
import json
import nytimes
import time
import csv
import re

# NOTE(review): hard-coded API key — move to configuration.
search_obj = nytimes.get_article_search_obj('d66d7d7ec5264a9491a9a32cd3960d9b')

# Scrape print headlines for 'trump' articles since 2018-03-01, one API
# page at a time, collecting them into `data` as single-element rows.
data = []
for page in range(200):
    print("Page %d" % page)
    try:
        f = search_obj.article_search(
            q='trump',
            fl=['headline'],
            begin_date='20180301',
            page=str(page),
            sort='newest',
        )
    except Exception:
        # Narrowed from a bare `except:` (which also caught KeyboardInterrupt).
        # Presumably the client raises once we page past the available
        # results or hit a rate limit — stop paging rather than crash.
        break
    try:
        for doc in f['response']['docs']:
            title = doc['headline']['print_headline'].encode('utf-8')
            if title:
                data.append([title])
    except (KeyError, TypeError, AttributeError):
        # Malformed or partial response for this page: skip it, keep going.
        pass
    # Throttle between requests; the original skipped the sleep whenever
    # the extraction above raised, which defeats rate limiting.
    time.sleep(1)
def init_nyt_articles(self):
    """Create the NYT article-search client.

    Reads the API key from ``nyt_credentials.txt`` via
    ``self.read_credentials`` and returns the configured search object.
    """
    credentials = self.read_credentials('nyt_credentials.txt')
    return nytimes.get_article_search_obj(credentials['article_search_api_key'])
import nytimes
import datetime as dt
import json

# NOTE(review): the key is passed wrapped in a list, as in the original —
# confirm the client doesn't expect a bare key string.
search_obj = nytimes.get_article_search_obj([API_KEY])

# BUG FIX: the original invoked `article_search_frontpage(q=None, ...)` as a
# bare module-level name, which raises NameError — it only exists as a method
# of `search_obj`. The stray call is removed; its signature is:
#   article_search_frontpage(q=None, num_pages=1, begin_date=None,
#                            end_date=None, filename=None)

if __name__ == '__main__':
    # Fetch front-page articles for a single sample day and report the count.
    sample = search_obj.article_search_frontpage(begin_date=19570907,
                                                 end_date=19570907)
    print(len(sample))