def get_old_news(self):
    """Backfill older 'coronavirus' articles, walking backwards in time.

    The oldest already-downloaded file in ``self.path`` (named
    ``<ISO timestamp>.json``) marks where to resume.  Each iteration
    fetches one gap-sized window ending at ``end_time`` and writes the
    articles to ``<start_time>.json``, stopping when NewsAPI rejects a
    request (daily quota) or after ``self.daily_limit`` requests.
    """
    news = newsapi.NewsApiClient(self.api_key)
    # Resume from the oldest file on disk; lexicographic min equals
    # chronological min for this fixed-width timestamp filename format.
    end_time = dt.datetime.strptime(
        min(os.listdir(self.path)).split('.')[0], '%Y-%m-%dT%H%M%S')
    # Hoist the loop-invariant window size.
    gap = dt.timedelta(hours=self.hours_gap, minutes=self.minutes_gap)
    for _ in range(self.daily_limit):
        start_time = end_time - gap
        try:
            all_articles = news.get_everything(
                q='coronavirus',
                from_param=start_time.isoformat(timespec='seconds'),
                to=end_time.isoformat(timespec='seconds'),
                language='en',
                page_size=100,
                page=1)
        except newsapi.newsapi_exception.NewsAPIException:
            # NewsAPI refuses further requests once the daily quota is hit.
            print('daily limit reached')
            break
        end_time -= gap
        # ':' is stripped because it is not a legal filename character on
        # some platforms.
        out_name = (self.path
                    + start_time.isoformat(timespec='seconds').replace(':', '')
                    + '.json')
        # Context manager guarantees the file is closed even on write errors
        # (the original left the handle open on failure).
        with open(out_name, 'w') as f:
            f.write(json.dumps(all_articles['articles']))
def main():
    """Entry point: load configuration, query NewsAPI, print the articles."""
    # The API key lives in its own secret config file, separate from pnews.cfg.
    config = configparser.ConfigParser()
    config.read('secret.cfg')
    api_key = config['api']['api_key']

    # Supported source/country lists are referenced by path from the main config.
    config.read('pnews.cfg')
    sources_file = config['paths']['sources']
    countries_file = config['paths']['countries']
    sources = read_file(sources_file)
    countries = read_file(countries_file)

    args = get_cmd_input()
    page = args.page
    site = args.site
    query = args.query

    # Guard clause: reject unsupported sources before spending an API call.
    if site is not None and site not in sources:
        sys.exit('Source ' + site + ' is not supported\n' +
                 'Please see list of supported sources at https://newsapi.org/sources')

    news = newsapi.NewsApiClient(api_key=api_key)
    if query == 'top':
        headlines = news.get_top_headlines(sources=site, language='en',
                                           page_size=page)
        print_header(page, site=site)
    else:
        headlines = news.get_everything(q=query, sources=site, language='en',
                                        page_size=page)
        print_header(page, site=site, query=query)

    for article in headlines['articles']:
        print_output(article)
def get_news(self):
    """Download newer 'coronavirus' articles, walking forward in time.

    Resumes from the timestamp of the newest file in ``self.path`` and
    fetches gap-sized windows, at most ``self.daily_limit`` of them, never
    requesting a window that extends past the current UTC time.  Each
    window's articles are written to ``<window start>.json``.
    """
    news = newsapi.NewsApiClient(self.api_key)
    current_time = dt.datetime.utcnow()
    # Newest file on disk marks the last window already fetched;
    # lexicographic max equals chronological max for this filename format.
    start_time = dt.datetime.strptime(
        max(os.listdir(self.path)).split('.')[0], '%Y-%m-%dT%H%M%S')
    # Hoist the loop-invariant window size.
    gap = dt.timedelta(hours=self.hours_gap, minutes=self.minutes_gap)
    requests_made = 0
    while requests_made < self.daily_limit and start_time + gap <= current_time:
        requests_made += 1
        start_time += gap
        all_articles = news.get_everything(
            q='coronavirus',
            from_param=start_time.isoformat(timespec='seconds'),
            to=(start_time + gap).isoformat(timespec='seconds'),
            language='en',
            page_size=100,
            page=1)
        # ':' is stripped because it is not a legal filename character on
        # some platforms.
        out_name = (self.path
                    + start_time.isoformat(timespec='seconds').replace(':', '')
                    + '.json')
        # Context manager guarantees the file is closed even on write errors
        # (the original left the handle open on failure).
        with open(out_name, 'w') as f:
            f.write(json.dumps(all_articles['articles']))
def get_news_for_day(self, day, sources, page=1):
    """Fetch one page of articles for ``day``, rotating API keys on failure.

    Args:
        day: ISO date string passed as both ``from`` and ``to``.
        sources: comma-separated NewsAPI source ids.
        page: 1-based result page number.

    Returns:
        The raw response dict from ``NewsApiClient.get_everything``.

    When a request is rejected (e.g. the current key's quota is exhausted),
    the next key from the rotation is installed and the request retried.
    The original implementation recursed on each retry, which could blow
    the stack under a long run of failures; this loop is iterative but
    otherwise behaves the same.  NOTE(review): rotation is assumed to
    eventually succeed or for ``next_api_key`` to raise — confirm that
    method's behaviour when all keys are exhausted.
    """
    while True:
        try:
            return self.api.get_everything(sources=sources, from_param=day,
                                           to=day, page=page, language='en',
                                           sort_by=SORT_KEY['popularity'])
        except newsapi.newsapi_exception.NewsAPIException:
            # Current key was rejected; install the next one and retry.
            self.api_key = self.next_api_key()
            self.api = newsapi.NewsApiClient(api_key=self.api_key)
def configure_news_api(self):
    """Configures the News API."""
    # Build the NewsAPI client once from the key stored on this instance;
    # later calls go through self.news_api.
    self.news_api = newsapi.NewsApiClient(self.api_key)
def request_api(cat=None, query=None):
    """Fetch Russian-language top headlines from NewsAPI.

    Args:
        cat: optional NewsAPI category name.
        query: optional free-text search query.

    Returns:
        The raw response dict from ``NewsApiClient.get_top_headlines``.
    """
    import newsapi
    # SECURITY NOTE(review): the API key is hard-coded; it should be moved
    # to configuration or an environment variable.
    # Bind the client to its own name instead of rebinding (shadowing) the
    # just-imported `newsapi` module, as the original did.
    client = newsapi.NewsApiClient(api_key='fd1e2db0438341cbbd62315ed0db78bd')
    data = client.get_top_headlines(q=query, language='ru',
                                    country='ru', category=cat)
    return data
def wikisearch():
    """Interactively fetch either news (NewsAPI) or a Wikipedia summary.

    Prompts the user for a mode; anything starting with 'n' or containing
    'news' triggers a NewsAPI search, otherwise a Wikipedia lookup.  All
    output goes through ``greatings`` (a speech/print helper defined
    elsewhere in this file).

    Returns:
        False when any error occurs; otherwise ``None``.
    """
    try:
        ask_for_search_type = input('News/Information: ')
        if ask_for_search_type[0].lower(
        ) == 'n' or 'news' in ask_for_search_type.lower():
            welcome_to_newsworld = "Okay.Please let me know what kinds of news you are looking for."
            greatings(welcome_to_newsworld)
            # SECURITY NOTE(review): hard-coded API key; move to config/env.
            my_api_key = '38df851456f641ea9df315b23bc19ffa'
            news_client = newsapi.NewsApiClient(api_key=my_api_key)
            user_input = input('News topic: ')
            news = news_client.get_everything(q=user_input, language='en',
                                              page_size=100)['articles']
            news_ammount = len(news)
            news_find_time = "Give me few seconds.I am searching through more than 30 world's famous newspapers about your news topic"
            greatings(news_find_time)
            if news_ammount == 0:
                text = f"Umm!It seems there is no news today about your topic.Make sure you enter a right topic.And just enter the topic name and nothing else."
                greatings(text)
            elif news_ammount <= 5:
                # Few results: read them all out.
                greatings('Yep.I am done.Here it is')
                j = 1
                for x in news:
                    title = x['title']
                    description = x['description']
                    final_news = title + description
                    n_count = f'News:{j}'
                    greatings(n_count)
                    greatings(final_news)
                    j += 1
            else:
                # Many results: ask how many to read.
                user_demand = f"Yeo.I am done.i have found {news_ammount} news about your topic.How many news you want to listen?"
                greatings(user_demand)
                news_number = input('Amount of news: ')
                # Parse once instead of re-parsing on every loop iteration.
                limit = int(news_number)
                j = 1
                for x in news:
                    if j > limit:
                        # Nothing left to emit — stop instead of idling
                        # through the rest of the list.
                        break
                    title = x['title']
                    description = x['description']
                    final_news = title + description
                    n_count = f"News{j}"
                    greatings(n_count)
                    greatings(final_news)
                    j += 1
        else:
            greatings('Okay.What kind of information you are looking for?')
            search = input('Search:')
            search_result = wikipedia.search(search)
            greatings(
                'Give me few seconds.I am colllecting information on your topic.'
            )
            search_summary = wikipedia.summary(search_result)
            x = f"Yep.I am done.Here it is.{search_summary}"
            greatings(x)
    except Exception:
        # Was `except BaseException`, which also swallowed KeyboardInterrupt
        # and SystemExit; Exception keeps the best-effort behaviour without
        # trapping interpreter-control exceptions.
        return False
def __init__(self, api_key):
    """Create the wrapper and its underlying NewsAPI client.

    Args:
        api_key: NewsAPI.org API key used for all subsequent requests.
    """
    self._client = newsapi.NewsApiClient(api_key)
def __init__(self):
    """Initialise the key rotation and build the first NewsAPI client.

    ``next_api_key`` is defined elsewhere on this class; presumably it
    returns the key at ``self.api_key_index`` and advances the index —
    confirm against that method.
    """
    self.api_key_index = 0
    self.api_key = self.next_api_key()
    self.api = newsapi.NewsApiClient(api_key=self.api_key)
#! /usr/bin/env python3
"""List every NewsAPI source (id, name, description) via the raw HTTP API."""
import os
import requests
import newsapi
import re
import pprint
import json


def remove_non_ascii(text):
    """Return *text* with all non-ASCII characters removed."""
    # One C-level encode/decode pass replaces the original per-character
    # `ord(i) < 128` generator loop; 'ignore' drops anything non-ASCII.
    return text.encode('ascii', 'ignore').decode('ascii')


if __name__ == "__main__":
    newsapi_key = os.environ['NEWSAPI_KEY']
    api = newsapi.NewsApiClient(api_key=newsapi_key)
    newapi_url = "https://newsapi.org/v2/"
    sources = "sources?apiKey={}".format(newsapi_key)
    # The client library exposes api.get_sources(), but the raw HTTP request
    # is kept to preserve the original behaviour exactly.
    r = requests.get(newapi_url + sources)
    text = r.content.decode('utf-8')
    new_text = remove_non_ascii(text)
    new_json = json.loads(new_text)
    json_sources = new_json['sources']
    for x in json_sources:
        print("ID: {}".format(x['id']))
        print("Name: {}".format(x['name']))
        print("Description: {}".format(x['description']))
# -*- coding:utf-8 -*- import sys import io import csv import time import json import random import hashlib import requests import newsapi from prettytable import PrettyTable newsApi = newsapi.NewsApiClient(api_key='xxxxx') pagesize = 100 appKey = 'xxxxxx' secretKey = 'xxxxxx' def inofs(): print(""" ################################################################### ------------------------说明--------------------------------------- ForeignNewsSearch是一个基于开源项目NewsAPI.org编写的命令行工具,授权条款 为MIT License。因接口调用次数有限,每日下载文章数理论值约为10万篇,请酌情 使用。如有疑问请联系作者或访问https://newsapi.org. ------------------------说明--------------------------------------- ################################################################### """) pargram_fuc()
import newsapi as n try: import apiai except ImportError: sys.path.append( os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)) import apiai APIAI_CLIENT_ACCESS_TOKEN = os.environ['APIAI_CLIENT_ACCESS_TOKEN'] NEWSAPI_TOKEN = os.environ['NEWSAPI_TOKEN'] MONGO_URI = os.environ['MONGODB_URI'] PORT = int(os.environ.get('PORT', '5000')) BOT_TOKEN = os.environ['BOT_TOKEN'] newsapi = n.NewsApiClient(api_key=NEWSAPI_TOKEN) ai = apiai.ApiAI(APIAI_CLIENT_ACCESS_TOKEN) client = pymongo.MongoClient(MONGO_URI) db = client.get_default_database() users = db['users'] newsLists = db['newslists'] ##newsapi = "https://newsapi.org/v2/top-headlines?sources=" news_keyboard = telegram.replykeyboardmarkup.ReplyKeyboardMarkup( [[telegram.KeyboardButton("google news")], [telegram.KeyboardButton("the hindu")], [ telegram.KeyboardButton("axios") ], [telegram.KeyboardButton("the guardian")], [telegram.KeyboardButton("new york times")],
import pymongo import newsapi as ns import re # user credentials to access the news API my_key = ('dc26749994374766a2394e60ac6bd447') # set up news api to be used to pull data newsapi = ns.NewsApiClient(api_key=my_key) # setup db and collection for MongoDB my_client = pymongo.MongoClient("mongodb://127.0.0.1:27017/") my_db = my_client["Asgmt3"] my_collection = my_db["news"] news = [] # empty list to which new_details obj will be added print("Initializing search...") # search keywords search_keywords = [ 'Canada', 'University', 'Dalhousie University', 'Halifax', 'Canada Education', 'Moncton', 'Toronto' ] # API call from https://newsapi.org/ for keyword in search_keywords: f = open("output.txt", "a+") for article in newsapi.get_everything(q=keyword, page_size=100)['articles']: news_details = { 'description': article['description'],
def get_news_api():
    """Return the per-app-context NewsAPI client, creating it on first use.

    The client is cached on Flask's ``g`` object so the key file is read
    at most once per application context.
    """
    if 'news_api' in g:
        return g.news_api
    # First use in this context: read the key from disk and build the client.
    with open('api_key.txt', 'r') as key_file:
        key = key_file.read().strip()
    g.news_api = newsapi.NewsApiClient(api_key=key)
    return g.news_api