import os
import re
import glob
import logging
import pattern.web
import pages_scrape
import mongo_connection
from goose import Goose
from pymongo import MongoClient
from ConfigParser import ConfigParser


def scrape_func(address, website, COLL):
    """
    Function to scrape various RSS feeds. Uses the 'keep' and 'ignore'
    iterables to define which words should be used in the text search.

    Parameters
    ----------
    address : String
        Address for the RSS feed to scrape.
    website : String
        Nickname for the RSS feed being scraped.
    COLL : String
        Collection within MongoDB that holds the scraped data.
    """
    # Setup the database
    connection = MongoClient()
    db = connection.event_scrape
    collection = db[COLL]
    # Scrape the RSS feed
    try:
        results = pattern.web.Newsfeed().search(address, count=100,
                                                cached=False)
        logger.info('There are {} results from {}'.format(len(results),
                                                          website))
    except Exception, e:
        print 'There was an error. Check the log file for more information.'
        logger.warning('Problem fetching RSS feed for {}. {}'.format(address,
                                                                     e))
        results = None
    # Pursue each link in the feed
    if results:
        goose_extractor = Goose({'use_meta_language': False,
                                 'target_language': 'en'})
        for result in results:
            if website == 'xinhua':
                page_url = result.url.replace('"', '')
                page_url = page_url.encode('ascii')
            elif website == 'upi':
                page_url = result.url.encode('ascii')
            else:
                page_url = result.url

            try:
                text, meta = pages_scrape.scrape(page_url, goose_extractor)
                text = text.encode('utf-8')
            except TypeError:
                logger.warning('Problem obtaining text from URL: {}'.format(page_url))
                text = ''

            if text:
                # Strip known site-specific boilerplate before storing
                if website == 'bbc':
                    text = text.replace("This page is best viewed in an up-to-date web browser with style sheets (CSS) enabled. While you will be able to view the content of this page in your current browser, you will not be able to get the full visual experience. Please consider upgrading your browser software or enabling style sheets (CSS) if you are able to do so.", '')
                if website == 'almonitor':
                    text = re.sub("^.*?\(photo by REUTERS.*?\)", "", text)
                if website in ('menafn_algeria', 'menafn_bahrain', 'menafn_egypt',
                               'menafn_iraq', 'menafn_jordan', 'menafn_kuwait',
                               'menafn_lebanon', 'menafn_morocco', 'menafn_oman',
                               'menafn_palestine', 'menafn_qatar', 'menafn_saudi',
                               'menafn_syria', 'menafn_tunisia', 'menafn_turkey',
                               'menafn_uae', 'menafn_yemen'):
                    text = re.sub("^\(.*?MENAFN.*?\)", "", text)
                elif website == 'upi':
                    text = text.replace("Since 1907, United Press International (UPI) has been a leading provider of critical information to media outlets, businesses, governments and researchers worldwide. UPI is a global operation with offices in Beirut, Hong Kong, London, Santiago, Seoul and Tokyo. Our headquarters is located in downtown Washington, DC, surrounded by major international policy-making governmental and non-governmental organizations. UPI licenses content directly to print outlets, online media and institutions of all types. In addition, UPI's distribution partners provide our content to thousands of businesses, policy groups and academic institutions worldwide. Our audience consists of millions of decision-makers who depend on UPI's insightful and analytical stories to make better business or policy decisions. In the year of our 107th anniversary, our company strives to continue being a leading and trusted source for news, analysis and insight for readers around the world.", '')

                entry_id = mongo_connection.add_entry(collection, text,
                                                      result.title, result.url,
                                                      result.date, website)
                if entry_id:
                    logger.info('Added entry from {} with id {}'.format(result.url,
                                                                        entry_id))
                else:
                    logger.info('Result from {} already in database'.format(result.url))

    logger.info('Scrape of {} finished'.format(website))


def call_scrape_func(siteList, db_collection):
    """
    Helper function to iterate over a list of RSS feeds and scrape each.

    Parameters
    ----------
    siteList : dictionary
        Dictionary of sites, with a nickname as the key and RSS URL
        as the value.
    db_collection : String
        Collection within MongoDB that holds the scraped data.
    """
    for website in siteList:
        scrape_func(siteList[website], website, db_collection)
    logger.info('Completed full scrape.')


def parse_config():
    """Function to parse the config file."""
    config_file = glob.glob('config.ini')
    parser = ConfigParser()
    if config_file:
        logger.info('Found a config file in working directory')
        parser.read(config_file)
        try:
            collection = parser.get('Database', 'collection_list')
            whitelist = parser.get('URLS', 'file')
            return collection, whitelist
        except Exception, e:
            print 'There was an error. Check the log file for more information.'
            logger.warning('Problem parsing config file. {}'.format(e))
    else:
        cwd = os.path.abspath(os.path.dirname(__file__))
        config_file = os.path.join(cwd, 'default_config.ini')
        parser.read(config_file)
        logger.info('No config found. Using default.')
        try:
            collection = parser.get('Database', 'collection_list')
            whitelist = parser.get('URLS', 'file')
            return collection, whitelist
        except Exception, e:
            print 'There was an error. Check the log file for more information.'
            logger.warning('Problem parsing config file. {}'.format(e))


if __name__ == '__main__':
    # Setup the logging
    logger = logging.getLogger('scraper_log')
    logger.setLevel(logging.INFO)
    fh = logging.FileHandler('scraping_log.log', 'a')
    formatter = logging.Formatter('%(levelname)s %(asctime)s: %(message)s')
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    logger.info('Running in scheduled hourly mode')
    print 'Running. See log file for further information.'
    # Get the info from the config
    db_collection, whitelist_file = parse_config()
    # Convert from CSV of URLs to a dictionary
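    # Each non-empty line of the whitelist file is expected to look like
    # (the nickname and URL below are hypothetical):
    #   example_site,http://www.example.com/rss.xml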
    try:
        url_whitelist = open(whitelist_file, 'r').readlines()
        # Strip newlines so the URL values do not carry trailing whitespace,
        # and skip blank lines entirely
        url_whitelist = [line.strip().split(',') for line in url_whitelist
                         if line.strip()]
        to_scrape = {listing[0]: listing[1] for listing in url_whitelist}
    except IOError:
        print 'There was an error. Check the log file for more information.'
        logger.warning('Could not open URL whitelist file.')
        to_scrape = {}

    call_scrape_func(to_scrape, db_collection)