andaman.py (forked from golmic/Andaman)
# coding=utf-8
import datetime
import imp
import os
import re
import sys
import traceback
from Queue import Queue
from time import time

import scrapy
from scrapy import signals, Request, Item, log
from scrapy.crawler import Crawler
from scrapy.http import Response
from scrapy.settings import Settings
from twisted.internet import reactor

import conf
from spiders import AizouPipeline, AizouCrawlSpider
__author__ = 'zephyre'


def parse_args(args):
    """
    Parse the command-line arguments.

    @param args: the raw argument list (typically sys.argv)
    @return: a dict of the form {'cmd': ..., 'param': ...}
    """
    if len(args) == 1:
        return {'cmd': None, 'param': None}

    cmd = args[1]
    # If the first token starts with '-', it is not a command but the start of the parameter list
    if re.search(r'^-', cmd):
        cmd = None
        param_idx = 1
    else:
        param_idx = 2

    # Parse the command-line parameters
    param_dict = {}
    q = Queue()
    for tmp in args[param_idx:]:
        q.put(tmp)

    param_name = None
    param_value = None
    while not q.empty():
        term = q.get()
        if re.search(r'^--(?=[^\-])', term):
            # A long option such as --foo: flush the previous parameter, if any
            tmp = re.sub(r'^-+', '', term)
            if param_name:
                param_dict[param_name] = param_value
            param_name = tmp
            param_value = None
        elif re.search(r'^-(?=[^\-])', term):
            # Clustered short options such as -ab: each letter is its own parameter
            tmp = re.sub(r'^-+', '', term)
            for ch in tmp:
                if param_name:
                    param_dict[param_name] = param_value
                    param_value = None
                param_name = ch
        else:
            # A plain value: append it to the current parameter's value list
            if param_name:
                if param_value:
                    param_value.append(term)
                else:
                    param_value = [term]
    if param_name:
        param_dict[param_name] = param_value

    # # debug and debug-port are common parameters, indicating that the remote debugging module should be enabled.
    # if 'debug' in param_dict:
    #     if 'debug-port' in param_dict:
    #         port = int(param_dict['debug-port'][0])
    #     else:
    #         port = getattr(glob, 'DEBUG')['DEBUG_PORT']
    #     import pydevd
    #
    #     pydevd.settrace('localhost', port=port, stdoutToServer=True, stderrToServer=True)

    return {'cmd': cmd, 'param': param_dict}
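
# A quick sanity check of parse_args (the argv values below are made up for
# illustration; only the parsing behavior is taken from the code above):
#   parse_args(['andaman.py', 'qyer', '--locale', 'zh', '-v'])
#   => {'cmd': 'qyer', 'param': {'locale': ['zh'], 'v': None}}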


def proc_crawler_settings(crawler):
    """
    Read the config file and apply the corresponding Scrapy settings.
    """
    settings = crawler.settings
    config = conf.load_yaml()
    if 'scrapy' in config:
        for key, value in config['scrapy'].items():
            settings.set(key, value)
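
# proc_crawler_settings assumes conf.load_yaml() returns a plain dict whose
# optional 'scrapy' section maps Scrapy setting names to values; something
# like the following (the concrete keys here are hypothetical):
#   scrapy:
#     CONCURRENT_REQUESTS: 16
#     DOWNLOAD_DELAY: 0.5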


def setup_spider(spider_name, args):
    crawler = Crawler(Settings())
    crawler.signals.connect(reactor.stop, signal=signals.spider_closed)
    proc_crawler_settings(crawler)
    settings = crawler.settings

    ret = parse_args(sys.argv)
    settings.set('USER_PARAM', ret['param'])
    settings.set('USER_ARGS', args)
    settings.set('USER_AGENT', 'Aizou Chrome')

    if args.proxy:
        settings.set('DOWNLOADER_MIDDLEWARES', {'middlewares.ProxySwitchMiddleware': 300})
        settings.set('PROXY_SWITCH_VERIFIER', 'baidu')
        settings.set('PROXY_SWITCH_REFRESH_INTERVAL', 3600)
    settings.set('SPIDER_MIDDLEWARES', {'middlewares.GoogleGeocodeMiddleware': 300})
    settings.set('AUTOTHROTTLE_DEBUG', args.debug)
    settings.set('AUTOTHROTTLE_ENABLED', args.throttle)

    if spider_name in conf.global_conf['spiders']:
        spider_class = conf.global_conf['spiders'][spider_name]
        spider = spider_class.from_crawler(crawler)
        spider_uuid = spider.uuid
        # DRY_RUN: crawl only, without invoking the item pipelines
        if not args.dry:
            # Look up the pipelines registered for this spider
            settings.set('ITEM_PIPELINES', {tmp[0]: 100 for tmp in
                                            filter(lambda p: spider_uuid in p[1], conf.global_conf['pipelines'])})
        crawler.configure()
        crawler.crawl(spider)
        crawler.start()
        # setattr(spider, 'param', param)
        return spider
    else:
        return None
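
# For reference, the ITEM_PIPELINES value built in setup_spider maps each
# dotted pipeline path registered by reg_spiders to a priority of 100, keeping
# only the pipelines whose spiders_uuid list contains this spider's uuid.
# A hypothetical result (the names are illustrative):
#   {'spiders.qyer.QyerPipeline': 100}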


def reg_spiders(spider_dir=None):
    """
    Register the spider classes found under the spiders directory.
    """
    if not spider_dir:
        root_dir = os.path.normpath(os.path.split(__file__)[0])
        spider_dir = os.path.normpath(os.path.join(root_dir, 'spiders'))

    conf.global_conf['spiders'] = {}
    conf.global_conf['pipelines'] = []

    for cur, d_list, f_list in os.walk(spider_dir):
        # Build the dotted package path of the current directory
        package_path = []
        tmp = cur
        while True:
            d1, d2 = os.path.split(tmp)
            package_path.insert(0, d2)
            if d2 == 'spiders' or d1 == '/' or not d1:
                break
            tmp = d1
        package_path = '.'.join(package_path)

        for f in f_list:
            f = os.path.normpath(os.path.join(cur, f))
            tmp, ext = os.path.splitext(f)
            if ext != '.py':
                continue
            p, fname = os.path.split(tmp)
            try:
                ret = imp.find_module(fname, [p]) if p else imp.find_module(fname)
                mod = imp.load_module(fname, *ret)
                for attr_name in dir(mod):
                    try:
                        c = getattr(mod, attr_name)
                        if issubclass(c, AizouCrawlSpider) and c != AizouCrawlSpider:
                            name = getattr(c, 'name')
                            if name:
                                conf.global_conf['spiders'][name] = c
                        elif issubclass(c, AizouPipeline) and c != AizouPipeline:
                            conf.global_conf['pipelines'].append(
                                [package_path + '.' + c.__module__ + '.' + c.__name__,
                                 getattr(c, 'spiders_uuid', [])])
                    except TypeError:
                        # issubclass() raises TypeError for attributes that are not classes
                        pass
            except ImportError:
                print 'Import error: %s' % fname
                raise
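
# After reg_spiders() has run, conf.global_conf holds roughly the following
# (the class names and uuid values are illustrative):
#   conf.global_conf['spiders']   => {'qyer': QyerSpider, ...}
#   conf.global_conf['pipelines'] => [['spiders.qyer.QyerPipeline', ['<uuid>']], ...]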


def pipeline_proc(pipeline_list, item, spider):
    # Feed the item through each pipeline in turn; a pipeline drops it by returning a falsy value
    for p in pipeline_list:
        item = p.process_item(item, spider)
        if not item:
            break


# Counters for the offline (no-scrapy) mode: total items scraped, the count at
# the last checkpoint, and the timestamp of the last checkpoint
item_cnt = 0
item_checkout_cnt = 0
ts_checkpoint = None


def request_proc(req, spider):
    global item_cnt, ts_checkpoint, item_checkout_cnt
    if isinstance(req, Request):
        # Invoke the request's callback synchronously with a placeholder Response,
        # then recurse into whatever it yields
        callback = req.callback
        if not callback:
            callback = spider.parse
        response = Response('http://www.baidu.com', request=req)
        ret = callback(response)
        if hasattr(ret, '__iter__'):
            for entry in ret:
                request_proc(entry, spider)
        else:
            request_proc(ret, spider)
    elif isinstance(req, Item):
        item_cnt += 1
        pipeline_proc(spider.pipeline_list, req, spider)
        # Log the scraping rate roughly once a minute
        ts_now = time()
        if ts_now - ts_checkpoint >= 60:
            rate = int((item_cnt - item_checkout_cnt) / (ts_now - ts_checkpoint) * 60)
            ts_checkpoint = ts_now
            item_checkout_cnt = item_cnt
            spider.log('Scraped %d items (at %d items/min)' % (item_cnt, rate), log.INFO)
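
# In the offline path, request_proc stands in for the Twisted downloader:
# callbacks receive a placeholder Response with no real HTTP body, so this
# mode only suits spiders whose callbacks fetch their own data. A rough trace:
#   start_requests() -> Request -> callback(placeholder Response)
#   -> yielded Requests recurse, yielded Items go through pipeline_proc()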


def main():
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('crawler')
    parser.add_argument('--throttle', action='store_true')
    parser.add_argument('--dry', action='store_true')
    parser.add_argument('--log2file', action='store_true')
    parser.add_argument('--logpath', type=str)
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--proxy', action='store_true')
    # --no-scrapy runs the spider without the Twisted reactor (see request_proc);
    # the original checked "'no-scrapy' in args", which could never be true
    # because the flag was never registered with the parser
    parser.add_argument('--no-scrapy', action='store_true', dest='no_scrapy')
    args, leftovers = parser.parse_known_args()

    msg = 'SPIDER STARTED: %s' % ' '.join(sys.argv)
    spider_name = args.crawler

    log_path = args.logpath if args.logpath else '/var/log/andaman'
    if args.log2file:
        try:
            os.mkdir(log_path)
        except OSError:
            pass
        logfile = os.path.join(log_path, '%s_%s.log' % (spider_name, datetime.datetime.now().strftime('%Y%m%d')))
    else:
        logfile = None

    s = setup_spider(spider_name, args)
    if s:
        s.arg_parser = parser
        scrapy.log.start(logfile=logfile, loglevel=scrapy.log.DEBUG if args.debug else scrapy.log.INFO)
        s.log(msg, scrapy.log.INFO)
        if args.no_scrapy:
            # Offline mode: pump requests and items by hand instead of running the
            # reactor. Declare the checkpoint global; otherwise this assignment would
            # only create a local variable and request_proc would see the initial None
            global ts_checkpoint
            ts_checkpoint = time()
            s.pipeline_list = s.crawler.engine.scraper.itemproc.middlewares
            for ret in s.start_requests():
                try:
                    request_proc(ret, s)
                except Exception:
                    exc_type, exc_value, exc_traceback = sys.exc_info()
                    lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
                    s.log('Error while processing: %s' % ret, log.ERROR)
                    s.log(''.join(lines), log.ERROR)
        else:
            reactor.run()  # the script will block here until the spider_closed signal is sent
    else:
        scrapy.log.start(logfile=logfile, loglevel=scrapy.log.DEBUG if args.debug else scrapy.log.INFO)
        scrapy.log.msg('Cannot find spider: %s' % spider_name, scrapy.log.CRITICAL)
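
# Typical invocations (the spider name 'qyer' is illustrative; the flags are
# the ones defined in main above):
#   python andaman.py qyer --log2file --logpath /tmp/andaman
#   python andaman.py qyer --dry --debug
#   python andaman.py qyer --no-scrapy --proxy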


if __name__ == "__main__":
    # Run from the script's directory so that relative paths (spiders/, config files) resolve
    old_dir = os.getcwd()
    os.chdir(os.path.normpath(os.path.split(__file__)[0]))
    reg_spiders()
    main()
    os.chdir(old_dir)