#!/usr/bin/env python3
"""
Python version: >= 3.2
Dependencies: gevent, requests, beautifulsoup4
gevent coroutine version of the crawler.
Fetches photo links from the Taobao model site (https://mm.taobao.com) and
optionally downloads them, organized by location, model name and album name.
"""
import gevent
from gevent import monkey
# Patch the standard library *before* importing requests so that its
# sockets become cooperative and block only the current greenlet.
monkey.patch_all()
import requests
import contextlib
import os
import re
import json
import time
import logging
import argparse
from bs4 import BeautifulSoup

# First page index
FIRST_PAGE = 1
# Model list page
user_list = 'https://mm.taobao.com/json/request_top_list.htm?page={}'
# Model info page
user_info = 'https://mm.taobao.com/self/info/model_info_show.htm?user_id={}'
# Album list page of one model
album_list = 'https://mm.taobao.com/self/album/open_album_list.htm?user_id={}&page={}'
# JSON endpoint for the photos of one album
photo_list = 'https://mm.taobao.com/album/json/get_album_photo_list.htm?user_id={}&album_id={}&page={}'

def cli():
    # set up the argument parser
    parser = argparse.ArgumentParser()
    parser.add_argument('-u', '--user', type=int, default=1, help='Max user pages to fetch.')
    parser.add_argument('-a', '--album', type=int, default=1, help='Max album pages to fetch.')
    parser.add_argument('-p', '--photo', type=int, default=1, help='Max photo pages to fetch.')
    parser.add_argument('-d', '--download', action='store_true', help='Download photos from the parsed URLs.')
    parser.add_argument('-l', '--loglevel', default='INFO', help='Loglevel [DEBUG | INFO | ERROR]. Default: INFO')
    args = parser.parse_args()
    # configure logging ({message} is the formatted message when style='{')
    numeric_level = getattr(logging, args.loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % args.loglevel)
    logging.basicConfig(style='{', format='{asctime} {levelname} {funcName} {message}', level=numeric_level)
    return args

@contextlib.contextmanager
def timer(title='default'):
    start = time.time()
    try:
        yield
    finally:
        logging.info('{}::{:.3f}s'.format(title, time.time() - start))

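# Greenlet hierarchy: Manager spawns one User greenlet per user id, each User
# spawns one Album greenlet per album, and each Album spawns one Photo
# greenlet per picture. All of them share a single requests.Session, whose
# sockets are cooperative thanks to monkey.patch_all() above.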
class Photo(gevent.Greenlet):
    g_count = 0  # total photos processed, across all instances

    def __init__(self, id, url, album_name, user_name, location, session):
        super(Photo, self).__init__()
        self._id = id
        self._url = 'https:' + url  # picUrl is protocol-relative
        self._user_name = user_name
        self._album_name = album_name
        self._location = location
        self._session = session
        self._path = os.path.join(os.getcwd(), 'taomm', self._location, self._user_name, self._album_name)
        os.makedirs(self._path, exist_ok=True)

    def _run(self):
        if args.download:
            # fetch the image bytes
            image = self.fetch(self._url)
            # write them to disk
            self.save(image)
        logging.debug(self)
        Photo.g_count += 1

    # fetch the image content; blocks only this greenlet
    def fetch(self, url):
        return self._session.get(url).content

    def save(self, image):
        path = os.path.join(self._path, '{}.jpg'.format(self._id))
        with open(path, 'wb') as f:
            f.write(image)

    def __repr__(self):
        return '<Photo(id={} url={})>'.format(self._id, self._url)

class Album(gevent.Greenlet):
    def __init__(self, id, name, user_id, user_name, location, *, session):
        super(Album, self).__init__()
        self._id = id
        self._user_id = user_id
        self._name = name
        self._user_name = user_name
        self._location = location
        self._photos = []
        self._session = session

    def _run(self):
        # fetch the photo list and spawn Photo greenlets
        self.get_photos()
        logging.debug(self)
        # wait for all photo downloads to finish
        gevent.joinall(self._photos)

    def get_page_nums(self):
        # number of photo pages in this album
        photo_list_url = photo_list.format(self._user_id, self._id, FIRST_PAGE)
        resp = self.fetch(photo_list_url)
        return self.parse_page_nums(resp)

    def get_photo_by_page(self, page):
        photo_list_url = photo_list.format(self._user_id, self._id, page)
        resp = self.fetch(photo_list_url)
        return self.parse_photo_url(resp)

    # fetch page content; blocks only this greenlet
    def fetch(self, url):
        return self._session.get(url).text

    @staticmethod
    def parse_page_nums(resp):
        json_data = json.loads(resp)
        pages = int(json_data['totalPage'])
        return pages

    def parse_photo_url(self, resp):
        json_data = json.loads(resp)
        photos = json_data['picList']
        photo_items = []
        for pic in photos:
            photo = Photo(pic['picId'],
                          pic['picUrl'],
                          self._name,
                          self._user_name,
                          self._location,
                          session=self._session)
            photo_items.append(photo)
        return photo_items

    def get_photos(self):
        # number of photo pages
        pages = self.get_page_nums()
        # fetch the photo lists concurrently, page by page
        tasks = [gevent.spawn(self.get_photo_by_page, page + 1) for page in range(min(args.photo, pages))]
        for task in gevent.iwait(tasks):
            photo_objs = task.get()
            for photo in photo_objs:
                photo.start()
                self._photos.append(photo)

    def __repr__(self):
        return '<Album(id={} name={} user={})>'.format(self._id, self._name, self._user_name)

class User(gevent.Greenlet):
    def __init__(self, id, *, session):
        super(User, self).__init__()
        self._id = id
        self._name = ''
        self._location = ''
        self._albums = []
        self._session = session

    def _run(self):
        # fetch the user's name and location
        self.get_info()
        logging.debug(self)
        # fetch the albums and spawn Album greenlets
        self.get_albums()
        # wait for all albums to finish
        gevent.joinall(self._albums)

    def get_page_nums(self):
        # number of album list pages for this user
        album_list_url = album_list.format(self._id, FIRST_PAGE)
        resp = self.fetch(album_list_url)
        return self.parse_page_nums(resp)

    def get_album_by_page(self, page):
        album_list_url = album_list.format(self._id, page)
        resp = self.fetch(album_list_url)
        return self.parse_album_id(resp)

    # fetch page content; blocks only this greenlet
    def fetch(self, url):
        return self._session.get(url).text

    @staticmethod
    def parse_page_nums(resp):
        soup = BeautifulSoup(resp, 'html.parser')
        pages = int(soup.find('input', id='J_Totalpage').get('value', 0))
        return pages

    def parse_user_info(self, resp):
        soup = BeautifulSoup(resp, 'html.parser')
        self._name = soup.find('ul', class_='mm-p-info-cell clearfix').li.span.text
        self._location = soup.find('li', class_='mm-p-cell-right').span.text

    def parse_album_id(self, resp):
        soup = BeautifulSoup(resp, 'html.parser')
        pattern = re.compile(r'album_id=(\d+)')
        album_items = []
        tags = soup.select('h4 a')
        for tag in tags:
            match = pattern.search(tag['href'])
            if match:
                album_id = match.group(1)
                album_name = tag.text.strip().replace('.', '').strip()
                album = Album(album_id,
                              album_name,
                              self._id,
                              self._name,
                              self._location,
                              session=self._session)
                album_items.append(album)
        return album_items

    def get_info(self):
        user_info_url = user_info.format(self._id)
        resp = self.fetch(user_info_url)
        self.parse_user_info(resp)

    def get_albums(self):
        # number of album pages
        pages = self.get_page_nums()
        # fetch the album lists concurrently, page by page
        tasks = [gevent.spawn(self.get_album_by_page, page + 1) for page in range(min(args.album, pages))]
        for task in gevent.iwait(tasks):
            album_objs = task.get()
            for album in album_objs:
                album.start()
                self._albums.append(album)

    def __repr__(self):
        return '<User(id={} name={})>'.format(self._id, self._name)

class Manager(gevent.Greenlet):
    def __init__(self):
        super(Manager, self).__init__()
        self._users = []
        self._session = requests.Session()

    def _run(self):
        # create and start the User greenlets
        self.get_users()
        # wait for all of them to finish
        gevent.joinall(self._users)
        # close the shared session
        self._session.close()

    def get_user_pages(self):
        # URL of the first user list page
        user_list_url = user_list.format(FIRST_PAGE)
        # fetch the page and return the total page count
        resp = self.fetch(user_list_url)
        return self.parse_page_nums(resp)

    def get_user_by_page(self, page):
        # URL of the Nth user list page
        user_list_url = user_list.format(page)
        # fetch the page and parse the user ids on it
        resp = self.fetch(user_list_url)
        return self.parse_user_id(resp)

    def get_users(self):
        # number of user list pages
        pages = self.get_user_pages()
        # fetch the user lists concurrently, page by page
        tasks = [gevent.spawn(self.get_user_by_page, page + 1) for page in range(min(args.user, pages))]
        for task in gevent.iwait(tasks):
            user_ids = task.get()
            for user_id in user_ids:
                user = User(user_id, session=self._session)
                user.start()
                self._users.append(user)

    # fetch page content; blocks only this greenlet
    def fetch(self, url):
        return self._session.get(url).text

    @staticmethod
    def parse_page_nums(content):
        soup = BeautifulSoup(content, 'html.parser')
        pages = int(soup.find('input', id='J_Totalpage').get('value', 0))
        return pages

    @staticmethod
    def parse_user_id(content):
        soup = BeautifulSoup(content, 'html.parser')
        return [item['data-userid'] for item in soup.find_all('span', class_='friend-follow J_FriendFollow')]

    def __repr__(self):
        return '<Manager(users_num={})>'.format(len(self._users))

if __name__ == '__main__':
    # parse command-line arguments (the classes above read the module-level `args`)
    args = cli()
    # time the whole run
    with timer('main'):
        manager = Manager()
        manager.start()
        gevent.joinall([manager])
    logging.info('{} photos fetched.'.format(Photo.g_count))