def open_spider(self, spider):
    """Load the newest stored publishTimeStamp from the DB at spider start.

    Weibo-content spiders cache the latest timestamp recorded for
    spider.uid; the 'keyuser' spider caches the latest timestamp for
    spider.keyword. The cached class attribute is None when the table
    holds no matching rows yet.
    """
    content_spiders = (
        'keyweibocontent',
        'weibocontent_userinfo',
        'weibocontent_danger',
        'weibocontent_userinfo_intime',
    )
    if spider.name in content_spiders:
        store = OracleStore()
        connection = store.get_connection()
        # NOTE(review): SQL built by string interpolation; uid comes from
        # the spider itself, but parameterized queries would be safer.
        query = ("select * from t_user_weibocontent where userID = '%s' "
                 "order by publishTimeStamp desc") % str(spider.uid)
        cur = store.select_operation(connection, query)
        newest = cur.fetchone()
        # Column 6 holds publishTimeStamp; None means nothing stored yet.
        WeibospiderPipeline.weibocontent_timestamp = (
            newest[6] if newest else None)
        store.close_connection(connection, cur)
    if spider.name == 'keyuser':
        store = OracleStore()
        connection = store.get_connection()
        query = ("select * from t_user_keyword where keyword = '%s' "
                 "order by publishTimeStamp desc") % str(spider.keyword)
        cur = store.select_operation(connection, query)
        newest = cur.fetchone()
        # Column 4 holds publishTimeStamp for keyword-search rows.
        WeibospiderPipeline.keyword_timestamp = (
            newest[4] if newest else None)
        store.close_connection(connection, cur)
def start_getweiboinfo(self, response):
    """Generate weibo-page requests for every user stored in t_user_info.

    For each uid, yields three lazy-load requests (first/second/third
    AJAX chunk) per page, up to WeiboSpider.page_num pages, all handled
    by parse_load. Yields None when the user table is empty (original
    behavior, kept for compatibility).
    """
    db = OracleStore()
    conn = db.get_connection()
    cursor1 = db.select_operation(conn, '''select * from t_user_info''')
    cursor2 = db.select_operation(conn, '''select count(*) from t_user_info''')
    try:
        count = cursor2.fetchone()
        if count[0]:
            for i in range(count[0]):
                for result in cursor1.fetchmany(1):
                    mainpageurl = ('http://weibo.com/u/' + str(result[0]) +
                                   '?from=otherprofile&wvr=3.6&loc=tagweibo')
                    GetWeibopage.data['uid'] = result[0]
                    getweibopage = GetWeibopage()
                    for page in range(WeiboSpider.page_num):
                        GetWeibopage.data['page'] = page + 1
                        # Each page is served by weibo.com in three
                        # successive AJAX chunks.
                        for suffix in (getweibopage.get_firstloadurl(),
                                       getweibopage.get_secondloadurl(),
                                       getweibopage.get_thirdloadurl()):
                            yield Request(
                                url=mainpageurl + suffix,
                                meta={'cookiejar': response.meta['cookiejar'],
                                      'uid': result[0]},
                                callback=self.parse_load)
        else:
            yield None
    finally:
        # Fix: run cleanup even when the generator is closed early or an
        # error is raised — the original only closed the connection after
        # full exhaustion, leaking it otherwise.
        db.close_connection(conn, cursor1, cursor2)
def parse_load(self, response):
    """Parse one lazy-load chunk of a user's weibo main page.

    On the very first chunk of page one (pre_page=0 and page=1) this
    also schedules a profile-info request when the uid has not been
    crawled before. It then extracts weibo content, timestamps and
    @-mentions into a WeibospiderItem, and schedules a uid-lookup search
    request for every mentioned nickname.
    """
    user_info = userinfo.WeiboSpider()  # NOTE(review): apparently unused; kept in case the constructor has side effects
    request_url = response.request.url
    # pre_page=0 & page=1 identifies the first load of the first page.
    p = re.compile(r'&pre_page=(\d).*&page=(\d)')
    match = p.search(request_url)
    if match is None:
        # Fix: the original raised AttributeError on URLs that carry no
        # pre_page/page parameters; skip such responses instead.
        return
    if int(match.group(1)) == 0 and int(match.group(2)) == 1:
        # First chunk of page one: fetch the user's basic profile info
        # unless this uid has already been stored.
        db = OracleStore()
        conn = db.get_connection()
        sql = "select count(*) from t_user_info where userID='%s'" % self.uid
        cursor = db.select_operation(conn, sql)
        count = cursor.fetchone()
        if not count[0]:
            analyzer = Analyzer()
            total_pq = analyzer.get_html(
                response.body, 'script:contains("PCD_person_info")')
            user_property = analyzer.get_userproperty(total_pq)
            if user_property == 'icon_verify_co_v':
                # Verified organization account: profile crawling is
                # disabled for now (needs a DB foreign key first).
                public_userinfo_url = analyzer.get_public_userinfohref(
                    total_pq)
            else:
                userinfo_url = analyzer.get_userinfohref(total_pq)
                yield Request(url=userinfo_url,
                              meta={'cookiejar': response.meta['cookiejar'],
                                    'uid': response.meta['uid'],
                                    'user_property': user_property},
                              callback=self.parse_userinfo)
        db.close_connection(conn, cursor)

    # Extract the weibo content plus @-mentioned / repost users.
    item = WeibospiderItem()
    analyzer = Analyzer()
    friendcircle = FriendCircle()
    total_pq = analyzer.get_mainhtml(response.body)
    item['uid'] = response.meta['uid']
    item['content'] = analyzer.get_content(total_pq)
    item['time'], item['timestamp'] = analyzer.get_time(total_pq)
    atuser_info, item['repost_user'] = analyzer.get_atuser_repostuser(
        total_pq)
    atuser_list = friendcircle.atuser_parser(atuser_info)
    item['atuser_nickname_list'] = atuser_list
    yield item

    # Resolve each mentioned nickname to a uid via weibo user search.
    for atuser_inlist in atuser_list:
        if atuser_inlist != []:
            for atuser in atuser_inlist:
                uid_url = ("http://s.weibo.com/user/" +
                           quote(quote(str(atuser))) + "&Refer=SUer_box")
                yield Request(url=uid_url,
                              meta={'cookiejar': response.meta['cookiejar'],
                                    'uid': self.uid,
                                    'atuser_nickname': atuser},
                              callback=self.parse_atuser_uid)
        else:
            continue
def start_getweiboinfo(self, response):
    """Yield page-load requests for every uid stored in t_user_info.

    Each uid's main page is crawled over WeiboSpider.page_num pages, and
    every page in three AJAX chunks (first/second/third load URL), all
    routed to parse_load. Yields None when the user table is empty
    (kept from the original implementation).
    """
    db = OracleStore()
    conn = db.get_connection()
    sql1 = '''select * from t_user_info'''
    cursor1 = db.select_operation(conn, sql1)
    sql2 = '''select count(*) from t_user_info'''
    cursor2 = db.select_operation(conn, sql2)
    try:
        total = cursor2.fetchone()
        if not total[0]:
            yield None
            return
        for _ in range(total[0]):
            for row in cursor1.fetchmany(1):
                uid = row[0]
                mainpageurl = ('http://weibo.com/u/' + str(uid) +
                               '?from=otherprofile&wvr=3.6&loc=tagweibo')
                GetWeibopage.data['uid'] = uid
                getweibopage = GetWeibopage()
                for page in range(WeiboSpider.page_num):
                    GetWeibopage.data['page'] = page + 1
                    yield Request(
                        url=mainpageurl + getweibopage.get_firstloadurl(),
                        meta={'cookiejar': response.meta['cookiejar'],
                              'uid': uid},
                        callback=self.parse_load)
                    yield Request(
                        url=mainpageurl + getweibopage.get_secondloadurl(),
                        meta={'cookiejar': response.meta['cookiejar'],
                              'uid': uid},
                        callback=self.parse_load)
                    yield Request(
                        url=mainpageurl + getweibopage.get_thirdloadurl(),
                        meta={'cookiejar': response.meta['cookiejar'],
                              'uid': uid},
                        callback=self.parse_load)
    finally:
        # Fix: release the connection even if Scrapy abandons the
        # generator early — the original leaked it in that case.
        db.close_connection(conn, cursor1, cursor2)
def parse_load(self, response):
    """Handle one AJAX chunk of a user's weibo page.

    The first chunk of page one additionally triggers a profile-info
    request for uids not yet in t_user_info. Every chunk is parsed for
    weibo content, timestamps and @-mentions (yielded as a
    WeibospiderItem), followed by uid-lookup requests for each
    mentioned nickname.
    """
    user_info = userinfo.WeiboSpider()  # NOTE(review): apparently unused; kept in case the constructor has side effects
    request_url = response.request.url
    # Used to detect the first load of the first page.
    p = re.compile(r'&pre_page=(\d).*&page=(\d)')
    match = p.search(request_url)
    if not match:
        # Fix: guard against URLs without pre_page/page parameters,
        # which previously crashed with AttributeError on match.group().
        return
    if int(match.group(1)) == 0 and int(match.group(2)) == 1:
        # First chunk of page one: fetch the user's profile unless the
        # uid was crawled before.
        db = OracleStore()
        conn = db.get_connection()
        sql = "select count(*) from t_user_info where userID='%s'" % self.uid
        cursor = db.select_operation(conn, sql)
        count = cursor.fetchone()
        if not count[0]:
            analyzer = Analyzer()
            total_pq = analyzer.get_html(
                response.body, 'script:contains("PCD_person_info")')
            user_property = analyzer.get_userproperty(total_pq)
            if user_property == 'icon_verify_co_v':
                # Verified organization account — not processed for now
                # (would require a DB foreign key).
                public_userinfo_url = analyzer.get_public_userinfohref(
                    total_pq)
            else:
                userinfo_url = analyzer.get_userinfohref(total_pq)
                yield Request(
                    url=userinfo_url,
                    meta={'cookiejar': response.meta['cookiejar'],
                          'uid': response.meta['uid'],
                          'user_property': user_property},
                    callback=self.parse_userinfo)
        db.close_connection(conn, cursor)

    # Collect this chunk's weibo content and @-user information.
    item = WeibospiderItem()
    analyzer = Analyzer()
    friendcircle = FriendCircle()
    total_pq = analyzer.get_mainhtml(response.body)
    item['uid'] = response.meta['uid']
    item['content'] = analyzer.get_content(total_pq)
    item['time'], item['timestamp'] = analyzer.get_time(total_pq)
    atuser_info, item['repost_user'] = analyzer.get_atuser_repostuser(
        total_pq)
    atuser_list = friendcircle.atuser_parser(atuser_info)
    item['atuser_nickname_list'] = atuser_list
    yield item

    # Look up a uid for every @-mentioned nickname via weibo search.
    for atuser_inlist in atuser_list:
        if atuser_inlist != []:
            for atuser in atuser_inlist:
                uid_url = ("http://s.weibo.com/user/" +
                           quote(quote(str(atuser))) + "&Refer=SUer_box")
                yield Request(
                    url=uid_url,
                    meta={'cookiejar': response.meta['cookiejar'],
                          'uid': self.uid,
                          'atuser_nickname': atuser},
                    callback=self.parse_atuser_uid)
        else:
            continue
def open_spider(self, spider):
    """Look up the newest stored publishTimeStamp when the spider starts.

    Weibo-content spiders cache the latest timestamp for spider.uid on
    WeibospiderPipeline.weibocontent_timestamp; the 'keyuser' spider
    caches the latest keyword-search timestamp on
    WeibospiderPipeline.keyword_timestamp. Either value is None when
    the database has no rows yet.
    """
    weibo_spiders = ('keyweibocontent', 'weibocontent_userinfo',
                     'weibocontent_danger', 'weibocontent_userinfo_intime')

    def _newest(sql, column):
        # Execute the query and return the timestamp column of the
        # newest row, or None when there is no matching row.
        db = OracleStore()
        conn = db.get_connection()
        cursor = db.select_operation(conn, sql)
        row = cursor.fetchone()
        value = row[column] if row else None
        db.close_connection(conn, cursor)
        return value

    if spider.name in weibo_spiders:
        sql = ("select * from t_user_weibocontent where userID = '%s' "
               "order by publishTimeStamp desc") % str(spider.uid)
        # publishTimeStamp is column 6 of t_user_weibocontent.
        WeibospiderPipeline.weibocontent_timestamp = _newest(sql, 6)
    if spider.name == 'keyuser':
        sql = ("select * from t_user_keyword where keyword = '%s' "
               "order by publishTimeStamp desc") % str(spider.keyword)
        # publishTimeStamp is column 4 of t_user_keyword.
        WeibospiderPipeline.keyword_timestamp = _newest(sql, 4)
def closed(self, reason):
    """Scrapy close hook: acquire and immediately release a DB connection,
    then log that the spider has closed."""
    store = OracleStore()
    connection = store.get_connection()
    store.close_connection(connection)
    print('--------closed-------')
def closed(self, reason):
    """Called by Scrapy when the spider finishes; grabs and releases a
    DB connection, then prints a closing marker."""
    database = OracleStore()
    link = database.get_connection()
    database.close_connection(link)
    print('--------closed-------')