def search_from_keywordDB(self, response):
    db = MysqlStore()
    conn = db.get_connection()
    main_url = "http://s.weibo.com/weibo/"
    getsearchpage = GetSearchpage()
    sql1 = "select keyword from cauc_keyword_test_copy where is_search = 0"
    cursor = db.select_operation(conn, sql1)
    for round in range(3):
        for keyword in cursor.fetchall():
            keyword = keyword[0]
            print "this is the unsearched keyword:", keyword
            sql2 = "update cauc_keyword_test_copy set is_search = 1 where keyword = '%s'" % keyword
            db.update_operation(conn, sql2)
            search_url = main_url + getsearchpage.get_searchurl(keyword)
            #yield Request(url=search_url, meta={'cookiejar': response.meta['cookiejar'], 'search_url': search_url, 'keyword': keyword}, callback=self.parse_total_page)
        print "current timestamp:", int(time.time())
        # set the interval between crawl rounds
        time.sleep(WeiboSpider.settings['KEYWORD_INTERVAL'])
        sql3 = "select keyword from cauc_keyword_test_copy where is_search = 1"
        cursor = db.select_operation(conn, sql3)
        for keyword in cursor.fetchall():
            keyword = keyword[0]
            print "this is the searched keyword:", keyword
            #yield Request(url=search_url, meta={'cookiejar': response.meta['cookiejar'], 'search_url': search_url, 'keyword': keyword}, callback=self.parse_total_page)
    conn.close()
def search_from_keywordDB(self, response):
    db = MysqlStore()
    main_url = "http://s.weibo.com/weibo/"
    getsearchpage = GetSearchpage()
    for round in range(1):  # number of passes over the database
        conn = db.get_connection()
        # select the keywords whose is_search flag is 0
        sql1 = "select keyword from cauc_keyword_test_copy where is_search = 0 and is_delete = 0"
        cursor1 = db.select_operation(conn, sql1)
        # crawl the keywords whose is_search flag is 0
        for keyword in cursor1.fetchall():
            keyword = keyword[0]
            logger.info("this is the unsearched keyword:%s", keyword)
            search_url = main_url + getsearchpage.get_searchurl(keyword)
            yield Request(url=search_url,
                          cookies=random.choice(COOKIES),
                          meta={'search_url': search_url, 'keyword': keyword},
                          callback=self.parse_total_page)
        # select the keywords whose is_search flag is 1
        sql2 = "select keyword from cauc_keyword_test_copy where is_search = 1 and is_delete = 0"
        cursor2 = db.select_operation(conn, sql2)
        # crawl the keywords whose is_search flag is 1
        for keyword in cursor2.fetchall():
            keyword = keyword[0]
            logger.info("this is the searched keyword:%s", keyword)
            end_time = get_current_time()
            #start_time = get_time_by_interval(int(time.time()), 3600)  # crawl content from the past 3600 seconds, i.e. the past hour
            start_time = get_time_by_interval(int(time.time()), int(self.interval))  # crawl content from the past self.interval seconds
            search_url = main_url + getsearchpage.get_searchurl_time(keyword, start_time, end_time)
            yield Request(url=search_url,
                          cookies=random.choice(COOKIES),
                          meta={'search_url': search_url, 'keyword': keyword},
                          callback=self.parse_total_page)
        # update the is_search flag to 1
        sql3 = "update cauc_keyword_test_copy set is_search = 1 where is_search = 0 and is_delete = 0"
        db.update_operation(conn, sql3)
        db.close_connection(conn)
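# Note: get_current_time() and get_time_by_interval() are helpers defined elsewhere in the
# project. The sketch below is only an assumed reconstruction based on how they are called
# above; the exact timestamp format expected by the Weibo search URLs is an assumption.
import time

def get_current_time(unit='min'):
    # Assumed helper: format "now" at day, hour or minute granularity for use in a search URL.
    fmt = {'day': '%Y-%m-%d', 'hour': '%Y-%m-%d-%H'}.get(unit, '%Y-%m-%d-%H:%M')
    return time.strftime(fmt, time.localtime())

def get_time_by_interval(timestamp, interval, unit='min'):
    # Assumed helper: format (timestamp - interval seconds) the same way, giving the start
    # of the crawl window that ends at get_current_time().
    fmt = {'day': '%Y-%m-%d', 'hour': '%Y-%m-%d-%H'}.get(unit, '%Y-%m-%d-%H:%M')
    return time.strftime(fmt, time.localtime(timestamp - interval))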
def search_from_keywordDB(self, response):
    db = MysqlStore()
    main_url = "http://s.weibo.com/weibo/"
    getsearchpage = GetSearchpage()
    for round in range(1):  # number of passes over the database
        conn = db.get_connection()
        # crawl the keywords whose is_search flag is 0
        sql1 = "select keyword from cauc_keyword_test_copy where is_search = 0"
        cursor = db.select_operation(conn, sql1)
        for keyword in cursor.fetchall():
            keyword = keyword[0]
            logger.info("this is the unsearched keyword:%s", keyword)
            # update the is_search flag to 1
            sql2 = "update cauc_keyword_test_copy set is_search = 1 where keyword = '%s'" % keyword
            db.update_operation(conn, sql2)
            search_url = main_url + getsearchpage.get_searchurl(keyword)
            yield Request(url=search_url,
                          meta={'cookiejar': response.meta['cookiejar'], 'search_url': search_url, 'keyword': keyword},
                          callback=self.parse_total_page)
        logger.info("current timestamp:%d", int(time.time()))
        # set the interval between crawl rounds
        time.sleep(WeiboSpider.settings['KEYWORD_INTERVAL'])  # a 15-minute interval can be used
        # crawl the keywords whose is_search flag is 1
        sql3 = "select keyword from cauc_keyword_test_copy where is_search = 1"
        cursor = db.select_operation(conn, sql3)
        for keyword in cursor.fetchall():
            keyword = keyword[0]
            logger.info("this is the searched keyword:%s", keyword)
            end_time = get_current_time()
            start_time = get_time_by_interval(int(time.time()), 3600)  # crawl content from the past 3600 seconds, i.e. the past hour
            search_url = main_url + getsearchpage.get_searchurl_time(keyword, start_time, end_time)
            yield Request(url=search_url,
                          meta={'cookiejar': response.meta['cookiejar'], 'search_url': search_url, 'keyword': keyword},
                          callback=self.parse_total_page)
        conn.close()
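# Note: GetSearchpage is defined elsewhere in the project; get_searchurl()/get_searchurl_time()
# return the part appended to "http://s.weibo.com/weibo/". The sketch below is a hypothetical
# reconstruction; the query parameters (typeall, suball, timescope, Refer) are assumptions about
# Weibo's search URL layout, not taken from the original source.
import urllib

class GetSearchpage(object):
    def get_searchurl(self, keyword):
        # Search a keyword with no time restriction (keyword assumed to be a UTF-8 byte string).
        return urllib.quote(keyword) + "&typeall=1&suball=1&Refer=g"

    def get_searchurl_time(self, keyword, start_time, end_time):
        # Restrict the search to the [start_time, end_time] window (assumed timescope format).
        return (urllib.quote(keyword) + "&typeall=1&suball=1&timescope=custom:" +
                start_time + ":" + end_time + "&Refer=g")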
def start_getweibo_info(self, response):
    db = MysqlStore()
    # fetch the key persons that have not been crawled yet and whose is_delete flag is 0
    GetWeibopage.data['page'] = 1
    getweibopage = GetWeibopage()
    for round in range(1):  # number of passes over the database
        conn = db.get_connection()
        sql1 = "select user_id from cauc_black_man_test a \
                where a.is_search = 0 and a.is_delete = 0"
        cursor = db.select_operation(conn, sql1)
        for user_id in cursor.fetchall():
            user_id = user_id[0]
            logger.info("this is the unsearched user_id:%s", user_id)
            # update the is_search flag to 1
            sql2 = "update cauc_black_man_test set is_search = 1 where user_id = '%s'" % user_id
            db.update_operation(conn, sql2)
            # get the total number of pages to crawl
            start_time = self.start_time
            end_time = get_current_time('hour')
            mainpage_url = "http://weibo.com/" + str(user_id) + "?is_ori=1&is_forward=1&is_text=1&is_pic=1&key_word=&start_time=" + start_time + "&end_time=" + end_time + "&is_search=1&is_searchadv=1&"
            GetWeibopage.data['uid'] = user_id
            thirdload_url = mainpage_url + getweibopage.get_thirdloadurl()
            yield Request(url=thirdload_url,
                          meta={'cookiejar': response.meta['cookiejar'], 'mainpage_url': mainpage_url, 'uid': user_id, 'is_search': 0},
                          callback=self.parse_total_page)
        logger.info("current timestamp:%d", int(time.time()))
        # set the interval between crawl rounds
        time.sleep(WeiboSpider.settings['FRIENDCIRCAL_INTERVAL'])  # a 15-minute interval can be used
        # fetch the key persons already crawled (is_search = 1) whose is_delete flag is 0
        sql3 = "select user_id from cauc_black_man_test a \
                where a.is_search = 1 and a.is_delete = 0"
        cursor = db.select_operation(conn, sql3)
        for user_id in cursor.fetchall():
            user_id = user_id[0]
            logger.info("this is the searched user_id:%s", user_id)
            start_time = get_time_by_interval(int(time.time()), 86400, 'hour')
            end_time = get_current_time('hour')  # the window between start and end time is one day (86400 s)
            mainpage_url = "http://weibo.com/" + str(user_id) + "?is_ori=1&is_forward=1&is_text=1&is_pic=1&key_word=&start_time=" + start_time + "&end_time=" + end_time + "&is_search=1&is_searchadv=1&"
            GetWeibopage.data['uid'] = user_id
            thirdload_url = mainpage_url + getweibopage.get_thirdloadurl()
            #yield Request(url=thirdload_url, meta={'cookiejar': response.meta['cookiejar'], 'mainpage_url': mainpage_url, 'uid': user_id, 'is_search': 1}, callback=self.parse_total_page)
        conn.close()
def start_getweibo_info(self, response):
    db = MysqlStore()
    # fetch the key persons that have not been crawled yet and whose is_delete flag is 0
    GetWeibopage.data['page'] = 1
    getweibopage = GetWeibopage()
    for round in range(1):  # number of passes over the database
        conn = db.get_connection()
        sql1 = "select user_id from cauc_warning_man_test a \
                where a.is_search = 0 and a.is_delete = 0"
        cursor = db.select_operation(conn, sql1)
        for user_id in cursor.fetchall():
            user_id = user_id[0]
            logger.info("this is the unsearched user_id:%s", user_id)
            # update the is_search flag to 1
            sql2 = "update cauc_warning_man_test set is_search = 1 where user_id = '%s'" % user_id
            db.update_operation(conn, sql2)
            # get the total number of pages to crawl
            start_time = self.start_time
            end_time = get_current_time('hour')
            mainpage_url = "http://weibo.com/" + str(user_id) + "?is_ori=1&is_forward=1&is_text=1&is_pic=1&key_word=&start_time=" + start_time + "&end_time=" + end_time + "&is_search=1&is_searchadv=1&"
            GetWeibopage.data['uid'] = user_id
            thirdload_url = mainpage_url + getweibopage.get_thirdloadurl()
            yield Request(url=thirdload_url,
                          meta={'cookiejar': response.meta['cookiejar'], 'mainpage_url': mainpage_url, 'uid': user_id, 'is_search': 0},
                          callback=self.parse_total_page)
        logger.info("current timestamp:%d", int(time.time()))
        # set the interval between crawl rounds
        time.sleep(WeiboSpider.settings['WEIBOCONTENT_INTERVAL'])  # a 15-minute interval can be used
        # fetch the warning-list persons already crawled (is_search = 1) whose is_delete flag is 0
        sql3 = "select user_id from cauc_warning_man_test a \
                where a.is_search = 1 and a.is_delete = 0"
        cursor = db.select_operation(conn, sql3)
        for user_id in cursor.fetchall():
            user_id = user_id[0]
            logger.info("this is the searched user_id:%s", user_id)
            start_time = get_time_by_interval(int(time.time()), 86400, 'hour')
            end_time = get_current_time('hour')  # the window between start and end time is one day (86400 s)
            mainpage_url = "http://weibo.com/" + str(user_id) + "?is_ori=1&is_forward=1&is_text=1&is_pic=1&key_word=&start_time=" + start_time + "&end_time=" + end_time + "&is_search=1&is_searchadv=1&"
            GetWeibopage.data['uid'] = user_id
            thirdload_url = mainpage_url + getweibopage.get_thirdloadurl()
            #yield Request(url=thirdload_url, meta={'cookiejar': response.meta['cookiejar'], 'mainpage_url': mainpage_url, 'uid': user_id, 'is_search': 1}, callback=self.parse_total_page)
        conn.close()
def start_getweibo_info(self, response):
    db = MysqlStore()
    # fetch the key persons that have not been crawled yet and whose is_delete flag is 0
    GetWeibopage.data['page'] = 1
    getweibopage = GetWeibopage()
    #for round in range(1):  # number of passes over the database
    conn = db.get_connection()
    sql1 = "select user_id from cauc_warning_man a \
            where a.is_search = 0 and a.is_delete = 0"
    cursor1 = db.select_operation(conn, sql1)
    for user_id in cursor1.fetchall():
        user_id = user_id[0]
        logger.info("this is the unsearched user_id:%s", user_id)
        # get the total number of pages to crawl
        start_time = self.start_time
        end_time = get_current_time('day')
        mainpage_url = "http://weibo.com/" + str(user_id) + "?is_ori=1&is_forward=1&is_text=1&is_pic=1&key_word=&start_time=" + start_time + "&end_time=" + end_time + "&is_search=1&is_searchadv=1&"
        GetWeibopage.data['uid'] = user_id
        thirdload_url = mainpage_url + getweibopage.get_thirdloadurl()
        yield Request(url=thirdload_url,
                      cookies=random.choice(COOKIES),
                      meta={'mainpage_url': mainpage_url, 'uid': user_id, 'is_search': 0},
                      callback=self.parse_total_page)
    # fetch the warning-list persons already crawled (is_search = 1) whose is_delete flag is 0
    sql2 = "select user_id from cauc_warning_man a \
            where a.is_search = 1 and a.is_delete = 0"
    cursor2 = db.select_operation(conn, sql2)
    for user_id in cursor2.fetchall():
        user_id = user_id[0]
        logger.info("this is the searched user_id:%s", user_id)
        #start_time = get_time_by_interval(int(time.time()), 86400, 'day'); end_time = get_current_time('day')  # window of one day (86400 s), i.e. content from the past day
        start_time = get_time_by_interval(int(time.time()), int(self.interval), 'day')
        end_time = get_current_time('day')  # the start/end window spans x days, converted from the seconds given by self.interval
        mainpage_url = "http://weibo.com/" + str(user_id) + "?is_ori=1&is_forward=1&is_text=1&is_pic=1&key_word=&start_time=" + start_time + "&end_time=" + end_time + "&is_search=1&is_searchadv=1&"
        GetWeibopage.data['uid'] = user_id
        thirdload_url = mainpage_url + getweibopage.get_thirdloadurl()
        yield Request(url=thirdload_url,
                      cookies=random.choice(COOKIES),
                      meta={'mainpage_url': mainpage_url, 'uid': user_id, 'is_search': 1},
                      callback=self.parse_total_page)
    # update the is_search flag to 1
    sql3 = "update cauc_warning_man set is_search = 1 where is_search = 0 and is_delete = 0"
    db.update_operation(conn, sql3)
    db.close_connection(conn)
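# Note: MysqlStore is a project-specific wrapper that is not shown in this file. A minimal
# sketch of the interface these methods rely on (get_connection, select_operation,
# update_operation, close_connection) could look like the following; the MySQLdb driver and
# the connection parameters are assumptions.
import MySQLdb

class MysqlStore(object):
    def get_connection(self):
        # Assumed connection settings; replace with the project's real host/user/password/db.
        return MySQLdb.connect(host='127.0.0.1', user='root', passwd='', db='weibo', charset='utf8')

    def select_operation(self, conn, sql):
        # Run a SELECT and return the cursor so callers can fetchall() the rows.
        cursor = conn.cursor()
        cursor.execute(sql)
        return cursor

    def update_operation(self, conn, sql):
        # Run an UPDATE and commit it right away.
        cursor = conn.cursor()
        cursor.execute(sql)
        conn.commit()

    def close_connection(self, conn):
        conn.close()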
def search_from_keywordDB(self, response):
    db = MysqlStore()
    main_url = "http://s.weibo.com/weibo/"
    getsearchpage = GetSearchpage()
    for round in range(1):  # number of passes over the database
        conn = db.get_connection()
        # select the keywords whose is_search flag is 0
        sql1 = "select keyword from cauc_keyword_test_copy where is_search = 0 and is_delete = 0"
        cursor1 = db.select_operation(conn, sql1)
        # crawl the keywords whose is_search flag is 0
        for keyword in cursor1.fetchall():
            keyword = keyword[0]
            logger.info("this is the unsearched keyword:%s", keyword)
            search_url = main_url + getsearchpage.get_searchurl(keyword)
            yield Request(url=search_url,
                          cookies=random.choice(COOKIES),
                          meta={'search_url': search_url, 'keyword': keyword},
                          callback=self.parse_total_page)
        # select the keywords whose is_search flag is 1
        sql2 = "select keyword from cauc_keyword_test_copy where is_search = 1 and is_delete = 0"
        cursor2 = db.select_operation(conn, sql2)
        # crawl the keywords whose is_search flag is 1
        for keyword in cursor2.fetchall():
            keyword = keyword[0]
            logger.info("this is the searched keyword:%s", keyword)
            end_time = get_current_time()
            #start_time = get_time_by_interval(int(time.time()), 3600)  # crawl content from the past 3600 seconds, i.e. the past hour
            start_time = get_time_by_interval(int(time.time()), int(self.interval))  # crawl content from the past self.interval seconds
            search_url = main_url + getsearchpage.get_searchurl_time(keyword, start_time, end_time)
            yield Request(url=search_url,
                          cookies=random.choice(COOKIES),
                          meta={'search_url': search_url, 'keyword': keyword},
                          callback=self.parse_total_page)
        # update the is_search flag to 1
        sql3 = "update cauc_keyword_test_copy set is_search = 1 where is_search = 0 and is_delete = 0"
        db.update_operation(conn, sql3)
        db.close_connection(conn)
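# Note: the versions that pass cookies=random.choice(COOKIES) assume a module-level COOKIES
# list holding one cookie dict per logged-in Weibo account. How that list is built is not shown
# here; a minimal sketch under that assumption (cookie names and values are placeholders):
import random

COOKIES = [
    {'SUB': 'placeholder_cookie_for_account_1'},
    {'SUB': 'placeholder_cookie_for_account_2'},
]

# Picking a random entry per request spreads the crawl across accounts, which is presumably
# why random.choice(COOKIES) is used in the Request calls above.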
def search_from_keywordDB(self, response):
    db = MysqlStore()
    main_url = "http://s.weibo.com/weibo/"
    getsearchpage = GetSearchpage()
    for round in range(1):  # number of passes over the database
        conn = db.get_connection()
        # crawl the keywords whose is_search flag is 0
        sql1 = "select keyword from cauc_keyword_test_copy where is_search = 0"
        cursor = db.select_operation(conn, sql1)
        for keyword in cursor.fetchall():
            keyword = keyword[0]
            logger.info("this is the unsearched keyword:%s", keyword)
            # update the is_search flag to 1
            sql2 = "update cauc_keyword_test_copy set is_search = 1 where keyword = '%s'" % keyword
            db.update_operation(conn, sql2)
            search_url = main_url + getsearchpage.get_searchurl(keyword)
            yield Request(url=search_url,
                          meta={'cookiejar': response.meta['cookiejar'], 'search_url': search_url, 'keyword': keyword},
                          callback=self.parse_total_page)
        logger.info("current timestamp:%d", int(time.time()))
        # set the interval between crawl rounds
        time.sleep(WeiboSpider.settings['KEYWORD_INTERVAL'])  # a 15-minute interval can be used
        # crawl the keywords whose is_search flag is 1
        sql3 = "select keyword from cauc_keyword_test_copy where is_search = 1"
        cursor = db.select_operation(conn, sql3)
        for keyword in cursor.fetchall():
            keyword = keyword[0]
            logger.info("this is the searched keyword:%s", keyword)
            end_time = get_current_time()
            start_time = get_time_by_interval(int(time.time()), 3600)  # crawl content from the past 3600 seconds, i.e. the past hour
            search_url = main_url + getsearchpage.get_searchurl_time(keyword, start_time, end_time)
            yield Request(url=search_url,
                          meta={'cookiejar': response.meta['cookiejar'], 'search_url': search_url, 'keyword': keyword},
                          callback=self.parse_total_page)
        conn.close()