def monitor(website_name, merchant_name, batch_num):
    """Search chinaft.com.cn for *website_name* and run sentiment processing.

    Takes a home-page snapshot, then scans every news entry whose text
    mentions *website_name*.  Honours the per-batch cancel flag via
    ``gl.check_by_batch_num``.
    """
    driver = None  # keep the name bound for the finally block
    try:
        driver = WebDriver.get_chrome()
        senti_util = SentiUtil()
        url = "http://www.chinaft.com.cn/news/search/_1.shtml?key=" + urllib.parse.quote(
            website_name)
        driver.get(url)
        source = driver.page_source
        senti_util.snapshot_home("交易中国", merchant_name, url, batch_num,
                                 driver)
        soup = BeautifulSoup(source, 'html.parser')
        news = soup.find_all("div",
                             attrs={'class': 'xixi_ChinaFT_left_news_box'})
        if len(news) > 0:
            for new in news:
                # stop early if this batch has been cancelled
                if not gl.check_by_batch_num(batch_num):
                    break
                href = new.find_all('a')[1].get("href")
                content = new.find_all('a')[1].get_text()
                if content.find(website_name) != -1:
                    senti_util.senti_process_text(
                        "交易中国", merchant_name, content,
                        "http://www.chinaft.com.cn" + href, batch_num)
        else:
            logger.info("交易中国没有搜索到数据: %s", merchant_name)
    except Exception as e:
        logger.error(e)
    finally:
        # get_chrome() may fail or return None; guard before quitting
        if driver is not None:
            driver.quit()
Beispiel #2
0
 def monitor(keyword, batch_num, website):
     """Search Heimao (Sina) complaints for *keyword* and process matches.

     Logs an error record and returns early when no browser is available.
     """
     driver = WebDriver.get_chrome()
     senti_util = SentiUtil()
     url = "https://tousu.sina.com.cn/index/search/?keywords=" + urllib.parse.quote(keyword) + "&t=0"
     if driver is None:
         # No browser available: record the failure and bail out.
         senti_util.log_error("黑猫投诉", url, batch_num, website)
         return
     try:
         driver.get(url)
         source = driver.page_source
         senti_util.snapshot_home("黑猫投诉", url, batch_num, website, driver)
         soup = BeautifulSoup(source, 'html.parser')
         items = soup.find_all(attrs={'class': 'blackcat-con'})
         if items:
             for item in items:
                 href = item.find_all('a')[0].get("href")
                 content = item.find_all('h1')[0].get_text()
                 if content.find(keyword) != -1:
                     senti_util.senti_process_text("黑猫投诉", content, href,
                                                   batch_num, website)
         else:
             logger.info("黑猫投诉没有搜索到数据: %s", keyword)
     except Exception as e:
         logger.error(e)
         return
     finally:
         driver.quit()
Beispiel #3
0
 def monitor(keyword, batch_num, website):
     """Search the paynews.net forum for *keyword* and process matching posts."""
     driver = WebDriver.get_chrome()
     senti_util = SentiUtil()
     url = "http://paynews.net/search.php?mod=forum"
     if driver is None:
         senti_util.log_error("支付产业网", url, batch_num, website)
         return
     try:
         driver.get(url)
         # Drive the on-page search box; this site has no query-string search.
         search_text_blank = driver.find_element_by_id("scform_srchtxt")
         search_text_blank.send_keys(keyword)
         search_text_blank.send_keys(Keys.RETURN)
         senti_util.snapshot_home("支付产业网", url, batch_num, website, driver)
         source = driver.page_source
         soup = BeautifulSoup(source, 'html.parser')
         div_list = soup.find(attrs={'class': 'slst mtw'})
         if div_list is not None and len(div_list) > 0:
             for new in div_list.find_all('li'):
                 href = new.find_all('a')[0].get("href")
                 content = new.find_all('a')[0].get_text()
                 if content.find(keyword) != -1:
                     senti_util.senti_process_text(
                         "支付产业网", content, "http://paynews.net/" + href,
                         batch_num, website)
         else:
             logger.info("支付产业网没有搜索到数据: %s", keyword)
     except Exception as e:
         logger.error(e)
         return
     finally:
         driver.quit()
Beispiel #4
0
    def monitor(keyword, batch_num, website):
        """Look up *keyword* on Baidu Baike and process the page description.

        If the "sorry" (page-not-found) marker is present, only a snapshot
        is taken and the miss is logged.
        """
        driver = WebDriver.get_chrome()
        senti_util = SentiUtil()
        url = 'https://baike.baidu.com/item/%s' % urllib.parse.quote(keyword)
        if driver is None:
            senti_util.log_error("百度百科", url, batch_num, website)
            return
        try:
            driver.get(url)
            source = driver.page_source
            soup = BeautifulSoup(source, 'html.parser')
            # "sorryCont" is Baike's page-not-found marker.
            check_exist = soup.find_all(
                name='p', attrs={'class': re.compile('sorryCont')})
            if len(check_exist) == 0:
                description = soup.find(
                    attrs={"name": "description"})['content']
                senti_util.senti_process_text("百度百科", description, url,
                                              batch_num, website)
            else:
                senti_util.snapshot_home("百度百科", url, batch_num, website,
                                         driver)
                logger.info("百度百科没有搜索到数据: %s", keyword)
        except Exception as e:
            logger.error(e)
            return
        finally:
            driver.quit()
 def monitor(keyword, website_name, batch_num, merchant_name, merchant_num):
     """Search Baidu Tieba for *keyword* and process matching thread titles."""
     driver = WebDriver.get_chrome()
     try:
         senti_util = SentiUtil()
         url = "http://tieba.baidu.com/f?fr=wwwt&kw=" + urllib.parse.quote(
             keyword)
         driver.get(url)
         senti_util.snapshot_home("百度贴吧", website_name, url, batch_num,
                                  merchant_name, merchant_num, driver)
         source = driver.page_source
         soup = BeautifulSoup(source, 'html.parser')
         # NOTE: the trailing space in the class attribute is intentional —
         # it matches the site's markup.
         news = soup.find_all(
             "div", attrs={'class': 'threadlist_title pull_left j_th_tit '})
         if news:
             for new in news:
                 href = new.find_all('a')[0].get("href")
                 content = new.find_all('a')[0].get_text()
                 if content.find(keyword) != -1:
                     senti_util.senti_process_text(
                         "百度贴吧", website_name, content,
                         "http://tieba.baidu.com" + href, batch_num,
                         merchant_name, merchant_num)
         else:
             logger.info("百度贴吧没有搜索到数据: %s", keyword)
     except Exception as e:
         logger.error(e)
         return
     finally:
         # get_chrome() may have returned None; guard before quitting.
         if driver is not None:
             driver.quit()
Beispiel #6
0
 def get_access_res(url):
     """Probe *url* with a headless browser.

     Returns ``(http_url, final_url)`` when the page loads normally,
     ``(None, http_url)`` when it looks unreachable/blocked/parked, and
     ``(None, None)`` on error.
     """
     driver = WebDriver.get_chrome_for_access()
     try:
         if str(url).startswith("http"):
             http_url = str(url)
         else:
             http_url = "http://" + str(url)
         logger.info("http_url: %s", http_url)
         driver.get(http_url)
         title = driver.title
         source = driver.page_source
         # Markers indicating an unreachable, blocked or parked page.
         # (The original listed 'ERR_NAME_NOT_RESOLVED' twice; deduplicated.)
         error_markers = (
             'ERR_NAME_NOT_RESOLVED',
             'ERR_CONNECTION_REFUSED',
             'ERR_CONNECTION_TIMED_OUT',
             'ERR_NAME_RESOLUTION_FAILED',
             'DNS_PROBE_FINISHED_NXDOMAIN',
             'ERR_EMPTY_RESPONSE',
             '主机开设成功',
             '非法阻断',
             'Bad Request',
             '404 page not found',
             'https://wanwang.aliyun.com/domain/parking',
         )
         if '404' in title or any(marker in source for marker in error_markers):
             return None, http_url
         else:
             return http_url, driver.current_url
     except Exception as e:
         logger.error(e)
         return None, None
     finally:
         driver.quit()
 def monitor(keyword, batch_num, website):
     """Search ts.21cn.com (聚投诉) for *keyword* and process matching complaints."""
     driver = WebDriver.get_chrome()
     senti_util = SentiUtil()
     url = "http://ts.21cn.com/home/search?keyword=" + urllib.parse.quote(
         keyword)
     if driver is None:
         senti_util.log_error("聚投诉", url, batch_num, website)
         return
     try:
         driver.get(url)
         driver.implicitly_wait(3)  # results are rendered client-side
         source = driver.page_source
         senti_util.snapshot_home("聚投诉", url, batch_num, website, driver)
         soup = BeautifulSoup(source, 'html.parser')
         items = soup.find_all(attrs={'class': 'complain-item'})
         if items:
             for item in items:
                 href = item.find_all('a')[1].get("href")
                 content = item.find_all('a')[1].get_text()
                 if content.find(keyword) != -1:
                     # NOTE(review): the paycircle.cn prefix looks copy-pasted
                     # from another scraper — confirm the intended base URL.
                     senti_util.senti_process_text(
                         "聚投诉", content,
                         "http://www.paycircle.cn" + href[1:], batch_num,
                         website)
         else:
             logger.info("聚投诉没有搜索到数据: %s", keyword)
     except Exception as e:
         logger.error(e)
         return
     finally:
         driver.quit()
Beispiel #8
0
 def monitor(keyword, batch_num, website):
     """Search wenshu.court.gov.cn (裁判文书网) for *keyword* and process matching judgments."""
     driver = WebDriver.get_chrome()
     senti_util = SentiUtil()
     url = "http://wenshu.court.gov.cn/"
     if driver is None:
         # Fixed: the platform label here previously said "黑猫投诉"
         # (copy-paste from the Heimao monitor).
         senti_util.log_error("裁判文书网", url, batch_num, website)
         return
     try:
         driver.get(url)
         search_text_blank = driver.find_element_by_xpath(
             "//*[@class='searchKey search-inp']")
         search_text_blank.send_keys(keyword)
         search_text_blank.send_keys(Keys.RETURN)
         time.sleep(3)  # wait for the result list to render
         source = driver.page_source
         senti_util.snapshot_home("裁判文书网", url, batch_num, website, driver)
         soup = BeautifulSoup(source, 'html.parser')
         for a_tag in soup.find_all('a', class_='caseName'):
             href = a_tag.get("href")
             title = a_tag.get_text()
             if title.find(keyword) != -1:
                 senti_util.senti_process_text(
                     "裁判文书网", title,
                     "http://wenshu.court.gov.cn/website/wenshu" + href[2:],
                     batch_num, website)
     except Exception as e:
         logger.error(e)
         return
     finally:
         driver.quit()
 def monitor(keyword, batch_num, website):
     """Run a Baidu web search for *keyword* and process matching result titles."""
     driver = WebDriver.get_chrome()
     senti_util = SentiUtil()
     url = "https://www.baidu.com/"
     if driver is None:
         senti_util.log_error("百度搜索", url, batch_num, website)
         return
     try:
         driver.get(url)
         # Type the query into the search box and submit with Enter.
         search_box = driver.find_element_by_id("kw")
         search_box.send_keys(keyword)
         search_box.send_keys(Keys.RETURN)
         time.sleep(5)
         senti_util.snapshot_home("百度搜索", url, batch_num, website, driver)
         page = BeautifulSoup(driver.page_source, 'html.parser')
         # Each organic result heading is an <h3 class="t"> wrapping a link.
         for heading in page.find_all('h3', class_='t'):
             link = heading.find("a")
             caption = link.get_text()
             if caption.find(keyword) != -1:
                 senti_util.senti_process_text("百度搜索", caption, str(link.get("href")),
                                               batch_num, website)
     except Exception as e:
         logger.error(e)
         return
     finally:
         driver.quit()
Beispiel #10
0
    def monitor(keyword, batch_num, website):
        """Search paycircle.cn company listings for *keyword* and process matches."""
        driver = WebDriver.get_chrome()
        senti_util = SentiUtil()
        url = "http://www.paycircle.cn/company/search.php?kw=" + urllib.parse.quote(
            keyword) + "&c=SearchList&"
        if driver is None:
            senti_util.log_error("支付圈", url, batch_num, website)
            return
        try:
            driver.get(url)
            source = driver.page_source
            senti_util.snapshot_home("支付圈", url, batch_num, website, driver)
            soup = BeautifulSoup(source, 'html.parser')
            div_list = soup.find_all(attrs={'class': 'list'})
            if len(div_list) > 0:
                # Result rows live in the first "list" container.
                news = div_list[0].find_all('tr')
                for new in news:
                    href = new.find_all('td')[2].find_all('a')[0].get("href")
                    content = new.find_all('td')[2].find_all(
                        'li')[1].get_text()
                    if content.find(keyword) != -1:
                        senti_util.senti_process_text("支付圈", content, href,
                                                      batch_num, website)
            else:
                logger.info("支付圈没有搜索到数据: %s", keyword)
        except Exception as e:
            logger.error(e)
            return
        finally:
            driver.quit()
 def monitor(website_name, merchant_name, batch_num):
     """Search p2peye.com (网贷天眼) for *website_name* and process matches."""
     driver = None  # keep the name bound for the finally block
     try:
         driver = WebDriver.get_chrome()
         senti_util = SentiUtil()
         url = "https://www.p2peye.com/search.php?mod=zonghe&srchtxt=" + urllib.parse.quote(website_name)
         driver.get(url)
         source = driver.page_source
         senti_util.snapshot_home("网贷天眼", merchant_name, url,
                                  batch_num, driver)
         soup = BeautifulSoup(source, 'html.parser')
         news = soup.find_all(attrs={'class': 'result-t'})
         if news:
             for new in news:
                 href = new.find_all('a')[0].get("href")
                 content = new.get_text()
                 if content.find(website_name) != -1:
                     # hrefs are protocol-relative ("//..."): prepend http://.
                     senti_util.senti_process_text("网贷天眼", merchant_name, content, "http://" + href[2:],
                                                   batch_num)
         else:
             logger.info("网贷天眼没有搜索到数据: %s", merchant_name)
     except Exception as e:
         logger.error(e)
         return
     finally:
         # get_chrome() may fail or return None; guard before quitting.
         if driver is not None:
             driver.quit()
Beispiel #12
0
    def senti_process_text(platform, text, href, batch_num, website):
        """Scan *text* for sensitive keywords and persist abnormal hits.

        Snapshots *href*, then adds one "异常" MonitorThird row per keyword
        found in *text*.  For Baidu Baike pages with no hits, a single
        "正常" row is recorded so the page is still tracked.
        """
        driver = WebDriver.get_chrome()
        keyword_dao = KeywordDao()
        monitor_third_dao = MonitorThirdDao()

        def _base_record():
            # Build a MonitorThird row pre-filled with the common fields.
            record = MonitorThird()
            record.website_name = website.website_name
            record.merchant_num = website.merchant_num
            record.merchant_name = website.merchant_name
            record.domain_name = website.domain_name
            record.saler = website.saler
            record.batch_num = batch_num
            record.url = href
            record.type = platform
            return record

        #  截图 (snapshot the target page first)
        try:
            driver.get(href)
            snapshot = SnapshotService.create_snapshot(driver, batch_num,
                                                       website, '舆情')
            is_normal = "正常"
            for keyword in keyword_dao.get_all():
                if text.find(keyword.name) != -1:
                    is_normal = "异常"
                    monitor_third = _base_record()
                    monitor_third.is_normal = is_normal
                    monitor_third.level = '高'
                    monitor_third.outline = '检测到敏感词:' + str(keyword.name)
                    monitor_third.snapshot = snapshot
                    monitor_third_dao.add(monitor_third)
            if is_normal == "正常" and platform == "百度百科":
                # Fixed: the original reused the last loop's record here,
                # which was unbound when the keyword list was empty.
                monitor_third = _base_record()
                monitor_third.level = '-'
                monitor_third.outline = '-'
                monitor_third.is_normal = is_normal
                monitor_third.snapshot = snapshot
                monitor_third_dao.add(monitor_third)
        except ConnectionError as conn_error:
            logger.error(conn_error)
        except Exception as e:
            logger.error(e)
            return
        finally:
            driver.quit()
Beispiel #13
0
class TestMysql(object):
    # Manual smoke test: drives a remote Selenium Chrome node against the
    # zfzj.cn forum search and scrapes the result list.
    # NOTE(review): the try block below sits at class-body level, so it runs
    # at import time; if the __name__ guard is false, `driver` is unbound.
    if __name__ == "__main__":
        url = "http://www.zfzj.cn/search.php"
        driver = webdriver.Remote(
            command_executor='http://172.17.161.230:8911/wd/hub',
            desired_capabilities=DesiredCapabilities.CHROME)

        driver.set_page_load_timeout(10)
        driver.set_script_timeout(10)
        driver.maximize_window()
    try:
        driver.get(url)
        # Type the query into the forum search box and submit with Enter.
        search_text_blank = driver.find_element_by_id("scform_srchtxt")
        search_text_blank.send_keys('京东')
        search_text_blank.send_keys(Keys.RETURN)
        time.sleep(5)
        source = driver.page_source
        soup = BeautifulSoup(source, 'html.parser')
        items = soup.find_all(attrs={'class': 'blackcat-con'})
        if items.__len__() > 0:
            for item in items:
                href = item.find_all('a')[0].get("href")
                content = item.find_all('h1')[0].get_text()
        else:
            logger.info("黑猫投诉没有搜索到数据:")
        driver.quit()
    except Exception as e:
        logger.error(e)
        driver.quit()
Beispiel #14
0
class TestMysql(object):
    # Manual smoke test: loads a 聚投诉 (21cn) search page via a remote
    # Selenium Chrome node and scrapes the complaint list.
    # NOTE(review): the try block runs at import time; `driver` is unbound
    # when the __name__ guard is false.
    if __name__ == "__main__":
        url = "http://ts.21cn.com/home/search?keyword=%E4%BA%AC%E4%B8%9C"
        driver = webdriver.Remote(
            command_executor='http://172.17.161.230:8912/wd/hub',
            desired_capabilities=DesiredCapabilities.CHROME)

        driver.set_page_load_timeout(10)
        driver.set_script_timeout(10)
        driver.maximize_window()
    try:
        driver.get(url)
        time.sleep(5)  # give the page time to render its results
        source = driver.page_source
        soup = BeautifulSoup(source, 'html.parser')
        items = soup.find_all(attrs={'class': 'blackcat-con'})
        if items.__len__() > 0:
            for item in items:
                href = item.find_all('a')[0].get("href")
                content = item.find_all('h1')[0].get_text()
        else:
            logger.info("黑猫投诉没有搜索到数据:")
        driver.quit()
    except Exception as e:
        logger.error(e)
        driver.quit()
Beispiel #15
0
 def get_chrome():
     """Create a local Chrome WebDriver, or return None when creation fails.

     Image and stylesheet loading are disabled to speed up page fetches;
     page-load and script timeouts are both set to 60 seconds.
     """
     options = Options()
     # Block images and CSS so pages load faster.
     preferences = {"profile.managed_default_content_settings.images": 2, 'permissions.default.stylesheet': 2}
     options.add_experimental_option("prefs", preferences)
     try:
         browser = webdriver.Chrome(executable_path='/usr/bin/chromedriver',
                                    desired_capabilities=DesiredCapabilities.CHROME,
                                    options=options)
         browser.set_page_load_timeout(60)
         browser.set_script_timeout(60)
         browser.maximize_window()
         return browser
     except Exception as e:
         logger.error(e)
         return None
Beispiel #16
0
class TestMysql(object):
    # Manual smoke test: scrapes the paycircle.cn company search via a
    # remote Selenium Chrome node.
    # NOTE(review): runs at import time; `driver` is unbound when the
    # __name__ guard is false, and the except path never quits the driver.
    if __name__ == "__main__":
        url = "http://www.paycircle.cn/company/search.php?kw=" + urllib.parse.quote(
            '京东') + "&c=SearchList&"
        driver = webdriver.Remote(
            command_executor='http://172.17.161.230:8911/wd/hub',
            desired_capabilities=DesiredCapabilities.CHROME)

        driver.set_page_load_timeout(10)
        driver.set_script_timeout(10)
        driver.maximize_window()
    try:
        driver.get(url)
        source = driver.page_source
        soup = BeautifulSoup(source, 'html.parser')
        div_list = soup.find_all(attrs={'class': 'list'})
        if div_list.__len__() > 0:
            news = div_list[0].find_all('tr')
            for new in news:
                href = new.find_all('td')[2].find_all('a')[0].get("href")
                content = new.find_all('td')[2].find_all('li')[1].get_text()
        else:
            logger.info("支付圈没有搜索到数据")
        driver.quit()
    except Exception as e:
        logger.error(e)
Beispiel #17
0
class TestWangdaitianyan(object):
    # Manual smoke test for wangdaibus.com search scraping.
    # NOTE(review): everything below runs at import time (class body), and
    # the driver is never quit.
    if __name__ == "__main__":
        """
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_argument('--headless')
        driver = webdriver.Chrome(chrome_options=chrome_options,
                                  executable_path="C:/chromedriver_2.38/chromedriver.exe")  
        driver.get("http://www.wangdaibus.com/search.php?mod=forum")
        driver.find_element_by_id("scform_srchtxt").send_keys(u"京东")
        driver.find_element_by_id("scform_submit").click()     
        """
        driver = WebDriver.get_chrome()
        try:
            driver.get(
                "http://www.wangdaibus.com/search/list?subject=%E4%BA%AC%E4%B8%9C"
            )
            aaa = "京东"
            #lement_by_xpath('//input[@name="subject"]').send_keys(aaa)
            #driver.find_element_by_xpath('//input[@name="subject"]').send_keys(Keys.ENTER)
            time.sleep(10)

        except Exception as e:  # exception handling
            logger.error(e)
            pass
        SnapshotService.create_snapshot(driver)
        source = driver.page_source
        soup = BeautifulSoup(source, 'html.parser')
        news = soup.find_all("h3", attrs={'class': 'xs3'})
        if news.__len__() > 0:
            for new in news:
                href = new.find_all('a')[0].get("href")
                logger.info("http://www.wangdaibus.com/" + href)
                logger.info(new.get_text())
        '''
Beispiel #18
0
class TestMysql(object):
    # Manual smoke test: drives the paynews.net forum search via a remote
    # Selenium Chrome node and prints matching entries.
    # NOTE(review): runs at import time; `driver` is unbound when the
    # __name__ guard is false.
    if __name__ == "__main__":
        url = "http://paynews.net/search.php?mod=forum"
        driver = webdriver.Remote(
            command_executor='http://172.17.161.230:8911/wd/hub',
            desired_capabilities=DesiredCapabilities.CHROME)

        driver.set_page_load_timeout(10)
        driver.set_script_timeout(10)
        driver.maximize_window()
    try:
        driver.get(url)
        # Type the query into the forum search box and submit with Enter.
        search_text_blank = driver.find_element_by_id("scform_srchtxt")
        search_text_blank.send_keys('京东')
        search_text_blank.send_keys(Keys.RETURN)
        source = driver.page_source
        soup = BeautifulSoup(source, 'html.parser')
        driver.save_screenshot("D:/a.png")
        div_list = soup.find(attrs={'class': 'slst mtw'})
        if div_list.__len__() > 0:
            news = div_list.find_all('li')
            for new in news:
                href = new.find_all('a')[0].get("href")
                content = new.find_all('a')[0].get_text()
                print(content)
        else:
            logger.info("支付产业网没有搜索到数据: %s")
        driver.quit()
    except Exception as e:
        logger.error(e)
        driver.quit()
Beispiel #19
0
class TestMysql(object):
    # Manual smoke test: walks trackingmore.com logistics-company links and
    # screenshots each linked page, timing the loads.
    # NOTE(review): runs at import time; `driver` is unbound when the
    # __name__ guard is false.
    if __name__ == "__main__":
        url = "https://www.trackingmore.com/choose-cn-70634105326416.html?"
        # Build the Chrome launch options.
        chrome_options = Options()
        # Block images and CSS so pages load faster.
        prefs = {"profile.managed_default_content_settings.images": 2, 'permissions.default.stylesheet': 2}
        chrome_options.add_experimental_option("prefs", prefs)
        driver = webdriver.Remote(command_executor='http://172.17.161.230:8911/wd/hub',
                                  desired_capabilities=DesiredCapabilities.CHROME,
                                  options=chrome_options)
        driver.maximize_window()
        driver.set_page_load_timeout(60)
    try:
        driver.get(url)
        source = driver.page_source
        soup = BeautifulSoup(source, 'html.parser')
        logis_list = soup.find_all(attrs={'class': 'ulliselect'})
        start = datetime.datetime.now()
        if logis_list is not None and logis_list.__len__() > 0:
            for logis in logis_list:
                href = logis.get("href")
                name = logis.get_text()
                print(name)
                try:
                    print(href)
                    url = "https:" + href
                    driver.get(url)
                    driver.save_screenshot("D:/" + name + ".png")
                    end = datetime.datetime.now()
                    print(end - start)
                except Exception as e:
                    # On timeout: stop loading and screenshot whatever rendered.
                    logger.error(e)
                    print('time out after 30 seconds when loading page')
                    driver.execute_script('window.stop()')
                    print('111')
                    driver.save_screenshot("D:/" + href + ".png")
                    print('超时后截图完毕')
            end = datetime.datetime.now()
            print(end - start)
    except Exception as e:
        print('3333')
        logger.error(e)
        print('2222')
        driver.quit()
Beispiel #20
0
 def log_error(platform, href, batch_num, website):
     """Persist an "异常" MonitorThird row recording a failed visit/snapshot.

     All identifying fields are copied from *website*; the snapshot column
     is left empty because no screenshot could be taken.
     """
     dao = MonitorThirdDao()
     try:
         record = MonitorThird()
         record.merchant_num = website.merchant_num
         record.merchant_name = website.merchant_name
         record.website_name = website.website_name
         record.domain_name = website.domain_name
         record.saler = website.saler
         record.batch_num = batch_num
         record.url = href
         record.type = platform
         record.level = '-'
         record.outline = '异常截图失败'
         record.is_normal = "异常"
         record.snapshot = ""
         dao.add(record)
     except Exception as e:
         logger.error(e)
         return
Beispiel #21
0
def tracking_execute():
    """Request handler: restart the selenium workers and start a tracking run.

    Expects ``taskId``/``status`` in the request form; the actual work runs
    on a daemon thread so the request returns immediately.
    """
    job = os.environ['job']
    if job == "tracking":
        # Restart the selenium/chrome workers before the run.
        stop_selenium()
        stop_chrome()
        gl.set_value('STATUS', True)
        gl.set_value('TRACKING_STATUS', True)
        ims_api.heartbeat()
        try:
            task_id = request.form['taskId']
            status = request.form['status']
            logger.info("tracking begin task_id: %s,status: %s" % (str(task_id), str(status)))
            t = threading.Thread(target=inspect_tracking, args=(task_id, status))
            t.daemon = True  # setDaemon() is deprecated since Python 3.10
            t.start()
            return 'OK'
        except Exception as e:
            # NOTE(review): this path returns None, which a web framework may
            # treat as an error response — confirm whether 'OK' is intended.
            logger.error(e)
    else:
        logger.info("Tracking is not my job!")
        return 'OK'
 def monitor(website_name, merchant_name, batch_num):
     """Search wdzj.com (网贷之家) for *website_name* and process matches.

     Honours the per-batch cancel flag via ``gl.check_by_batch_num``.
     """
     driver = None  # keep the name bound for the finally block
     try:
         driver = WebDriver.get_chrome()
         senti_util = SentiUtil()
         url = "https://www.wdzj.com/front/search/index?key=" + urllib.parse.quote(
             website_name)
         driver.get(url)
         source = driver.page_source
         senti_util.snapshot_home("网贷之家", merchant_name, url, batch_num,
                                  driver)
         soup = BeautifulSoup(source, 'html.parser')
         tzbox = soup.find_all("ul", attrs={'class': 'so-tzbox'})
         if len(tzbox) == 0:
             return
         news = tzbox[0].find_all("li")
         if news:
             for new in news:
                 # stop early if this batch has been cancelled
                 if not gl.check_by_batch_num(batch_num):
                     break
                 href = new.find_all('a')[0].get("href")
                 content = new.get_text()
                 if content.find(website_name) != -1:
                     # hrefs are protocol-relative ("//..."): prepend http://.
                     senti_util.senti_process_text("网贷之家", merchant_name,
                                                   content,
                                                   "http://" + href[2:],
                                                   batch_num)
         else:
             logger.info("网贷之家没有搜索到数据: %s", merchant_name)
     except Exception as e:
         logger.error(e)
         return
     finally:
         # get_chrome() may fail or return None; guard before quitting.
         if driver is not None:
             driver.quit()
 def monitor(website_name, merchant_name, batch_num):
     """Search wangdaibus.com (网贷巴士) for *website_name* and process matches.

     Honours the per-batch cancel flag via ``gl.check_by_batch_num``.
     """
     driver = None  # keep the name bound for the finally block
     try:
         driver = WebDriver.get_chrome()
         senti_util = SentiUtil()
         url = "http://www.wangdaibus.com/search/list?subject=" + urllib.parse.quote(
             website_name)
         driver.get(url)
         time.sleep(10)  # results are rendered client-side
         senti_util.snapshot_home("网贷巴士", merchant_name, url, batch_num,
                                  driver)
         source = driver.page_source
         soup = BeautifulSoup(source, 'html.parser')
         news = soup.find_all("h3", attrs={'class': 'xs3'})
         if news:
             for new in news:
                 # stop early if this batch has been cancelled
                 if not gl.check_by_batch_num(batch_num):
                     break
                 href = new.find_all('a')[0].get("href")
                 content = new.get_text()
                 if content.find(website_name) != -1:
                     senti_util.senti_process_text(
                         "网贷巴士", merchant_name, content,
                         "http://www.wangdaibus.com/" + href, batch_num)
         else:
             logger.info("网贷巴士没有搜索到数据: %s", merchant_name)
     except Exception as e:
         logger.error(e)
         return
     finally:
         # get_chrome() may fail or return None; guard before quitting.
         if driver is not None:
             driver.quit()
 def monitor(keyword, batch_num, website):
     """Search zhifujie.com (支付界) for *keyword* and process matching news items."""
     driver = WebDriver.get_chrome()
     senti_util = SentiUtil()
     url = "http://www.zhifujie.com/search/search"
     if driver is None:
         senti_util.log_error("支付界", url, batch_num, website)
         return
     try:
         driver.get(url)
         # Drive the on-page search box and click the "搜索" button.
         search_text_blank = driver.find_element_by_id("searchbox")
         search_text_blank.send_keys(keyword)
         driver.find_element_by_xpath(
             '//button[contains(text(), "搜索")]').click()
         time.sleep(5)  # wait for client-side results
         source = driver.page_source
         senti_util.snapshot_home("支付界", url, batch_num, website, driver)
         soup = BeautifulSoup(source, 'html.parser')
         items = soup.find_all(attrs={'class': 'main-news-content-item'})
         if items:
             for item in items:
                 href = item.find_all('a')[1].get("href")
                 content = item.find_all('a')[1].get_text()
                 if content.find(keyword) != -1:
                     # NOTE(review): the paycircle.cn prefix looks copy-pasted
                     # from another scraper — confirm the intended base URL.
                     senti_util.senti_process_text(
                         "支付界", content,
                         "http://www.paycircle.cn" + href[1:], batch_num,
                         website)
         else:
             logger.info("支付界没有搜索到数据: %s", keyword)
     except Exception as e:
         logger.error(e)
         # Keep a snapshot of the failing page for diagnosis.
         senti_util.snapshot_home("支付界", url, batch_num, website, driver)
         return
     finally:
         driver.quit()
Beispiel #25
0
 def snapshot_home(platform, href, batch_num, website, driver):
     """Take a home-page snapshot and persist a "正常" MonitorThird row for it.

     Identifying fields are copied from *website*; the row's outline is the
     fixed label '首页截图' and its level is '-'.
     """
     dao = MonitorThirdDao()
     try:
         shot = SnapshotService.create_snapshot(driver, batch_num,
                                                website, '舆情')
         record = MonitorThird()
         record.merchant_num = website.merchant_num
         record.merchant_name = website.merchant_name
         record.website_name = website.website_name
         record.domain_name = website.domain_name
         record.saler = website.saler
         record.batch_num = batch_num
         record.url = href
         record.type = platform
         record.level = '-'
         record.outline = '首页截图'
         record.is_normal = "正常"
         record.snapshot = shot
         dao.add(record)
     except Exception as e:
         logger.error(e)
         return
Beispiel #26
0
def execute():
    """Request handler: restart selenium and launch a spider run for the batch.

    Expects ``batchNum`` in the request form; the actual work runs on a
    daemon thread so the request returns immediately.
    """
    logger.info("receive execute req !")
    job = os.environ['job']
    if job == "bc" or job == "other":
        # Restart the selenium worker before the run.
        logger.info("restart  selenium...")
        stop_selenium()
        start_selenium()
        logger.info("update status...")
        gl.set_value('STATUS', True)
        logger.info("heartbeat...")
        ims_api.heartbeat()
        try:
            batch_num = request.form['batchNum']
            logger.info("spider begin batchNum: %s" % str(batch_num))
            t = threading.Thread(target=inspect, args=(batch_num,))
            t.daemon = True  # setDaemon() is deprecated since Python 3.10
            t.start()
            return 'OK'
        except Exception as e:
            # NOTE(review): this path returns None, which a web framework may
            # treat as an error response — confirm whether 'OK' is intended.
            logger.error(e)
    else:
        logger.info("spider is not my job!")
        return 'OK'
Beispiel #27
0
class TestMysql(object):
    # Manual smoke test: opens en.ly.com on a remote Selenium node and
    # saves a screenshot.
    # NOTE(review): runs at import time; `driver` is unbound when the
    # __name__ guard is false, and the except path never quits the driver.
    if __name__ == "__main__":
        driver = webdriver.Remote(command_executor='http://172.17.161.230:8911/wd/hub',
                                  desired_capabilities=DesiredCapabilities.CHROME)

        driver.set_page_load_timeout(10)
        driver.set_script_timeout(10)
        driver.maximize_window()
    try:
        driver.get("http://en.ly.com")
        driver.save_screenshot("D:/bb.jpg")
        print(driver.current_url)
        driver.quit()
    except Exception as e:
        logger.error(e)
Beispiel #28
0
 def get_proxy_access_res(url):
     """Probe *url* through a proxied Chrome instance.

     Returns ``(http_url, final_url)`` when the page loads normally,
     ``(None, http_url)`` when it looks unreachable, and ``(None, None)``
     when no driver is available or an error occurs.
     """
     if str(url).startswith("http"):
         http_url = str(url)
     else:
         http_url = "http://" + str(url)
     driver = WebDriver.get_proxy_chrome()
     if driver is None:
         return None, None
     try:
         logger.info("http_url: %s", http_url)
         driver.get(http_url)
         title = driver.title
         source = driver.page_source  # fetch once instead of per-check
         error_markers = (
             'ERR_NAME_NOT_RESOLVED',
             'ERR_CONNECTION_REFUSED',
             'ERR_CONNECTION_TIMED_OUT',
         )
         if '404' in title or any(marker in source for marker in error_markers):
             return None, http_url
         else:
             return http_url, driver.current_url
     except Exception as e:
         logger.error(e)
         return None, None
     finally:
         driver.quit()
Beispiel #29
0
class TestMysql(object):
    # Manual smoke test: tianyancha search page + a company page, with a
    # screenshot of each.
    # NOTE(review): runs at import time; `driver` is unbound when the
    # __name__ guard is false, and the except path never quits the driver.
    if __name__ == "__main__":
        url = "https://www.tianyancha.com/search?key=%E4%BA%AC%E4%B8%9C"
        driver = webdriver.Remote(
            command_executor='http://172.17.161.230:8912/wd/hub',
            desired_capabilities=DesiredCapabilities.CHROME)

        driver.set_page_load_timeout(10)
        driver.set_script_timeout(10)
        driver.maximize_window()
    try:
        driver.get(url)
        time.sleep(5)
        driver.save_screenshot("D:/bb.jpg")
        driver.get("https://www.tianyancha.com/company/12562796")
        time.sleep(5)
        driver.save_screenshot("D:/cc.jpg")
        driver.quit()
    except Exception as e:
        logger.error(e)
Beispiel #30
0
 def tran2text(url):
     """OCR a picture file/URL into text; currently disabled.

     The unconditional ``return None`` below short-circuits the whole
     function until the TODO is resolved; the OCR implementation is kept
     beneath it for reference.
     """
     # TODO
     logger.error("TODO %s" % url)
     return None
     try:
         if str(url).endswith(".jpg") or str(url).endswith(".png") or str(url).endswith(".bmp") or str(url).endswith(
                 ".jpeg"):
             logger.info("src path: %s" % url)
             # Simplified-Chinese OCR via Tesseract.
             text = pytesseract.image_to_string(Image.open(url), lang='chi_sim')
             logger.info("text: %s", str(text))
             if text == "":
                 return None
             return text
         else:
             logger.error("url is not pic! url:%s" % url)
             return None
     except Exception as e:
         logger.error(e)
         return None