Example #1
File: appname.py Project: yang121/pmdesk
def crawl_baidu_shouji(n, proxies=True, debug=False):
    """
    Determine the page range from the max page number on the category index page; for now this only fits Baidu URLs.
    :param n: category number used in the URL
    :param proxies: whether to use proxies
    :param debug: if True, only crawl 2 pages
    :return: yields the HTML of each list page
    """
    url = 'http://shouji.baidu.com/software/%s/' % n
    while True:
        html = get_page(url, proxies)
        doc = pq(html)
        try:
            page_num = int(doc('.next').prev('li').children().text())
        except ValueError as e:
            print('Failed to get info: ', e)
            continue

        print('total page: ', page_num)
        if debug:
            page_num = 2
        for pn in range(1, page_num + 1):
            sub_url = 'http://shouji.baidu.com/software/%s/list_%s.html' % (n, pn)
            print()
            print(
                '=============================================================='
            )
            print('Crawling: ', sub_url)
            html = get_page(sub_url, proxies, selector='p.down-btn > span')
            yield html
        break
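Every example here calls a project-level get_page helper that is not shown in these excerpts. The sketch below is only a guess at what such a helper might look like, inferred from how it is called above (get_page(url, proxies), get_page(url, options=headers), get_page(url, ..., selector=...)); the real helper in yang121/pmdesk may well differ, and the proxies and selector parameters in particular are assumptions.

import requests


def get_page(url, proxies=False, options=None, selector=None):
    """Hypothetical stand-in for the project's get_page helper.

    proxies  -- assumed to enable/carry a proxy configuration
    options  -- extra request headers (see crawl_data5u / crawl_xicidaili)
    selector -- ignored in this sketch; the real helper presumably waits
                for this CSS selector on JS-rendered pages
    """
    headers = {'User-Agent': 'Mozilla/5.0'}
    if options:
        headers.update(options)
    try:
        resp = requests.get(
            url,
            headers=headers,
            proxies=proxies if isinstance(proxies, dict) else None,
            timeout=10)
        if resp.status_code == 200:
            return resp.text
    except requests.RequestException as e:
        print('Request failed:', url, e)
    return None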
Example #2
File: proxy.py Project: yang121/pmdesk
 def crawl_data5u(self):
     start_url = 'http://www.data5u.com/free/gngn/index.shtml'
     headers = {
         'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
         'Accept-Encoding': 'gzip, deflate',
         'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
         'Cache-Control': 'max-age=0',
         'Connection': 'keep-alive',
         'Cookie': 'JSESSIONID=47AA0C887112A2D83EE040405F837A86',
         'Host': 'www.data5u.com',
         'Referer': 'http://www.data5u.com/free/index.shtml',
         'Upgrade-Insecure-Requests': '1',
         'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36',
     }
     html = get_page(start_url, options=headers)
     if html:
         ip_address = re.compile(
             r'<span><li>(\d+\.\d+\.\d+\.\d+)</li>.*?<li class="port.*?>(\d+)</li>',
             re.S)
         re_ip_address = ip_address.findall(html)
         for address, port in re_ip_address:
             result = address + ':' + port
             yield result.replace(' ', '')
Example #3
File: proxy.py Project: yang121/pmdesk
 def crawl_89ip(self):
     start_url = 'http://www.89ip.cn/apijk/?&tqsl=1000&sxa=&sxb=&tta=&ports=&ktip=&cf=1'
     html = get_page(start_url)
     if html:
         find_ips = re.compile(r'(\d+\.\d+\.\d+\.\d+:\d+)', re.S)
         ip_ports = find_ips.findall(html)
         for address_port in ip_ports:
             yield address_port
Example #4
File: proxy.py Project: yang121/pmdesk
 def crawl_ip181(self):
     start_url = 'http://www.ip181.com/'
     html = get_page(start_url)
     if html:
         # \s* matches whitespace, so the pattern can span line breaks
         ip_address = re.compile(r'<tr.*?>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
         re_ip_address = ip_address.findall(html)
         for address, port in re_ip_address:
             result = address + ':' + port
             yield result.replace(' ', '')
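The \s* in patterns like the one above is what lets the match cross the newlines and indentation between <tr> and the <td> cells, so the same expression works whether the page source is minified or pretty-printed. A small self-contained check (the sample HTML is made up for illustration):

import re

sample = '''
<tr class="odd">
    <td>1.2.3.4</td>
    <td>8080</td>
</tr>
'''

pattern = re.compile(r'<tr.*?>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
print(pattern.findall(sample))  # [('1.2.3.4', '8080')]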
Example #5
File: proxy.py Project: yang121/pmdesk
 def crawl_premproxy(self):
     for i in ['China-01', 'China-02', 'China-03', 'China-04', 'Taiwan-01']:
         start_url = 'https://premproxy.com/proxy-by-country/{}.htm'.format(
             i)
         html = get_page(start_url)
         if html:
             ip_address = re.compile('<td data-label="IP:port ">(.*?)</td>')
             re_ip_address = ip_address.findall(html)
             for address_port in re_ip_address:
                 yield address_port.replace(' ', '')
Example #6
File: proxy.py Project: yang121/pmdesk
 def crawl_ip3366(self):
     for page in range(1, 4):
         start_url = 'http://www.ip3366.net/free/?stype=1&page={}'.format(
             page)
         html = get_page(start_url)
         if html:
             # \s* matches whitespace, so the pattern can span line breaks
             ip_address = re.compile(r'<tr>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
             re_ip_address = ip_address.findall(html)
             for address, port in re_ip_address:
                 result = address + ':' + port
                 yield result.replace(' ', '')
Example #7
File: proxy.py Project: yang121/pmdesk
 def crawl_kxdaili(self):
     for i in range(1, 11):
         start_url = 'http://www.kxdaili.com/ipList/{}.html#ip'.format(i)
         html = get_page(start_url)
         if html:
             # \s* matches whitespace, so the pattern can span line breaks
             ip_address = re.compile(r'<tr.*?>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
             re_ip_address = ip_address.findall(html)
             for address, port in re_ip_address:
                 result = address + ':' + port
                 yield result.replace(' ', '')
Example #8
File: proxy.py Project: yang121/pmdesk
 def crawl_kuaidaili(self):
     for i in range(1, 4):
         start_url = 'http://www.kuaidaili.com/free/inha/{}/'.format(i)
         html = get_page(start_url)
         if html:
             ip_address = re.compile('<td data-title="IP">(.*?)</td>')
             re_ip_address = ip_address.findall(html)
             port = re.compile('<td data-title="PORT">(.*?)</td>')
             re_port = port.findall(html)
             for address, port in zip(re_ip_address, re_port):
                 address_port = address + ':' + port
                 yield address_port.replace(' ', '')
Example #9
File: proxy.py Project: yang121/pmdesk
 def crawl_goubanjia(self):
     """
     Crawl Goubanjia
     :return: proxies
     """
     start_url = 'http://www.goubanjia.com/free/gngn/index.shtml'
     html = get_page(start_url)
     if html:
         doc = pq(html)
         tds = doc('td.ip').items()
         for td in tds:
             td.find('p').remove()
             yield td.text().replace(' ', '')
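The td.find('p').remove() step above matters because Goubanjia hides decoy elements inside the td.ip cell; pyquery's remove() drops those nodes while keeping the surrounding text. A tiny illustration with a made-up cell (a div is used so the fragment parses on its own):

from pyquery import PyQuery as pq

# made-up cell that mimics an obfuscated td.ip cell
cell = pq('<div class="ip">1.2.<p style="display:none">junk</p>3.4</div>')
cell.find('p').remove()              # drop the decoy elements
print(cell.text().replace(' ', ''))  # 1.2.3.4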
Example #10
File: proxy.py Project: yang121/pmdesk
 def crawl_proxy360(self):
     """
     Crawl Proxy360
     :return: proxies
     """
     start_url = 'http://www.proxy360.cn/Region/China'
     print('Crawling', start_url)
     html = get_page(start_url)
     if html:
         doc = pq(html)
         lines = doc('div[name="list_proxy_ip"]').items()
         for line in lines:
             ip = line.find('.tbBottomLine:nth-child(1)').text()
             port = line.find('.tbBottomLine:nth-child(2)').text()
             yield ':'.join([ip, port])
Example #11
File: proxy.py Project: yang121/pmdesk
 def crawl_iphai(self):
     start_url = 'http://www.iphai.com/'
     html = get_page(start_url)
     if html:
         find_tr = re.compile('<tr>(.*?)</tr>', re.S)
         trs = find_tr.findall(html)
         for s in range(1, len(trs)):
             find_ip = re.compile(r'<td>\s+(\d+\.\d+\.\d+\.\d+)\s+</td>', re.S)
             re_ip_address = find_ip.findall(trs[s])
             find_port = re.compile(r'<td>\s+(\d+)\s+</td>', re.S)
             re_port = find_port.findall(trs[s])
             for address, port in zip(re_ip_address, re_port):
                 address_port = address + ':' + port
                 yield address_port.replace(' ', '')
Example #12
File: proxy.py Project: yang121/pmdesk
 def crawl_ip3366(self):
     for i in range(1, 4):
         start_url = 'http://www.ip3366.net/?stype=1&page={}'.format(i)
         html = get_page(start_url)
         if html:
             find_tr = re.compile('<tr>(.*?)</tr>', re.S)
             trs = find_tr.findall(html)
             for s in range(1, len(trs)):
                 find_ip = re.compile(r'<td>(\d+\.\d+\.\d+\.\d+)</td>')
                 re_ip_address = find_ip.findall(trs[s])
                 find_port = re.compile(r'<td>(\d+)</td>')
                 re_port = find_port.findall(trs[s])
                 for address, port in zip(re_ip_address, re_port):
                     address_port = address + ':' + port
                     yield address_port.replace(' ', '')
Example #13
File: proxy.py Project: yang121/pmdesk
 def crawl_xroxy(self):
     for i in ['CN', 'TW']:
         start_url = 'http://www.xroxy.com/proxylist.php?country={}'.format(
             i)
         html = get_page(start_url)
         if html:
             ip_address1 = re.compile(
                 r"title='View this Proxy details'>\s*(.*).*")
             re_ip_address1 = ip_address1.findall(html)
             ip_address2 = re.compile(
                 "title='Select proxies with port number .*'>(.*)</a>")
             re_ip_address2 = ip_address2.findall(html)
             for address, port in zip(re_ip_address1, re_ip_address2):
                 address_port = address + ':' + port
                 yield address_port.replace(' ', '')
Example #14
File: proxy.py Project: yang121/pmdesk
 def crawl_daili66(self, page_count=4):
     """
     Crawl daili66 (66ip.cn)
     :param page_count: number of pages to crawl
     :return: proxies
     """
     start_url = 'http://www.66ip.cn/{}.html'
     urls = [start_url.format(page) for page in range(1, page_count + 1)]
     for url in urls:
         print('Crawling', url)
         html = get_page(url)
         if html:
             doc = pq(html)
             trs = doc('.containerbox table tr:gt(0)').items()
             for tr in trs:
                 ip = tr.find('td:nth-child(1)').text()
                 port = tr.find('td:nth-child(2)').text()
                 yield ':'.join([ip, port])
Example #15
File: proxy.py Project: yang121/pmdesk
 def crawl_xicidaili(self):
     for i in range(1, 3):
         start_url = 'http://www.xicidaili.com/nn/{}'.format(i)
         headers = {
             'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
             'Cookie': '_free_proxy_session=BAh7B0kiD3Nlc3Npb25faWQGOgZFVEkiJWRjYzc5MmM1MTBiMDMzYTUzNTZjNzA4NjBhNWRjZjliBjsAVEkiEF9jc3JmX3Rva2VuBjsARkkiMUp6S2tXT3g5a0FCT01ndzlmWWZqRVJNek1WanRuUDBCbTJUN21GMTBKd3M9BjsARg%3D%3D--2a69429cb2115c6a0cc9a86e0ebe2800c0d471b3',
             'Host': 'www.xicidaili.com',
             'Referer': 'http://www.xicidaili.com/nn/3',
             'Upgrade-Insecure-Requests': '1',
         }
         html = get_page(start_url, options=headers)
         if html:
             find_trs = re.compile('<tr class.*?>(.*?)</tr>', re.S)
             trs = find_trs.findall(html)
             for tr in trs:
                 find_ip = re.compile(r'<td>(\d+\.\d+\.\d+\.\d+)</td>')
                 re_ip_address = find_ip.findall(tr)
                 find_port = re.compile(r'<td>(\d+)</td>')
                 re_port = find_port.findall(tr)
                 for address, port in zip(re_ip_address, re_port):
                     address_port = address + ':' + port
                     yield address_port.replace(' ', '')
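All of these crawl_* methods are generators that yield 'ip:port' strings, so they can all be consumed the same way. A minimal, hypothetical consumer is sketched below; the collect_proxies helper and the idea of discovering the crawl_* methods by name are assumptions, not necessarily how yang121/pmdesk wires them up:

def collect_proxies(crawler, limit=50):
    """Run every crawl_* generator on `crawler` and gather up to
    `limit` unique 'ip:port' strings (hypothetical helper)."""
    proxies = set()
    # pick up anything that looks like one of the crawl_* methods above
    crawl_funcs = [name for name in dir(crawler) if name.startswith('crawl_')]
    for name in crawl_funcs:
        for proxy in getattr(crawler, name)():
            proxies.add(proxy)
            if len(proxies) >= limit:
                return list(proxies)
    return list(proxies)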
Example #16
def crawl_chandashi_ios(keyword, proxies=True, debug=False):
    """
    Search chandashi.com for the given keyword and yield the iOS and Android detail pages.
    :param keyword: app name to search for
    :param proxies: whether to use proxies
    :param debug: unused in this function
    :return: yields (ios_html, android_html) tuples
    """
    base_url = 'https://www.chandashi.com'
    url = base_url + '/search/index.html?keyword=%s&type=store' % keyword
    while True:
        html = get_page(url, proxies, selector='#searchlist')
        doc = pq(html)
        try:
            ios_url = base_url + doc(
                '#searchlist > div:nth-child(1) > div > a').attr('href')
            ios_html = get_page(ios_url, proxies)
            print(
                '=============================================================='
            )
            print('Crawling: ', ios_url)
            ios_doc = pq(ios_html)
            android_url = base_url + ios_doc(
                '#pageTop > header > nav.navbar.nav-an.mobile-hide > div > div > ul > li:nth-child(2) > a'
            ).attr('href')
            android_html = get_page(android_url, proxies)

            yield ios_html, android_html
            # stop after a successful crawl; otherwise the loop re-fetches the same URL forever
            break

        except Exception as e:
            print('Failed to get info: ', e)
            continue


#
# def parse_baidu_shouji(html):
#     doc = pq(html)
#     down_btn = doc('p.down-btn > span')
#     if down_btn:
#         print('This page has %s apps' % down_btn.length)
#         for d in down_btn.items():
#             name = d.attr('data_name').strip()
#             apk_name = d.attr('data_package').strip()
#             if name and apk_name:
#                 data = {
#                     'name': name,
#                     'apk_name': apk_name
#                 }
#                 yield data
#     else:
#         print('No data!')
#
#
# def main(n):
#     """
#     Chain all the stages together
#     :param n:
#     :return:
#     """
#     htmls = crawl_baidu_shouji(n, proxies=settings.PROXY_MODE, debug=settings.DEBUG)
#     for html in htmls:
#         items = parse_baidu_shouji(html)
#         try:
#             mongo = APPNameMongoDBHandler(settings.MONGO_URL, settings.MONGO_DB, 'app_name')
#             mongo.save_to_table('apk_name', items)
#         except Exception as e:
#             print('MongoDB error:', list(items), 'ignored', e)
#     print('Page %s done!' % n)
#
#
# @timer
# def run():
#     if settings.DEBUG:
#         page_range = 501, 502
#     else:
#         page_range = settings.PAGE_RANGE
#
#     pool = Pool()
#     pool.map(main, [n for n in range(*page_range)])
#
#
# if __name__ == '__main__':
#     run()