def crawl_89ip(self):
    start_url = 'http://www.89ip.cn/apijk/?&tqsl=1000&sxa=&sxb=&tta=&ports=&ktip=&cf=1'
    html = get_page(start_url)
    if html:
        find_ips = re.compile(r'(\d+\.\d+\.\d+\.\d+:\d+)', re.S)
        ip_ports = find_ips.findall(html)
        for address_port in ip_ports:
            yield address_port
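# All of the crawlers in this listing call a get_page() helper that is not defined
# here. The sketch below is an assumption based on how the helper is invoked (a URL
# plus an optional `options` dict of extra headers), using the requests library; the
# real implementation may add retries, proxy support, or encoding handling.
import re

import requests
from requests.exceptions import RequestException


def get_page(url, options=None):
    # merge any caller-supplied headers over a default User-Agent
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}
    if options:
        headers.update(options)
    try:
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            return response.text
    except RequestException:
        # treat network errors as "page not available"
        return None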
def crawl_ip181(self):
    start_url = 'http://www.ip181.com/'
    html = get_page(start_url)
    if html:
        # \s* matches whitespace, so the pattern spans the line break between cells
        ip_address = re.compile(r'<tr.*?>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
        re_ip_address = ip_address.findall(html)
        for address, port in re_ip_address:
            result = address + ':' + port
            yield result.replace(' ', '')
def crawl_ip3366(self):
    for page in range(1, 4):
        start_url = 'http://www.ip3366.net/free/?stype=1&page={}'.format(page)
        html = get_page(start_url)
        if html:
            # \s* matches whitespace, so the pattern spans the line break between cells
            ip_address = re.compile(r'<tr>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
            re_ip_address = ip_address.findall(html)
            for address, port in re_ip_address:
                result = address + ':' + port
                yield result.replace(' ', '')
def crawl_kuaidaili(self):
    for i in range(1, 4):
        start_url = 'http://www.kuaidaili.com/free/inha/{}/'.format(i)
        html = get_page(start_url)
        if html:
            ip_pattern = re.compile(r'<td data-title="IP">(.*?)</td>')
            re_ip_address = ip_pattern.findall(html)
            port_pattern = re.compile(r'<td data-title="PORT">(.*?)</td>')
            re_port = port_pattern.findall(html)
            for address, port in zip(re_ip_address, re_port):
                address_port = address + ':' + port
                yield address_port.replace(' ', '')
def crawl_89ip(self, num=100):
    """
    Crawl 89ip free proxies
    :param num: number of proxies to fetch per request (default 100)
    :return: crawled proxies
    """
    start_url = "http://www.89ip.cn/tqdl.html?num={}&address=&kill_address=&port=&kill_port=&isp=".format(num)
    html = get_page(start_url)
    if html:
        response = etree.HTML(html)
        ip_port_li = response.xpath('//div[@style="padding-left:20px;"]/text()')[:-1]
        for ip_port in ip_port_li:
            yield ip_port.strip()
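# crawl_89ip above and crawl_ip366 below parse pages with etree.HTML(); this presumably
# relies on a module-level lxml import, the conventional source of that API (an
# assumption, since the listing does not show its imports).
from lxml import etree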
def crawl_iphai(self):
    start_url = 'http://www.iphai.com/'
    html = get_page(start_url)
    if html:
        find_tr = re.compile('<tr>(.*?)</tr>', re.S)
        trs = find_tr.findall(html)
        for s in range(1, len(trs)):
            find_ip = re.compile(r'<td>\s+(\d+\.\d+\.\d+\.\d+)\s+</td>', re.S)
            re_ip_address = find_ip.findall(trs[s])
            find_port = re.compile(r'<td>\s+(\d+)\s+</td>', re.S)
            re_port = find_port.findall(trs[s])
            for address, port in zip(re_ip_address, re_port):
                address_port = address + ':' + port
                yield address_port.replace(' ', '')
def crawl_ip366(self):
    """
    Crawl Yun proxy (ip3366.net)
    :return: crawled proxies
    """
    for page in range(1, 4):  # the site currently lists 7 pages of free proxies
        start_url = "http://www.ip3366.net/free/?stype=1&page={}".format(page)
        html = get_page(start_url)
        if html:
            response = etree.HTML(html)
            trs = response.xpath("//div[@id='list']/table//tr")[1:]
            for tr in trs:
                ip = tr.xpath("./td[1]/text()")[0].strip()
                port = tr.xpath("./td[2]/text()")[0].strip()
                yield ":".join([ip, port])
def crawl_ip3366(self):
    for i in range(1, 4):
        start_url = 'http://www.ip3366.net/?stype=1&page={}'.format(i)
        html = get_page(start_url)
        if html:
            find_tr = re.compile('<tr>(.*?)</tr>', re.S)
            trs = find_tr.findall(html)
            for s in range(1, len(trs)):
                find_ip = re.compile(r'<td>(\d+\.\d+\.\d+\.\d+)</td>')
                re_ip_address = find_ip.findall(trs[s])
                find_port = re.compile(r'<td>(\d+)</td>')
                re_port = find_port.findall(trs[s])
                for address, port in zip(re_ip_address, re_port):
                    address_port = address + ':' + port
                    yield address_port.replace(' ', '')
def crawl_kuaidaili(self):
    """
    Crawl Kuaidaili
    :return: crawled proxies
    """
    for i in range(1, 4):  # the site has plenty of free proxies; 3362 pages at the time of writing
        start_url = "https://www.kuaidaili.com/free/inha/{}/".format(i)
        html = get_page(start_url)
        if html:
            ip_pattern = re.compile(r'<td data-title="IP">(.*?)</td>')
            ips = ip_pattern.findall(html)
            port_pattern = re.compile(r'<td data-title="PORT">(.*?)</td>')
            ports = port_pattern.findall(html)
            for ip, port in zip(ips, ports):
                ip_port = ip + ":" + port
                yield ip_port.replace(" ", "")
def crawl_daili66(self, page_count=4):
    """
    Crawl 66ip; the site offers a large pool of proxies, including many foreign IPs
    :param page_count: number of pages to crawl
    :return: crawled proxies
    """
    # This site lists a large number of free domestic and foreign proxies;
    # 1931 pages and 3724 proxies in total at the time of writing
    start_url = "http://www.66ip.cn/{}.html"
    urls = [start_url.format(page) for page in range(1, page_count + 1)]
    for url in urls:
        html = get_page(url)
        if html:
            doc = pq(html)
            trs = doc(".containerbox table tr:gt(0)").items()
            for tr in trs:
                ip = tr.find("td:nth-child(1)").text()
                port = tr.find("td:nth-child(2)").text()
                yield ":".join([ip, port])
def crawl_daili66(self, page_count=4):
    """
    Crawl 66ip
    :param page_count: number of pages to crawl
    :return: proxies
    """
    start_url = 'http://www.66ip.cn/{}.html'
    urls = [start_url.format(page) for page in range(1, page_count + 1)]
    for url in urls:
        print('Crawling', url)
        html = get_page(url)
        if html:
            doc = pq(html)
            trs = doc('.containerbox table tr:gt(0)').items()
            for tr in trs:
                ip = tr.find('td:nth-child(1)').text()
                port = tr.find('td:nth-child(2)').text()
                yield ':'.join([ip, port])
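# Both crawl_daili66 variants above parse the page with pq(); this presumably maps to
# a module-level import of pyquery under its conventional alias (an assumption, since
# the listing does not show its imports).
from pyquery import PyQuery as pq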
def crawl_data5u(self):
    start_url = 'http://www.data5u.com/free/gngn/index.shtml'
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'Cookie': 'JSESSIONID=47AA0C887112A2D83EE040405F837A86',
        'Host': 'www.data5u.com',
        'Referer': 'http://www.data5u.com/free/index.shtml',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36',
    }
    html = get_page(start_url, options=headers)
    if html:
        ip_address = re.compile(r'<span><li>(\d+\.\d+\.\d+\.\d+)</li>.*?<li class="port.*?>(\d+)</li>', re.S)
        re_ip_address = ip_address.findall(html)
        for address, port in re_ip_address:
            result = address + ':' + port
            yield result.replace(' ', '')
def crawl_xicidaili(self):
    for i in range(1, 3):
        start_url = 'http://www.xicidaili.com/nn/{}'.format(i)
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Cookie': '_free_proxy_session=BAh7B0kiD3Nlc3Npb25faWQGOgZFVEkiJWRjYzc5MmM1MTBiMDMzYTUzNTZjNzA4NjBhNWRjZjliBjsAVEkiEF9jc3JmX3Rva2VuBjsARkkiMUp6S2tXT3g5a0FCT01ndzlmWWZqRVJNek1WanRuUDBCbTJUN21GMTBKd3M9BjsARg%3D%3D--2a69429cb2115c6a0cc9a86e0ebe2800c0d471b3',
            'Host': 'www.xicidaili.com',
            'Referer': 'http://www.xicidaili.com/nn/3',
            'Upgrade-Insecure-Requests': '1',
        }
        html = get_page(start_url, options=headers)
        if html:
            find_trs = re.compile('<tr class.*?>(.*?)</tr>', re.S)
            trs = find_trs.findall(html)
            for tr in trs:
                find_ip = re.compile(r'<td>(\d+\.\d+\.\d+\.\d+)</td>')
                re_ip_address = find_ip.findall(tr)
                find_port = re.compile(r'<td>(\d+)</td>')
                re_port = find_port.findall(tr)
                for address, port in zip(re_ip_address, re_port):
                    address_port = address + ':' + port
                    yield address_port.replace(' ', '')
def crawl_xicidaili(self):
    """
    Crawl Xici free proxies
    :return: crawled proxies
    """
    for i in range(1, 3):  # the site has plenty of free proxies; 4052 pages at the time of writing
        start_url = "https://www.xicidaili.com/nn/{}".format(i)
        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,"
                      "application/signed-exchange;v=b3;q=0.9",
            "Accept-Language": "en,en-US;q=0.9,zh-CN;q=0.8,zh;q=0.7",
            "Connection": "keep-alive",
            "Cookie": "_free_proxy_session=BAh7B0kiD3Nlc3Npb25faWQGOgZFVEkiJTYzNGYxMDI0ZTQyYTFiY2M3NmU5MjhkZGM5Yzd"
                      "kNDhlBjsAVEkiEF9jc3JmX3Rva2VuBjsARkkiMWo3aVVEbER4dGpLSFdzTEdGNERTbGQ5OGQ0ekoxWDFwUm9kUkMrcG5"
                      "ReDQ9BjsARg%3D%3D--1c2745a54b00042c950d46dbb785f4bab0756adb; Hm_lvt_0cf76c77469e965d2957f05"
                      "53e6ecf59=1585454330; Hm_lpvt_0cf76c77469e965d2957f0553e6ecf59=1585454780",
            "Host": "www.xicidaili.com",
            "If-None-Match": "W/be944a45da779bcfb93e7d26e93e91a6",
            "Sec-Fetch-Dest": "document",
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-Site": "none",
            "Sec-Fetch-User": "******",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                          "Chrome/80.0.3987.132 Safari/537.36",
        }
        html = get_page(start_url, options=headers)
        if html:
            trs_pattern = re.compile(r'<tr class=.*?>(.*?)</tr>', re.S)
            trs = trs_pattern.findall(html)
            for tr in trs:
                ip_pattern = re.compile(r'\d+\.\d+\.\d+\.\d+')
                ip_address = ip_pattern.findall(tr)
                port_pattern = re.compile(r'<td>(\d+)</td>')
                ports = port_pattern.findall(tr)
                for address, port in zip(ip_address, ports):
                    ip_port = address + ":" + port
                    yield ip_port.replace(" ", "")
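# A hedged usage sketch: assuming the crawl_* methods above live on a single crawler
# class, each one is a generator yielding "ip:port" strings, so they can be drained as
# shown below. The collect_proxies() helper and the Crawler name in the example are
# illustrative only and not part of the original listing.
def collect_proxies(crawler, func_names):
    proxies = []
    for name in func_names:
        # each crawl_* method is a generator; iterate it to pull out the proxies
        for proxy in getattr(crawler, name)():
            proxies.append(proxy)
    return proxies

# Example (assuming a Crawler instance exposing the methods above):
# proxies = collect_proxies(Crawler(), ['crawl_daili66', 'crawl_kuaidaili'])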