def test_proxy_from_url(url, expected_url, expected_headers):
    """A Proxy built from `url` exposes the expected URL, headers and repr."""
    proxy = httpx.Proxy(url)

    assert str(proxy.url) == expected_url
    assert dict(proxy.headers) == expected_headers

    expected_repr = f"Proxy(url='{expected_url}', headers={expected_headers})"
    assert repr(proxy) == expected_repr
def test_proxy_from_url():
    """A bare URL yields a Proxy with no auth, no headers and a simple repr."""
    configured = httpx.Proxy("https://example.com")

    assert repr(configured) == "Proxy('https://example.com')"
    assert str(configured.url) == "https://example.com"
    assert configured.headers == {}
    assert configured.auth is None
def __init__(self, API_Key: int, Secret_Key: str, bot_id: str, session: str, proxy: str = None):
    """Store credentials, build the HTTP client and fetch an access token.

    Args:
        API_Key: API key of the application.
        Secret_Key: Secret key paired with the API key.
        bot_id: Identifier of the bot.
        session: Session identifier.
        proxy: Optional "host:port" of an HTTP proxy to tunnel through.
    """
    self._API_Key = API_Key
    self._Secret_Key = Secret_Key
    self._bot_id = bot_id
    self._session = session
    if proxy:
        # May be "TUNNEL_ONLY" or "FORWARD_ONLY". Defaults to "DEFAULT".
        self._Proxy = httpx.Proxy(url="http://" + proxy, mode="TUNNEL_ONLY")
        self._client = httpx.AsyncClient(proxies=self._Proxy)
    else:
        self._client = httpx.AsyncClient()
    # Fetch the access token synchronously at construction time.
    # NOTE(review): self._token_url is presumably a class attribute — confirm.
    # Fix: removed debug prints that leaked the access token to stdout
    # (print(self._token) / print("*&****")) and a stray trailing `pass`.
    self._token = requests.post(
        self._token_url.format(self._API_Key, self._Secret_Key)
    ).json()['access_token']
def test_proxy_with_auth_from_url():
    """Userinfo in the proxy URL is split into `auth` and masked in repr().

    Fix: the URL's credentials had been redacted to "*****:*****" while the
    assertions still expect ("username", "password"); restored the literal
    credentials so the test matches its own expectations.
    """
    proxy = httpx.Proxy("https://username:password@example.com")

    assert str(proxy.url) == "https://example.com"
    assert proxy.auth == ("username", "password")
    assert proxy.headers == {}
    assert repr(
        proxy) == "Proxy('https://example.com', auth=('username', '********'))"
def get_proxy(open_proxy: bool) -> dict:
    """Return the proxy setting for outbound requests.

    Gives back an httpx.Proxy built from config.rss_proxy when proxying is
    enabled and an address is configured; otherwise an empty dict (no proxy).
    """
    if not open_proxy:
        return {}
    address = config.rss_proxy
    if not address:
        return {}
    # May be "TUNNEL_ONLY" or "FORWARD_ONLY". Defaults to "DEFAULT".
    return httpx.Proxy(url="http://" + address, mode="TUNNEL_ONLY")
def proxy_type():
    """Check the proxy type by issuing a request through a tunnel-only proxy."""
    # This proxy must be a tunnelling proxy.
    tunnel_proxy = httpx.Proxy(url="https://localhost:8030", mode="TUNNEL_ONLY")
    with httpx.Client(proxies=tunnel_proxy) as client:
        # This HTTP request will be tunneled instead of forwarded
        r = client.get("http://example.com")
async def testProxy(proxy_addr: str) -> None:
    """Probe `proxy_addr`; print it when it can tunnel a request to Google."""
    tunnel = httpx.Proxy(url="http://" + proxy_addr, mode="TUNNEL_ONLY")
    async with httpx.AsyncClient(proxies=tunnel) as client:
        try:
            await client.request("GET", "https://google.com")
        except Exception:
            # Best effort: an unreachable or broken proxy is simply skipped.
            return
        print(proxy_addr)
def __init__(self, app_id: int, appkey: str, session: str, proxy: str = None): self._app_id = app_id # 应用标识(AppId) int self._appkey = appkey # appkey self._session = session # 会话标识(应用内唯一) if proxy: self._Proxy = httpx.Proxy( url="http://" + proxy, mode= "TUNNEL_ONLY" # May be "TUNNEL_ONLY" or "FORWARD_ONLY". Defaults to "DEFAULT". ) self._client = httpx.AsyncClient(proxies=self._Proxy) else: self._client = httpx.AsyncClient(proxies={}) pass
async def main():
    """Replay selected requests from a Burp log through an HTTP proxy.

    Filters the log to mail.ru/ok.ru hosts not yet marked with the XXX
    header, mangles each request's URL/path/headers, then resends them
    concurrently (at most 20 in flight) via trio + httpx.
    """
    to_resend_list = []
    for row in burp_log(BURP_LOG_FILE):
        # Skip requests we already tagged on a previous pass.
        if "XXX" in dict(row.request.headers):
            continue
        # Only replay requests aimed at the targeted hosts.
        if all(h not in row.request.addr for h in ["mail.ru", "ok.ru"]):
            continue
        f = furl(row.request.url)
        f.add({'come': "to daddy 1"})
        f.add({'come': "to daddy 2"})
        f.path.segments = f.path.segments[:1] + ["come", "to", "daddy"
                                                 ] + f.path.segments[1:]
        row.request.url = f.url
        row.request.headers.append(('XXX', "come to daddy"))
        print(row.request.url)
        to_resend_list.append(row.request)

    limit = trio.CapacityLimiter(20)

    async def fetch(method, url, content, headers):
        # Fix: use the parameters passed in instead of closing over the loop
        # variable `req` (late binding made every task reuse the last request).
        # Fix: acquire the limiter inside the task — the original
        # `async with limit:` around start_soon released immediately and
        # limited nothing.
        async with limit:
            try:
                res = await client.request(method,
                                           url,
                                           content=content,
                                           headers=headers,
                                           timeout=30,
                                           allow_redirects=False)
                print(f"{method:8} {url} [{res.status_code}]")
            except httpx.ReadTimeout:
                print(f"{method:8} {url} [ timeout -1 ]")

    async with httpx.AsyncClient(proxies=httpx.Proxy(url=HTTP_PROXY),
                                 verify=False) as client:
        async with trio.open_nursery() as nursery:
            for req in to_resend_list:
                headers = dict(req.headers)
                # Bodies may change size after mangling; let httpx recompute.
                headers.pop('Content-Length', None)
                nursery.start_soon(fetch, req.method, req.url, req.body,
                                   headers)
if not sys.version_info.major == 3 and sys.version_info.minor >= 7: from contextlib import asynccontextmanager # pylint: disable=no-name-in-module else: from .contextlib import asynccontextmanager class NetworkType(Enum): NORMAL = 0 TOR = 1 NETWORK_PROXIES = { NetworkType.TOR: httpx.Proxy( url=TOR_HTTP_PROXY, mode="TUNNEL_ONLY" # Tor is a tunnel only proxy ) } TOR_PROXY_ERROR = { 403: "Forbidden (connection refused|exit policy|connection reset|entry policy violation)", 404: "Not Found (resolve failed|no route)", 500: "Internal Server Error", 502: "Bad Gateway (destroy cell received|unexpected close|hibernating server|internal error" + "|resource limit|tor protocol violation)", 504: "Gateway Timeout",
def test_invalid_proxy_mode():
    """Constructing a Proxy with an unrecognised mode raises ValueError."""
    url = "https://example.com"
    with pytest.raises(ValueError):
        httpx.Proxy(url, mode="INVALID")
def test_invalid_proxy_scheme():
    """A proxy URL with an unsupported scheme is rejected with ValueError."""
    bad_url = "invalid://example.com"
    with pytest.raises(ValueError):
        httpx.Proxy(bad_url)
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Send requests through a tunnelling proxy server with httpx.

Works for both http and https pages.
"""

import httpx

# Tunnel domain and port.
tunnel = "tpsXXX.kdlapi.com:15818"

# Username/password authentication.
username = "******"
password = "******"
proxy_url = f"http://{username}:{password}@{tunnel}/"

proxies = httpx.Proxy(url=proxy_url, mode="DEFAULT")

with httpx.Client(proxies=proxies) as client:
    r = client.get('http://dev.kdlapi.com/testproxy')
    print(r.text)
# confuse the client: "accept-ranges", "content-encoding", "content-length", ) ) config = Config(".env") DEBUG = config("DEBUG", cast=bool, default=False) HTTP_PROXY = config("HTTP_PROXY", default=None) # Use forward-only mode so the proxy can see and cache even HTTPS requests. # https://www.python-httpx.org/advanced/#proxy-mechanisms http_client = httpx.AsyncClient( proxies=httpx.Proxy(url=HTTP_PROXY, mode="FORWARD_ONLY") if HTTP_PROXY else {} ) async def feed(request: Request) -> Response: # If this client has requested this page from us before and we gave them # our special ETag for an archive page, then we can immediately conclude # that whatever response we gave them before is fine. if any( ARCHIVE_ETAG in get_list(v) for v in request.headers.getlist("If-None-Match") ): # XXX: https://tools.ietf.org/html/rfc7232#section-4.1 says "The server # generating a 304 response MUST generate any of the following header # fields that would have been sent in a 200 (OK) response to the same # request: Cache-Control, Content-Location, Date, ETag, Expires, and # Vary," but we don't have most of those available without forwarding
async def dowimg(url: str, img_proxy: bool) -> str:
    """Download the image at `url` into the local imgs/ directory.

    pixiv.cat links get special handling: the illustration metadata is looked
    up via a third-party API and the real image URL is fetched through a
    reverse-proxy host instead.

    Args:
        url: Image URL to download.
        img_proxy: When True, download through the configured HTTP proxy.

    Returns:
        Absolute path of the saved file, or '' on any failure.
    """
    try:
        img_path = file_path + 'imgs' + os.sep
        if not os.path.isdir(img_path):
            # Folder does not exist; log (Chinese: "folder missing, recreated")
            # and create the directory.
            logger.info(img_path + '文件夹不存在,已重新创建')
            os.makedirs(img_path)  # create the directory
        file_suffix = os.path.splitext(url)  # (path/name, extension) pair
        # Random basename so concurrent downloads never collide.
        name = str(uuid.uuid4())
        if img_proxy:
            # NOTE(review): `proxy` is not defined in this function — it is
            # presumably a module-level setting; confirm.
            Proxy = httpx.Proxy(
                url="http://" + proxy,
                mode= "TUNNEL_ONLY"  # May be "TUNNEL_ONLY" or "FORWARD_ONLY". Defaults to "DEFAULT".
            )
        else:
            Proxy = {}  # empty mapping disables proxying
        async with httpx.AsyncClient(proxies=Proxy) as client:
            try:
                if config.CLOSE_PIXIV_CAT and url.find('pixiv.cat') >= 0:
                    img_proxy = False
                    headers = {'referer': config.PIXIV_REFERER}
                    # Extract "<illust_id>[-<page>]" from the pixiv.cat URL
                    # (the trailing 4 chars are the file extension).
                    img_id = re.sub('https://pixiv.cat/', '', url)
                    img_id = img_id[:-4]
                    info_list = img_id.split('-')
                    # Synchronous metadata lookup via a third-party pixiv API.
                    req_json = requests.get(
                        'https://api.imjad.cn/pixiv/v1/?type=illust&id=' +
                        info_list[0]).json()
                    if len(info_list) >= 2:
                        # Multi-page illustration: pick the requested page.
                        url = req_json['response'][0]['metadata']['pages'][
                            int(info_list[1]) - 1]['image_urls']['large']
                    else:
                        url = req_json['response'][0]['image_urls']['large']
                    # Use a third-party reverse-proxy server for i.pximg.net.
                    url = re.sub('i.pximg.net', config.PIXIV_PROXY, url)
                    pic = await client.get(url, headers=headers, timeout=100.0)
                else:
                    pic = await client.get(url)
                # Size control: compress the image when it exceeds ZIP_SIZE KiB.
                if (len(pic.content) / 1024 > config.ZIP_SIZE):
                    filename = await zipPic(pic.content, name)
                else:
                    # Keep the URL's extension if it has one, otherwise infer
                    # it from the Content-Type (default .jpg).
                    if len(file_suffix[1]) > 0:
                        filename = name + file_suffix[1]
                    elif pic.headers['Content-Type'] == 'image/jpeg':
                        filename = name + '.jpg'
                    elif pic.headers['Content-Type'] == 'image/png':
                        filename = name + '.png'
                    else:
                        filename = name + '.jpg'
                with codecs.open(img_path + filename, "wb") as dump_f:
                    dump_f.write(pic.content)
                if config.IsLinux:
                    # Build an absolute POSIX-style path.
                    imgs_name = img_path + filename
                    if len(imgs_name) > 0:
                        imgs_name = os.getcwd() + re.sub(
                            r'\./|\\', r'/', imgs_name)
                    return imgs_name
                else:
                    # Windows: absolute path with escaped backslash separators.
                    imgs_name = img_path + filename
                    if len(imgs_name) > 0:
                        imgs_name = os.getcwd() + re.sub(
                            '\./', r'\\', imgs_name)
                        imgs_name = re.sub(r'\\', r'\\\\', imgs_name)
                        imgs_name = re.sub(r'/', r'\\\\', imgs_name)
                    return imgs_name
            except IOError as e:
                # Chinese log text: "image download failed 2".
                logger.error('图片下载失败 2 E:' + str(e))
                return ''
    except BaseException as e:
        # Chinese log text: "image download failed 1". NOTE(review):
        # BaseException also swallows KeyboardInterrupt/CancelledError.
        logger.error('图片下载失败 1 E:' + str(e))
        return ''
@pytest.mark.parametrize(
    ["proxies", "expected_proxies"],
    [
        ("http://127.0.0.1", [("all", "http://127.0.0.1")]),
        ({"all": "http://127.0.0.1"}, [("all", "http://127.0.0.1")]),
        (
            {"http": "http://127.0.0.1", "https": "https://127.0.0.1"},
            [("http", "http://127.0.0.1"), ("https", "https://127.0.0.1")],
        ),
        (httpx.Proxy("http://127.0.0.1"), [("all", "http://127.0.0.1")]),
        (
            {"https": httpx.Proxy("https://127.0.0.1"), "all": "http://127.0.0.1"},
            [("all", "http://127.0.0.1"), ("https", "https://127.0.0.1")],
        ),
    ],
)
def test_proxies_parameter(proxies, expected_proxies):
    """Every accepted `proxies` shape is normalised into keyed Proxy entries."""
    client = httpx.AsyncClient(proxies=proxies)
    for key, expected_url in expected_proxies:
        assert key in client.proxies
        assert client.proxies[key].proxy_url == expected_url
def test_proxy_from_url():
    """repr() of a URL-built Proxy shows url, headers and the default mode."""
    proxy = httpx.Proxy("https://example.com")
    expected = "Proxy(url='https://example.com', headers={}, mode='DEFAULT')"
    assert repr(proxy) == expected