def test_update_query(): url = URL("http://example.com/") assert str(url.update_query({"a": "1"})) == "http://example.com/?a=1" assert str(URL("test").update_query(a=1)) == "test?a=1" url = URL("http://example.com/?foo=bar") expected_url = URL("http://example.com/?foo=bar&baz=foo") assert url.update_query({"baz": "foo"}) == expected_url assert url.update_query(baz="foo") == expected_url assert url.update_query("baz=foo") == expected_url
def _mask_url(url: URL) -> str:
    if url.password:
        url = url.with_password('***')
    for key, val in url.query.items():
        if RE_SECRET_WORDS.match(key):
            url = url.update_query({key: '***'})
    return str(url)
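# A minimal usage sketch for _mask_url, assuming RE_SECRET_WORDS is a pattern
# matching secret-ish key names (the real pattern is defined elsewhere in the
# source; this stand-in is hypothetical):
import re
from yarl import URL

RE_SECRET_WORDS = re.compile(r"(password|token|secret)", re.IGNORECASE)

url = URL("http://user:hunter2@example.com/?token=abc&page=1")
print(_mask_url(url))  # roughly: http://user:***@example.com/?token=***&page=1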
async def _traced_clientsession_request(aiohttp, pin, func, instance, args, kwargs):
    method = get_argument_value(args, kwargs, 0, "method")  # type: str
    url = URL(get_argument_value(args, kwargs, 1, "url"))  # type: URL
    params = kwargs.get("params")
    headers = kwargs.get("headers") or {}

    with pin.tracer.trace(
        "aiohttp.request",
        span_type=SpanTypes.HTTP,
        service=ext_service(pin, config.aiohttp_client),
    ) as span:
        if pin._config["distributed_tracing"]:
            HTTPPropagator.inject(span.context, headers)
            kwargs["headers"] = headers

        # Params can be passed separately from the URL, so the URL has to be
        # rebuilt with the passed params before reporting.
        url_str = str(url.update_query(params) if params else url)
        parsed_url = parse.urlparse(url_str)
        set_http_meta(
            span,
            config.aiohttp_client,
            method=method,
            url=url_str,
            query=parsed_url.query,
            request_headers=headers,
        )
        resp = await func(*args, **kwargs)  # type: aiohttp.ClientResponse
        set_http_meta(
            span,
            config.aiohttp_client,
            response_headers=resp.headers,
            status_code=resp.status,
            status_msg=resp.reason,
        )
        return resp
async def _get_camera_thumbnail_url(self, camera: Camera) -> str | None:
    """Get camera thumbnail URL using the first available camera entity."""
    if not camera.is_connected or camera.is_privacy_on:
        return None

    entity_id: str | None = None
    entity_registry = self.async_get_registry()
    for channel in camera.channels:
        # do not use the package camera
        if channel.id == 3:
            continue

        base_id = f"{camera.mac}_{channel.id}"
        entity_id = entity_registry.async_get_entity_id(
            Platform.CAMERA, DOMAIN, base_id)
        if entity_id is None:
            entity_id = entity_registry.async_get_entity_id(
                Platform.CAMERA, DOMAIN, f"{base_id}_insecure")

        if entity_id:
            # verify entity is available
            entry = entity_registry.async_get(entity_id)
            if entry and not entry.disabled:
                break
            entity_id = None

    if entity_id is not None:
        url = URL(CameraImageView.url.format(entity_id=entity_id))
        return str(
            url.update_query({
                "width": THUMBNAIL_WIDTH,
                "height": THUMBNAIL_HEIGHT,
            }))
    return None
async def search(self, query: str, limit: int = 3):
    query = KOREAN_PATTERN.sub('', query)
    words = []
    for word in query.split():
        word = word.strip()
        if word:
            words.append(word)

    if len(words) == 1:
        url = URL('https://api.stackexchange.com/2.2/search/advanced')
        url = url.update_query(q=query)
        response = await self._get(url)
    else:
        query = ' '.join(words)
        url = URL('https://api.stackexchange.com/2.2/similar')
        url = url.update_query(title=query)
        response = await self._get(url)

    return response['items'][:limit]
def search_url(cls, title: str, season: int):
    url = URL('https://9anime.to/filter')
    if season > 0:
        title = f"{title} {season}"
    url = url.update_query([
        ('keyword', title),
        ('language[]', 'subbed'),
    ])
    return url
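# For reference, a sketch of what search_url builds; the exact percent-encoding
# shown is an assumption about yarl's form-encoding of spaces and brackets:
from yarl import URL

url = URL('https://9anime.to/filter').update_query([
    ('keyword', 'Black Clover 2'),
    ('language[]', 'subbed'),
])
print(url)  # e.g. https://9anime.to/filter?keyword=Black+Clover+2&language%5B%5D=subbed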
async def list_images_in_repository(request):
    registry_state = request.app["registry_state"]
    repository = request.match_info["repository"]

    request.app["token_checker"].authenticate(request, repository, ["pull"])

    try:
        tags = registry_state.get_tags(repository)
    except KeyError:
        raise exceptions.NameUnknown(repository=repository)

    tags.sort()

    include_link = False

    last = request.query.get("last", None)
    if last:
        start = tags.index(last)
        tags = tags[start:]

    n = request.query.get("n", None)
    if n is not None:
        n = int(n)
        if n < len(tags):
            include_link = True
        tags = tags[:n]

    headers = {}
    if include_link:
        url = URL(f"/v2/{repository}/tags/list")
        if n is not None:
            url = url.update_query({"n": str(n)})
        url = url.update_query({"last": tags[-1]})
        headers["Link"] = f'{url}; rel="next"'

    return web.json_response(
        {"name": repository, "tags": tags},
        headers=headers,
        dumps=ujson.dumps,
    )
def call_get(url: URL) -> str:
    """
    Make a generic call to the TfL API.

    This function handles rate limits and retries, as well as ensuring that
    the API call has the app_id and app_key appended to the call.

    :param url: URL to make a GET request on for the TfL API
    :return: decoded string of response
    """
    return requests.get(url.update_query(get_id_key())).text
async def shutdown(self):
    await self._refresh_token()
    sess_url = f'{self.endpoint}/sessions/{self.sess_id}'
    resp = await self.http_sess.delete(sess_url, headers=self.auth_hdrs)
    resp.raise_for_status()
    log.debug('deleted session: %s', self.sess_id)
    revoke_url = URL(f'{self.endpoint}/login/refreshToken')
    revoke_url = revoke_url.update_query({
        'refreshToken': self.refresh_token,
    })
    resp = await self.http_sess.delete(revoke_url, headers=self.auth_hdrs)
    resp.raise_for_status()
    await self.http_sess.close()
async def auth_everything(request, handler):
    # during setup, no login needed
    if STATUS.force_local_mode or STATUS.setup_mode:
        # bypass security completely
        ses = await get_session(request)
        if not ses:
            ses = await new_session(request)
        accept_user_login(ses)
        return await handler(request)

    # whitelist of pages that do not need login;
    # all other requests will need auth
    if handler in {login_page, login_post, captcha_image,
                   kiddie_traps, expected_404s}:
        return await handler(request)

    # verify auth
    ses = await get_session(request)
    if ses:
        active = ses.get('active', 0)
        idle = time.time() - active
        if idle > settings.MAX_IDLE_TIME:
            # stale / idle timeout
            logging.warning("Idle timeout")
            ses.invalidate()
            ses = None

    if not ses:
        # no cookie/stale cookie, so send them to logout/in page
        u = URL("/login")
        target = request.path[1:]
        if target and target != 'logout':
            u = u.update_query(u=target)
        return HTTPFound(u)

    # normal path: continue to page
    ses['active'] = time.time()
    resp = await handler(request)

    # clearly an HTML request, so force no caching
    if '/static/' not in request.path:
        resp.headers['Cache-Control'] = 'no-cache'

    return resp
async def auth(
    homeserver: str,
    username: str,
    password: str,
    server: str,
    register: bool,
    list: bool,
    update_client: bool,
    device_name: str,
    sso: bool,
    sess: aiohttp.ClientSession,
) -> None:
    if list:
        await list_servers(server, sess)
        return

    endpoint = "register" if register else "login"
    url = URL(server) / "_matrix/maubot/v1/client/auth" / homeserver / endpoint
    if update_client:
        url = url.update_query({"update_client": "true"})
    if sso:
        url = url.update_query({"sso": "true"})
        req_data = {"device_name": device_name}
    else:
        req_data = {
            "username": username,
            "password": password,
            "device_name": device_name,
        }

    async with sess.post(url, json=req_data) as resp:
        if not 200 <= resp.status < 300:
            await print_error(resp, is_register=register)
        elif sso:
            await wait_sso(resp, sess, server, homeserver)
        else:
            await print_response(resp, is_register=register)
async def download_csv(ua, _id, _ticker, date, _tf=3, _datf=5):
    _url = URL("http://export.finam.ru/{t}_{d:%d%m%y}_{d:%d%m%y}.csv".format(
        t=_ticker.upper(), d=date))
    _url = _url.update_query({
        'market': 1,       # MOEX equities
        'em': _id,         # instrument id
        'apply': 0,
        'p': _tf,          # timeframe: 1 - ticks, 2 - minutes, 3 - 5 min, 4 - 10 min, 7 - hour, 8 - day, 9 - week, 10 - month
        'datf': _datf,     # csv fields collection: 12 - verbose, 5 - simple
        'at': 1,           # header row
        'code': _ticker,
        'cn': _ticker,     # instrument ticker
        'dtf': 3,          # csv date format
        'tmf': 3,          # csv time format
        'MSOR': 1,         # 0 - start, 1 - end of candle
        'df': date.day,
        'dt': date.day,
        'mf': date.month - 1,
        'mt': date.month - 1,
        'yf': date.year,
        'yt': date.year,
        'from': f"{date:%d.%m.%Y}",
        'to': f"{date:%d.%m.%Y}",
        'f': f"{_ticker}_{date:%d%m%y}_{date:%d%m%y}",
        'e': '.csv',
        'sep': 3,          # field separator
        'sep2': 1,         # digit group separator
        'mstime': 'on',
        'mstimever': 1,
    })
    res = await ua.get(
        _url,
        headers={
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0',
            'Accept': '*/*',
            'Accept-Encoding': 'gzip, deflate',
            'Connection': 'close',
        })
    res.raise_for_status()
    # res.content is an async generator
    _csv = await res.text()
    assert res.closed
    return _csv
class TokenGetter:
    def __init__(self, session: aiohttp.ClientSession, realm, service,
                 username, password):
        self.session = session
        self.realm = realm
        self.service = service
        self.username = username
        self.password = password
        self.url = URL(realm).update_query(service=service)
        self.auth = aiohttp.BasicAuth(self.username, self.password)

    async def get_token(self, repository, actions):
        actions_str = ",".join(actions)
        scope = f"repository:{repository}:{actions_str}"
        url = self.url.update_query(scope=scope)
        async with self.session.get(url, auth=self.auth) as resp:
            if resp.status != 200:
                raise RuntimeError("Unable to get authentication token")
            payload = await resp.json()
            return payload["access_token"]
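# A minimal usage sketch for TokenGetter, assuming a Docker-registry-style
# token endpoint; the realm/service values below are illustrative only:
import asyncio
import aiohttp

async def main():
    async with aiohttp.ClientSession() as session:
        getter = TokenGetter(
            session,
            realm="https://auth.example.com/token",  # hypothetical endpoint
            service="registry.example.com",          # hypothetical service
            username="user",
            password="secret",
        )
        token = await getter.get_token("library/alpine", ["pull"])
        print(token)

asyncio.run(main())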
async def fetch_page(session, url, params=PARAMS):
    """
    Fetch a generic url, read data as json and return a promise

    Parameters
    ----------
    session : aiohttp.ClientSession
        an async session object.
    url : str
        the desired url.
    params : MultiDict, optional
        Additional params for request. The default is PARAMS.

    Returns
    -------
    data : dict
        Json content of the page
    url : str
        The requested URL (for debugging purposes)
    """
    # define a URL with yarl
    url = URL(url)
    url = url.update_query(params)

    logger.debug(f"GET {url}")

    try:
        async with session.get(url, headers=HEADERS) as response:
            # try to read json data
            data = await parse_json(response, url)
            return data, url
    except aiohttp.client_exceptions.ServerDisconnectedError as exc:
        logger.error(repr(exc))
        logger.warning("server disconnected during %s" % url)
        return {}, url
async def fetch_url(session, url=BIOSAMPLE_URL, params=BIOSAMPLE_PARAMS):
    """
    Fetch a generic url, read data as json and return a promise

    Parameters
    ----------
    session : aiohttp.ClientSession
        an async session object.
    url : str, optional
        the desired url. The default is BIOSAMPLE_URL.
    params : MultiDict, optional
        Additional params for request. The default is BIOSAMPLE_PARAMS.

    Returns
    -------
    dict
        json content of the page
    """
    # define a URL with yarl
    url = URL(url)
    url = url.update_query(params)

    logger.debug(url)

    try:
        async with session.get(url, headers=HEADERS) as response:
            # try to read json data
            return await parse_json(response, url)
    except aiohttp.client_exceptions.ServerDisconnectedError as exc:
        logger.error(repr(exc))
        logger.warning("server disconnected during %s" % url)
        return {}
def test_update_query_with_multiple_args(): url = URL("http://example.com/") with pytest.raises(ValueError): url.update_query("a", "b")
def create_url_from_request(request):
    url = URL("https://mars-photos.herokuapp.com/api/v1/rovers/curiosity/photos")
    url = url.update_query(dict(sol=request.sol, page=request.page))
    if request.camera:
        url = url.update_query(dict(camera=request.camera))
    return str(url)
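# A quick sketch of calling create_url_from_request, using a hypothetical
# SimpleNamespace standing in for the real request object:
from types import SimpleNamespace

request = SimpleNamespace(sol=1000, page=2, camera="FHAZ")
print(create_url_from_request(request))
# https://mars-photos.herokuapp.com/api/v1/rovers/curiosity/photos?sol=1000&page=2&camera=FHAZ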
def search_url(cls, query: str):
    url = URL('https://animedao.com/search/')
    url = url.update_query(key=query)
    return url
async def get_title(self, query: str):
    url = URL('https://myanimelist.net/anime.php')
    url = url.update_query(q=query)
    r = await self.session.get(url)
    text = await r.read()
    return self.extract_title(text)
def mock_next_link(self_url: URL):
    return self_url.update_query({'after': 'mock_after_id'})
def test_update_query_with_args_and_kwargs(): url = URL("http://example.com/") with pytest.raises(ValueError): url.update_query("a", foo="bar")
def cert_path(*args):
    return os.path.join(
        os.path.abspath(os.path.dirname(__file__)), "certs", *args,
    )


AMQP_URL = URL(os.getenv("AMQP_URL", "amqp://*****:*****@localhost/"))

amqp_urls = {
    "amqp": AMQP_URL,
    "amqp-named": AMQP_URL.update_query(name="pytest"),
    "amqps": AMQP_URL.with_scheme("amqps").with_query(
        {"cafile": cert_path("ca.pem"), "no_verify_ssl": 1},
    ),
    "amqps-client": AMQP_URL.with_scheme("amqps").with_query(
        {
            "cafile": cert_path("ca.pem"),
            "keyfile": cert_path("client.key"),
            "certfile": cert_path("client.pem"),
            "no_verify_ssl": 1,
        },
    ),
}
def test_update_query_multiple_keys(): url = URL("http://example.com/path?a=1&a=2") u2 = url.update_query([("a", "3"), ("a", "4")]) assert str(u2) == "http://example.com/path?a=3&a=4"
class NCSWebSocketClient:
    """Client for Nuance Cloud Services (NCS) WebSocket API

    For more info on the protocol:
    https://developer.nuance.com/mix/documentation/websockets/

    This client only supports one session + transaction at a time.
    """

    def __init__(self, url, app_id, app_key):
        self.url = URL(url)
        self.app_id = app_id
        self.app_key = app_key
        self._http_session = None
        self._ws_client = None

    @asyncio.coroutine
    def connect(self):
        self._http_session = aiohttp.ClientSession()
        url = self.url.update_query(app_id=self.app_id,
                                    app_key=self.app_key,
                                    algorithm='key')
        try:
            self._ws_client = yield from self._http_session.ws_connect(url)
        except WSServerHandshakeError as ws_error:
            info = '%s %s\n' % (ws_error.code, ws_error.message)
            for (key, value) in ws_error.headers.items():
                info += '%s: %s\n' % (key, value)
            if ws_error.code == 401:
                raise RuntimeError('Authorization failure:\n%s' % info) from ws_error
            elif 500 <= ws_error.code < 600:
                raise RuntimeError('Server error:\n%s' % info) from ws_error
            else:
                raise ws_error

    @asyncio.coroutine
    def init_session(self, user_id, device_id, **kwargs):
        session = NCSSession(client=self)
        yield from session.initiate(user_id, device_id, **kwargs)
        return session

    @asyncio.coroutine
    def receive_json(self, *args, **kwargs):
        message = yield from self._ws_client.receive_json(*args, **kwargs)
        global json_list
        json_list.append(message)
        self.log(message)
        return message

    @asyncio.coroutine
    def send_json(self, message, *args, **kwargs):
        self.log(message, sending=True)
        yield from self._ws_client.send_json(message, *args, **kwargs)

    @asyncio.coroutine
    def send_bytes(self, bytes_, *args, **kwargs):
        yield from self._ws_client.send_bytes(bytes_, *args, **kwargs)

    @asyncio.coroutine
    def close(self):
        if self._ws_client is not None and not self._ws_client.closed:
            yield from self._ws_client.close()
        if self._http_session is not None and not self._http_session.closed:
            self._http_session.close()

    @staticmethod
    def log(message, sending=False):
        print('>>>>' if sending else '<<<<')
        print(datetime.datetime.now())
        pprint.pprint(message)
        print()
class Unbound(BaseService):
    _base_url: URL = field(init=False)

    def __attrs_post_init__(self, /) -> None:
        self._base_url = URL(
            'http://unbound.biola.edu/index.cfm?method=searchResults.doSearch')

    async def get_passage(self, bible: Bible, verses: VerseRange, /) -> Passage:
        url = self._base_url.update_query({
            'search_type': 'simple_search',
            'parallel_1': bible.service_version,
            'book_section': '00',
            'book': _book_map[verses.book],
            'displayFormat': 'normalNoHeader',
            'from_chap': str(verses.start.chapter),
            'from_verse': str(verses.start.verse),
        })

        if verses.end:
            url = url.update_query({
                'to_chap': str(verses.end.chapter),
                'to_verse': str(verses.end.verse),
            })

        async with self.session.get(url) as response:
            text = await response.text()

        soup = BeautifulSoup(text, 'html.parser')

        verse_table = soup.select_one('table table table')
        if verse_table is None:
            raise DoNotUnderstandError

        rows = verse_table.select('tr')
        if rows[0].get_text('').strip() == 'No Verses Found':
            raise DoNotUnderstandError

        rtl = False
        for row in rows:
            cells = row.select('td')
            if len(cells) == 2 and cells[1].string == '\xa0':
                rtl = True
            if (len(cells) != 2
                    or cells[0].string == '\xa0'
                    or cells[1].string == '\xa0'):
                row.decompose()
            elif rtl:
                cells[1].contents[0].insert_before(cells[1].contents[1])
                cells[1].insert_before(cells[0])

        return Passage(
            text=self.replace_special_escapes(
                bible,
                _number_re.sub(r'__BOLD__\1__BOLD__', verse_table.get_text('')),
            ),
            range=verses,
            version=bible.abbr,
        )

    async def search(
        self,
        bible: Bible,
        terms: list[str],
        /,
        *,
        limit: int = 20,
        offset: int = 0,
    ) -> SearchResults:
        async with self.session.post(
            self._base_url.update_query({
                'search_type': 'advanced_search',
                'parallel_1': bible.service_version,
                'displayFormat': 'normalNoHeader',
                'book_section': 'ALL',
                'book': 'ALL',
                'search': ' AND '.join(terms),
                'show_commentary': '0',
                'show_context': '0',
                'show_illustrations': '0',
                'show_maps': '0',
            })
        ) as response:
            text = await response.text()

        soup = BeautifulSoup(text, 'html.parser')

        verse_table = soup.select_one('table table table')
        if verse_table is None:
            raise DoNotUnderstandError

        rows = verse_table.select('tr')
        if rows[0].get_text('').strip() == 'No Verses Found':
            return SearchResults([], 0)

        rows[0].decompose()
        rows[-2].decompose()
        rows[-1].decompose()

        passages: list[Passage] = []
        chapter_string = ''

        for row in verse_table.select('tr'):
            cells = row.select('td')
            if len(cells) < 2:
                continue
            if cells[0].string == '\xa0':
                chapter_string = row.get_text('').strip()
            else:
                verse_string = cells[0].get_text('').strip()[:-1]
                passage_text = cells[1].get_text('')
                passages.append(
                    Passage(
                        text=self.replace_special_escapes(bible, passage_text),
                        range=VerseRange.from_string(
                            f'{chapter_string}:{verse_string}'),
                        version=bible.abbr,
                    ))

        return SearchResults(passages, len(passages))