def __log_and_prepare_request(self, method, url, params, data, files, headers, auth):
    """Build a prepared ``requests`` request and debug-log its parts.

    Binds *auth* to this service (if given), prepares the request through
    ``self.session``, installs a response-logging hook, then logs method,
    URL, headers, query params, form data and file names — redacting any
    key that ends with 'password'.

    NOTE(review): Python 2 only — relies on ``dict.iteritems`` and
    ``collections.Mapping`` (removed in Python 3.10); ``urlparse`` here is
    the Python 2 module. A six-based variant of this function also exists
    in this file.
    """
    # Log every response through _log_response_data via a requests hook.
    hooks = {'response': functools.partial(_log_response_data, self.log)}
    if auth:
        bound_auth = auth.bind_to_service(self)
    else:
        bound_auth = None
    request = requests.Request(method=method, url=url, params=params, data=data, files=files, headers=headers, auth=bound_auth)
    p_request = self.session.prepare_request(request)
    p_request.hooks = {'response': hooks['response']}
    self.log.debug('request method: %s', request.method)
    self.log.debug('request url: %s', p_request.url)
    if isinstance(p_request.headers, (dict, collections.Mapping)):
        for key, val in sorted(p_request.headers.iteritems()):
            if key.lower().endswith('password'):
                val = '<redacted>'
            self.log.debug('request header: %s: %s', key, val)
    if isinstance(request.params, (dict, collections.Mapping)):
        # Log the params as they actually appear in the prepared URL.
        for key, val in sorted(urlparse.parse_qsl(
                urlparse.urlparse(p_request.url).query,
                keep_blank_values=True)):
            if key.lower().endswith('password'):
                val = '<redacted>'
            self.log.debug('request param: %s: %s', key, val)
    if isinstance(request.data, (dict, collections.Mapping)):
        # Re-parse the prepared body so we log what is really sent.
        content_type, content_type_params = cgi.parse_header(
            p_request.headers.get('content-type') or '')
        if content_type == 'multipart/form-data':
            data = cgi.parse_multipart(io.BytesIO(p_request.body), content_type_params)
        elif content_type == 'application/x-www-form-urlencoded':
            data = dict(urlparse.parse_qsl(p_request.body, keep_blank_values=True))
        else:
            data = request.data
        for key, val in sorted(data.items()):
            # pylint: disable=superfluous-parens
            if key in (request.files or {}):
                # We probably don't want to include the contents of
                # entire files in debug output.
                continue
            # pylint: enable=superfluous-parens
            if key.lower().endswith('password'):
                val = '<redacted>'
            self.log.debug('request data: %s: %s', key, val)
    if isinstance(request.files, (dict, collections.Mapping)):
        for key, val in sorted(request.files.iteritems()):
            if hasattr(val, '__len__'):
                # Only log the size, never the file contents.
                val = '<{0} bytes>'.format(len(val))
            self.log.debug('request file: %s: %s', key, val)
    return p_request
def __log_and_prepare_request(self, method, url, params, data, files, headers, auth):
    """Build a prepared ``requests`` request and debug-log its parts.

    Py2/py3-portable variant (uses ``six.iteritems``) of the function of
    the same name elsewhere in this file: binds *auth*, prepares the
    request through ``self.session``, installs a response-logging hook,
    then logs method, URL, headers, query params, form data and file
    names, redacting keys that end with 'password'.

    NOTE(review): still uses ``collections.Mapping`` (removed in Python
    3.10) and the py2-style ``urlparse``/``cgi`` modules — presumably
    aliased at import time; verify before running on Python 3.
    """
    # Log every response through _log_response_data via a requests hook.
    hooks = {'response': functools.partial(_log_response_data, self.log)}
    if auth:
        bound_auth = auth.bind_to_service(self)
    else:
        bound_auth = None
    request = requests.Request(method=method, url=url, params=params, data=data, files=files, headers=headers, auth=bound_auth)
    p_request = self.session.prepare_request(request)
    p_request.hooks = {'response': hooks['response']}
    self.log.debug('request method: %s', request.method)
    self.log.debug('request url: %s', p_request.url)
    if isinstance(p_request.headers, (dict, collections.Mapping)):
        for key, val in sorted(six.iteritems(p_request.headers)):
            if key.lower().endswith('password'):
                val = '<redacted>'
            self.log.debug('request header: %s: %s', key, val)
    if isinstance(request.params, (dict, collections.Mapping)):
        # Log the params as they actually appear in the prepared URL.
        for key, val in sorted(urlparse.parse_qsl(
                urlparse.urlparse(p_request.url).query,
                keep_blank_values=True)):
            if key.lower().endswith('password'):
                val = '<redacted>'
            self.log.debug('request param: %s: %s', key, val)
    if isinstance(request.data, (dict, collections.Mapping)):
        # Re-parse the prepared body so we log what is really sent.
        content_type, content_type_params = cgi.parse_header(
            p_request.headers.get('content-type') or '')
        if content_type == 'multipart/form-data':
            data = cgi.parse_multipart(io.BytesIO(p_request.body), content_type_params)
        elif content_type == 'application/x-www-form-urlencoded':
            data = dict(urlparse.parse_qsl(p_request.body, keep_blank_values=True))
        else:
            data = request.data
        for key, val in sorted(data.items()):
            # pylint: disable=superfluous-parens
            if key in (request.files or {}):
                # We probably don't want to include the contents of
                # entire files in debug output.
                continue
            # pylint: enable=superfluous-parens
            if key.lower().endswith('password'):
                val = '<redacted>'
            self.log.debug('request data: %s: %s', key, val)
    if isinstance(request.files, (dict, collections.Mapping)):
        for key, val in sorted(six.iteritems(request.files)):
            if hasattr(val, '__len__'):
                # Only log the size, never the file contents.
                val = '<{0} bytes>'.format(len(val))
            self.log.debug('request file: %s: %s', key, val)
    return p_request
def login(self, username, password): self.logout() params = { 'client_id': CLIENT_ID, 'audience': 'https://api.sky.co.nz', 'redirect_uri': 'https://www.skygo.co.nz', 'connection': 'Sky-Internal-Connection', 'scope': 'openid profile email offline_access', 'response_type': 'code', } resp = self._session.get('https://login.sky.co.nz/authorize', params=params, allow_redirects=False) parsed = urlparse(resp.headers['location']) payload = dict(parse_qsl(parsed.query)) payload.update({ 'username': username, 'password': password, 'tenant': 'skynz-prod', 'client_id': CLIENT_ID, 'client': None, }) resp = self._session.post( 'https://login.sky.co.nz/usernamepassword/login', json=payload) if not resp.ok: data = resp.json() raise APIError(_(_.LOGIN_ERROR, msg=data['message'])) soup = BeautifulSoup(resp.text, 'html.parser') payload = {} for e in soup.find_all('input'): if 'name' in e.attrs: payload[e.attrs['name']] = e.attrs.get('value') resp = self._session.post('https://login.sky.co.nz/login/callback', data=payload, allow_redirects=False) parsed = urlparse(resp.headers['location']) data = dict(parse_qsl(parsed.query)) payload = { 'code': data['code'], 'client_id': CLIENT_ID, 'grant_type': 'authorization_code', 'redirect_uri': 'https://www.skygo.co.nz' } self._oauth_token(payload)
def reorder_params(params): parsed = None if PY3: if isinstance(params, binary_type): params = params.decode("ascii") parsed = parse_qsl(params, encoding="utf-8") else: parsed = parse_qsl(params) if parsed: return urlencode(sorted(parsed, key=lambda kv: kv[0])) else: # Parsing failed, it may be a simple string. return params
def lists(url, request, content_type='application/json'): parameters = dict(parse_qsl(url.query)) page = try_convert(parameters.get('page'), int) or 1 limit = try_convert(parameters.get('limit'), int) or 10 # Retrieve items from fixture items = get_json(url.netloc, url.path, url.query) if items is None: return httmock.response(404, request=request) # Calculate page count and item offset offset = (page - 1) * limit page_count = int(math.ceil(float(len(items)) / limit)) return httmock.response( 200, json.dumps(items[offset:offset + limit]), { 'Content-Type': content_type, 'X-Pagination-Page': page, 'X-Pagination-Limit': limit, 'X-Pagination-Page-Count': page_count, 'X-Pagination-Item-Count': len(items) }, request=request )
def get_partner_url(self, account, nick_name, url, key, partner_url): """ 免二次登陆地址 https://docs.eeo.cn/api/zh-hans/broadcast/getWebcastUrl.html https://docs.eeo.cn/api/zh-hans/classroom/addCourseClass.html :param account: 账户 :param nick_name: 昵称 :param url: 通过接口获得播放器链接 :param key: url签名参数名 :param partner_url: url前缀 """ parsed_url = list(urlparse(url)) parsed_qs = dict(parse_qsl(parsed_url[4])) parsed_qs['account'] = account parsed_qs['nickname'] = nick_name parsed_qs['checkCode '] = self.get_sign(parsed_qs[key] + account + nick_name) url = partner_url if '?' not in url: url += "?" if not url.endswith(("&", "?")): url += "&" url += urlencode(parsed_qs) return url
def conninfo_uri_parse(dsn): ret = {} r = urlparse(dsn) if r.username: ret['user'] = r.username if r.password: ret['password'] = r.password if r.path[1:]: ret['dbname'] = r.path[1:] hosts = [] ports = [] for netloc in r.netloc.split('@')[-1].split(','): host = port = None if '[' in netloc and ']' in netloc: host = netloc.split(']')[0][1:] tmp = netloc.split(':', 1) if host is None: host = tmp[0] if len(tmp) == 2: host, port = tmp if host is not None: hosts.append(host) if port is not None: ports.append(port) if hosts: ret['host'] = ','.join(hosts) if ports: ret['port'] = ','.join(ports) ret = {name: unquote(value) for name, value in ret.items()} ret.update({name: value for name, value in parse_qsl(r.query)}) if ret.get('ssl') == 'true': del ret['ssl'] ret['sslmode'] = 'require' return ret
def get_kodi_header_formatted_url(self, url, options=None): if options is None: options = {} if url.startswith('http'): url_parts = urlparse(url) url = url_parts.path if url_parts.query: url = '?'.join([url, url_parts.query]) access_path_dbl = '/%s/%s/' % \ (self.access_path.replace('/', ''), self.access_path.replace('/', '')) location = '/'.join([self.get_url_location().rstrip('/'), url.lstrip('/')]) location = location.replace(access_path_dbl, self.access_path) url_parts = urlparse(location) query_args = parse_qsl(url_parts.query) query_args += options.items() if self.token is not None: query_args += { 'X-Plex-Token': self.token }.items() new_query_args = urlencode(query_args, True) return '%s|%s' % (urlunparse((url_parts.scheme, url_parts.netloc, url_parts.path.replace('//', '/'), url_parts.params, new_query_args, url_parts.fragment)), self.plex_identification_string)
def get_formatted_url(self, url, options=None): if options is None: options = {} url_options = self.plex_identification_header url_options.update(options) if url.startswith('http'): url_parts = urlparse(url) url = url_parts.path if url_parts.query: url = '?'.join([url, url_parts.query]) access_path_dbl = '/%s/%s/' % \ (self.access_path.replace('/', ''), self.access_path.replace('/', '')) location = '/'.join([self.get_url_location().rstrip('/'), url.lstrip('/')]) location = location.replace(access_path_dbl, self.access_path) url_parts = urlparse(location) query_args = parse_qsl(url_parts.query) query_args += url_options.items() new_query_args = urlencode(query_args, True) return urlunparse((url_parts.scheme, url_parts.netloc, url_parts.path.replace('//', '/'), url_parts.params, new_query_args, url_parts.fragment))
def download(title, img, url): from resources.lib.modules import control control.busy() import json if url is None: return try: import resolveurl url = resolveurl.resolve(url) except Exception: control.idle() xbmcgui.Dialog().ok(NAME, 'Download failed', 'Your service can\'t resolve this hoster', 'or Link is down') return try: headers = dict(parse_qsl(url.rsplit('|', 1)[1])) except: headers = dict('') content = re.compile(r'(.+?)\s+[\.|\(|\[]S(\d+)E\d+[\.|\)|\]]', re.I).findall(title) transname = title.translate(None, r'\/:*?"<>|').strip('.') transname = re.sub(r'\[.+?\]', '', transname) levels = ['../../../..', '../../..', '../..', '..'] if len(content) == 0: dest = control.setting('movie.download.path') dest = control.transPath(dest) for level in levels: try: control.makeFile(os.path.abspath(os.path.join(dest, level))) except: pass control.makeFile(dest) dest = os.path.join(dest, transname) control.makeFile(dest) else: dest = control.setting('tv.download.path') dest = control.transPath(dest) for level in levels: try: control.makeFile(os.path.abspath(os.path.join(dest, level))) except: pass control.makeFile(dest) tvtitle = re.sub(r'\[.+?\]', '', content[0]) transtvshowtitle = tvtitle.translate(None, r'\/:*?"<>|').strip('.') dest = os.path.join(dest, transtvshowtitle) control.makeFile(dest) dest = os.path.join(dest, 'Season %01d' % int(content[0][1])) control.makeFile(dest) ext = os.path.splitext(urlparse(url).path)[1][1:] if ext not in ['mp4', 'mkv', 'flv', 'avi', 'mpg']: ext = 'mp4' dest = os.path.join(dest, transname + '.' + ext) headers = quote_plus(json.dumps(headers)) from resources.lib.modules import downloader control.idle() downloader.doDownload(url, dest, title, img, headers)
def fromUrl(self, url): urlInfo = urlparse(url) qs = urlInfo.query and urlInfo.query or '' kwargs = dict() options = dict() options['hosts'] = urlInfo.netloc options['topic'] = urlInfo.path.strip('/') for name, value in six.iteritems(dict(parse_qsl(qs))): if value: options[name] = value self.maxSize = options.pop('maxSize', 10000) self.lazyLimit = options.pop('lazyLimit', True) options.setdefault('group', self._GROUP_ID_ + '-{}'.format(id(self))) # options.setdefault('group') options.setdefault('client', self._CLIENT_ID_) if urlInfo.scheme != 'kafka': raise AttributeError('schema {} not supported'.format(urlInfo.scheme)) else: for name, value in six.iteritems(options): mirror = urlOptMaps.get(name) if mirror: value = mirror[1](value) if mirror == 'bootstrap_servers': value = value.split(',') kwargs[mirror[0]] = value else: kwargs[name] = value return kwargs, { 'hosts': options.pop('hosts', '').split(','), 'client_id': options.pop('client_id', self._CLIENT_ID_), 'timeout': options.pop('timeout', 120) }
def parse_url(url): if url.startswith('?'): params = dict(parse_qsl(url.lstrip('?'), keep_blank_values=True)) for key in params: params[key] = params[key] _url = params.pop(ROUTE_TAG, '') else: params = {} _url = url params[ROUTE_URL_TAG] = url params[ROUTE_RESUME_TAG] = len( sys.argv) > 3 and sys.argv[3].lower() == 'resume:true' function = _routes.get(_url) if not function: raise RouterError(_(_.ROUTER_NO_FUNCTION, raw_url=url, parsed_url=_url)) log('Router Parsed: \'{0}\' => {1} {2}'.format(url, function.__name__, params)) return function, params
def get_content(netloc, path, query=None): components = path.strip('/').split('/') + list( itertools.chain.from_iterable([ ('#' + key, value) for key, value in sorted(parse_qsl(query or '')) ])) path = None # Search for matching fixture current = os.path.join(FIXTURES_DIR, netloc) for component in components: current = os.path.join(current, component) if os.path.exists(current + '.json'): path = current + '.json' if not os.path.exists(current): break if not path: return None # Read fixture content with open(path, 'r') as fp: return fp.read()
def router(paramstring): # type: (Text) -> None """ Route addon calls :param paramstring: url-encoded query string :raises RuntimeError: on unknown call action """ params = dict(urllib_parse.parse_qsl(paramstring)) logger.debug('Called addon with params: {}'.format(sys.argv)) if 'pathSettings' not in params: logger.warning('path-specific settings are not supported') path_settings = json.loads(params.get('pathSettings')) or {} episode_order = get_episode_order(path_settings) default_rating = path_settings.get('default_rating') if default_rating is None: default_rating = ADDON.getSetting('default_rating') if params['action'] == 'find': find_show(params['title'], params.get('year')) elif params['action'].lower() == 'nfourl': get_show_id_from_nfo(params['nfo']) elif params['action'] == 'getdetails': get_details(params['url'], default_rating) elif params['action'] == 'getepisodelist': get_episode_list(params['url'], episode_order) elif params['action'] == 'getepisodedetails': get_episode_details(params['url'], episode_order) elif params['action'] == 'getartwork': get_artwork(params.get('id')) else: raise RuntimeError('Invalid addon call: {}'.format(sys.argv)) xbmcplugin.endOfDirectory(HANDLE)
def router(paramstring): """ Route addon calls :param paramstring: url-encoded query string :type paramstring: str :raises RuntimeError: on unknown call action """ params = dict(urllib_parse.parse_qsl(paramstring)) logger.debug('Called addon with params: {}'.format(sys.argv)) if params['action'] == 'find': find_show(params['title'], params.get('year')) elif params['action'].lower() == 'nfourl': get_show_from_nfo(params['nfo']) elif params['action'] == 'getdetails': get_details(params['url']) elif params['action'] == 'getepisodelist': get_episode_list(params['url']) elif params['action'] == 'getepisodedetails': get_episode_details(params['url']) elif params['action'] == 'getartwork': get_artwork(params['id']) else: raise RuntimeError('Invalid addon call: {}'.format(sys.argv)) xbmcplugin.endOfDirectory(HANDLE)
def build_destination_path(url): if isinstance(url, string_types): url = urlparse(url) elif not isinstance(url, ParseResult): raise ValueError( 'Invalid value provided for "url" parameter (expected string or urlparse result)' ) if not url.netloc or not url.path: print('[%s] Missing netloc or path' % (url, )) return False # Format query parameters parameters = sorted([(key, value) for key, value in parse_qsl(url.query) if key and value]) if parameters: query = os.path.join( *[os.path.join('#' + key, value) for key, value in parameters]) else: query = None # Build destination path if query: return os.path.abspath( os.path.join( os.path.join(CURRENT_DIR, url.netloc) + url.path, query) + '.json') return os.path.abspath( os.path.join(CURRENT_DIR, url.netloc) + url.path + '.json')
def query_callback(request): url = urlparse(request.url) query = dict(parse_qsl(url.query)) if not query.get('query'): return 400, {}, '[]' # Build path if query.get('year'): path = 'fixtures%s/%s/%s.json' % ( url.path, query['year'], query['query'] ) else: path = 'fixtures%s/%s.json' % ( url.path, query['query'] ) # Return response try: return 200, {}, read(path) except: return 200, {}, '[]'
def do_GET(self): query_s = urlparse(self.path).query form = dict(parse_qsl(query_s)) self.send_response(200) self.send_header("Content-Type", "text/html") self.end_headers() if "code" in form: self.server.auth_code = form["code"] self.server.error = None status = "successful" elif "error" in form: self.server.error = form["error"] self.server.auth_code = None status = "failed ({})".format(form["error"]) else: self._write("<html><body><h1>Invalid request</h1></body></html>") return self._write("""<html> <body> <h1>Authentication status: {}</h1> Now you can close this window or tab. </body> </html>""".format(status))
def callback(request): uri = urlparse(request.url) parameters = dict(parse_qsl(uri.query)) if parameters.get('extended') != 'episodes': return 400, {}, None return 200, {}, read('fixtures/shows/tt0944947/seasons_extended.json')
def play_vod(slug, **kwargs): data = api.content(slug) url = data['items'][1]['items'][0]['videoUrl'] parsed = urlparse(url) params = dict(parse_qsl(parsed.query)) return _play(params['accountId'], params['referenceId'], live=False)
def parse_auth_response_url(url): query_s = urlparse(url).query form = dict(parse_qsl(query_s)) if "error" in form: raise SpotifyOauthError("Received error from auth server: " "{}".format(form["error"]), error=form["error"]) return tuple(form.get(param) for param in ["state", "code"])
def post_mockreturn(self, method, url, data=None, timeout=10, files=None, params=None, headers=None, **kwargs): if method != 'POST': raise ValueError("A 'post request' was made with method != POST") datad = dict([urlparse.parse_qsl(d)[0] for d in data.split('\n')]) filename = data_path(VO_DATA[datad['-source']]) content = open(filename, "rb").read() return MockResponse(content, **kwargs)
def _get_url(url): url = url.rstrip('/').split('/')[-1] parsed = urlparse(url) params = dict(parse_qsl(parsed.query)) if not params or 'channel-id' not in params: return url return params['channel-id']
def get_request_token(self, callback_url): client = self._get_oauth_client() request_url = '{}?oauth_callback={}'.format( self._get_endpoint('oauth'), quote(callback_url)) resp, content = client.request(request_url, 'GET') request_token = dict(parse_qsl(content.decode('utf-8'))) return request_token
def mantis_login_hook(response, *args, **kwargs):
    """requests hook to automatically log into Mantis anonymously if needed.

    The ALSA bug tracker is the only tested Mantis installation that
    actually needs this. For ALSA bugs, the dance is like so:

      1. We request bug 3301 ('jack sensing problem'):
         https://bugtrack.alsa-project.org/alsa-bug/view.php?id=3301
      2. Mantis redirects us to:
         .../alsa-bug/login_page.php?return=%2Falsa-bug%2Fview.php%3Fid%3D3301
      3. We notice this, rewrite the query, and skip to login.php:
         .../alsa-bug/login.php?return=%2Falsa-bug%2Fview.php%3Fid%3D3301&
         username=guest&password=guest
      4. Mantis accepts our credentials then redirects us to the bug view
         page via a cookie test page (login_cookie_test.php)
    """
    # Only act on redirect responses that carry a Location header.
    if response.status_code not in (301, 302, 303, 307):
        return response
    if 'Location' not in response.headers:
        return response
    url = response.headers['Location']
    scheme, host, path, params, query, fragment = urlparse(url)
    # If we can, skip the login page and submit credentials directly. The
    # query should contain a 'return' parameter which, if our credentials
    # are accepted, means we'll be redirected back whence we came. In other
    # words, we'll end up back at the bug page we first requested.
    login_page = '/login_page.php'
    if path.endswith(login_page):
        path = path[:-len(login_page)] + '/login.php'
        # keep_blank_values=True so empty params survive the round trip.
        query_list = [('username', 'guest'), ('password', 'guest')]
        query_list.extend(parse_qsl(query, True))
        if not any(name == 'return' for name, _ in query_list):
            raise BugTrackerConnectError(
                url, ("Mantis redirected us to the login page "
                      "but did not set a return path."))
        query = urlencode(query_list, True)
        url = urlunparse((scheme, host, path, params, query, fragment))
    # Previous versions of the Mantis external bug tracker fetched
    # login_anon.php in addition to the login.php method above, but none of
    # the Mantis installations tested actually needed this. For example,
    # the ALSA bugtracker actually issues an error "Your account may be
    # disabled" when accessing this page. For now it's better to *not* try
    # this page because we may end up annoying admins with spurious login
    # attempts.
    response.headers['Location'] = url
    return response
def _get_canonical_query(self, req): req_params = urlparse.parse_qsl(urlparse.urlparse(req.url).query, keep_blank_values=True) params = [] for key, val in sorted(req_params or []): params.append('='.join((urlparse.quote(key, safe='~-_.'), urlparse.quote(val, safe='~-_.')))) c_params = '&'.join(params) self.log.debug('canonical query: %s', c_params) return c_params
def __init__(self, url, **kwa): self.url = url urldata = urlparse.urlparse(url, **kwa) for key in self._components: val = getattr(urldata, key) setattr(self, key, val) self.query_str = urldata.query self.queryl = urlparse.parse_qs(urldata.query) self.query = dict(urlparse.parse_qsl(urldata.query))
def lists_request_failure(url, request): parameters = dict(parse_qsl(url.query)) page = try_convert(parameters.get('page'), int) or 1 # Return invalid response for page #2 if page == 2: return httmock.response(400, request=request) # Return page return lists(url, request)
def assert_url(url, expected_path, expected_query=None): __tracebackhide__ = True parsed = urlparse(url) query = dict(parse_qsl(parsed.query)) if parsed.path != expected_path: pytest.fail("url.path is %r, expected %r" % (parsed.path, expected_path)) if expected_query is not None and query != expected_query: pytest.fail('url.query is %r, expected %r' % (query, expected_query))
def play_channel(slug, **kwargs): data = api.video_player(slug) url = data['videoUrl'] parsed = urlparse(url) params = dict(parse_qsl(parsed.query)) item = _play(params['accountId'], params['referenceId'], live=True) item.label = data['posterImage']['altTag'] item.art = {'thumb': IMAGE_URL.format(url=data['posterImage']['url'], width=IMAGE_WIDTH)} return item
def add_url_params(url, params): """ Add GET params to provided URL being aware of existing. :param url: string of target URL :param params: dict containing requested params to be added :return: string with updated URL >> url = 'http://stackoverflow.com/test?answers=true' >> new_params = {'answers': False, 'data': ['some','values']} >> add_url_params(url, new_params) 'http://stackoverflow.com/test?data=some&data=values&answers=false' """ # Unquoting URL first so we don't loose existing args url = unquote(url) # Extracting url info parsed_url = urlparse(url) # Extracting URL arguments from parsed URL get_args = parsed_url.query # Converting URL arguments to dict parsed_get_args = dict(parse_qsl(get_args)) if isinstance(params, dict): # Merging URL arguments dict with new params parsed_get_args.update(params) # Bool and Dict values should be converted to json-friendly values # you may throw this part away if you don't like it :) parsed_get_args.update({ k: dumps(v) for k, v in parsed_get_args.items() if isinstance(v, (bool, dict)) }) # Converting URL argument to proper query string encoded_get_args = urlencode(parsed_get_args, doseq=True) elif isinstance(params, str): encoded_get_args = params else: raise TypeError # Creating new parsed result object based on provided with new # URL arguments. Same thing happens inside of urlparse. new_url = ParseResult( parsed_url.scheme, parsed_url.netloc, parsed_url.path, parsed_url.params, encoded_get_args, parsed_url.fragment, ).geturl() return new_url
def add_url_args(url, **kwargs): parsed = urlparse(url) if parsed.scheme.lower() != 'plugin': return url params = dict(parse_qsl(parsed.query, keep_blank_values=True)) params.update(**kwargs) _url = params.pop(ROUTE_TAG, None) if not _url: return url return build_url(_url, _addon_id=parsed.netloc, **params)
def _update_path(self, url, options=None): if options is None: options = {} location = self.join_url(self.get_url_location(), url) parsed_url = urlparse(location) if parsed_url.query: options.update(dict(parse_qsl(parsed_url.query))) if options: return '?'.join([parsed_url.path, urlencode(options, True)]) return parsed_url.path
def test_combine_complex_filters_and_search(self): today = date.today() data = { 'reason__exact': str(AbuseReport.REASONS.OTHER), 'type': 'addon', 'q': 'Soap', 'created__gte': self.days_ago(100).date().isoformat(), 'created__lte': self.days_ago(97).date().isoformat(), 'modified__day': str(today.day), 'modified__month': str(today.month), 'modified__year': str(today.year), } response = self.client.get(self.list_url, data, follow=True) assert response.status_code == 200 doc = pq(response.content) assert doc('#result_list tbody tr').length == 1 # Also, the forms we used for the 'created' filters should contain all # active filters and search query so that we can combine them. forms = doc('#changelist-filter form') inputs = [(elm.name, elm.value) for elm in forms.find('input') if elm.name and elm.value != ''] assert set(inputs) == set(data.items()) # Same for the 'search' form form = doc('#changelist-filter form') inputs = [(elm.name, elm.value) for elm in form.find('input') if elm.name and elm.value != ''] assert set(inputs) == set(data.items()) # Gather selected filters. lis = doc('#changelist-filter li.selected') # We've got 5 filters, so usually we'd get 5 selected list items # (because of the "All" default choice) but since 'created' is actually # 2 fields, and we have submitted both, we now have 6 expected items. assert len(lis) == 6 assert lis.text().split() == [ 'Addons', 'All', 'Other', 'From:', 'To:', 'All' ] assert lis.eq(3).find('#id_created__gte') assert lis.eq(4).find('#id_created__lte') # The links used for 'normal' filtering should also contain all active # filters even our custom fancy ones. We just look at the selected # filters to keep things simple (they should have all parameters in # data with the same value just like the forms). links = doc('#changelist-filter li.selected a') for elm in links: parsed_href_query = parse_qsl(urlparse(elm.attrib['href']).query) assert set(parsed_href_query) == set(data.items())
def parse_connection_string(value): """Original Governor stores connection strings for each cluster members if a following format: postgres://{username}:{password}@{connect_address}/postgres Since each of our patroni instances provides own REST API endpoint it's good to store this information in DCS among with postgresql connection string. In order to not introduce new keys and be compatible with original Governor we decided to extend original connection string in a following way: postgres://{username}:{password}@{connect_address}/postgres?application_name={api_url} This way original Governor could use such connection string as it is, because of feature of `libpq` library. This method is able to split connection string stored in DCS into two parts, `conn_url` and `api_url`""" scheme, netloc, path, params, query, fragment = urlparse(value) conn_url = urlunparse((scheme, netloc, path, params, '', fragment)) api_url = ([v for n, v in parse_qsl(query) if n == 'application_name'] or [None])[0] return conn_url, api_url
def lists_invalid_json(url, request): parameters = dict(parse_qsl(url.query)) page = try_convert(parameters.get('page'), int) or 1 # Return invalid response for page #2 if page == 2: return httmock.response( 200, '<invalid-json-response>', { 'Content-Type': 'application/json' }, request=request ) # Return page return lists(url, request)
def fromUrl(self, url): urlInfo = urlparse(url) options = dict(parse_qsl(urlInfo.query)) self.name = options.pop('name', 'test') ignoreSuffix = options.pop('ignoreSuffix', False) self.suffix = options.pop('suffix', 'URL') try: ignoreSuffix = json.loads(ignoreSuffix) except: pass if not ignoreSuffix: self.name = self.name + ':' + self.suffix self.maxSize = options.pop('maxSize', 10000) self.lazyLimit = options.pop('lazyLimit', True) print('--options->: {} {} {} {}'.format(self.name, self.suffix, urlInfo, ignoreSuffix)) qs = urlencode(options) url = url.split('?')[0] + qs return url
def mangle_url_l(url, include=None, exclude=None, add=None, **kwargs): """ mangle_url with preserving as much as possible (order, multiple values, empty values). Additional keyword parameters are passed to url_replace. >>> from pyaux.dicts import MVOD >>> query = r'a=&a=1&a=true&b=null&b=undefined&b=&b=5' >>> urlencode(MVOD(urlparse.parse_qsl(query, keep_blank_values=1))) == query True """ from pyaux.dicts import MVOD url_parts = urlparse.urlparse(to_bytes(url)) query = MVOD(urlparse.parse_qsl(url_parts.query, keep_blank_values=1)) query_new = mangle_dict( query, include=include, exclude=exclude, add=add, _return_list=True) return url_replace(url, query=query_new, **kwargs)
def search_callback(request): url = urlparse(request.url) query = dict(parse_qsl(url.query)) if 'id' in query and 'id_type' in query: path = 'fixtures/search/lookup/%s/%s.json' % ( query.get('id_type'), query.get('id') ) else: path = 'fixtures/search/query/%s/%s/%s.json' % ( query.get('type', 'all'), query.get('year', 'all'), query.get('query') ) try: content = read(path) return 200, {}, content except: return 200, {}, '[]'
def urlparams(url_, hash=None, **query): """ Add a fragment and/or query parameters to a URL. New query params will be appended to existing parameters, except duplicate names, which will be replaced. """ url = django_urlparse(force_text(url_)) fragment = hash if hash is not None else url.fragment # Use dict(parse_qsl) so we don't get lists of values. q = url.query query_dict = dict(parse_qsl(force_text(q))) if q else {} query_dict.update( (k, force_bytes(v) if v is not None else v) for k, v in query.items()) query_string = urlencode( [(k, unquote_to_bytes(v)) for k, v in query_dict.items() if v is not None]) new = ParseResult(url.scheme, url.netloc, url.path, url.params, query_string, fragment) return new.geturl()
def build_destination_path(url): if isinstance(url, string_types): url = urlparse(url) elif not isinstance(url, ParseResult): raise ValueError('Invalid value provided for "url" parameter (expected string or urlparse result)') if not url.netloc or not url.path: print('[%s] Missing netloc or path' % (url,)) return False # Format query parameters parameters = sorted([ (key, value) for key, value in parse_qsl(url.query) if key and value ]) if parameters: query = os.path.join(*[ os.path.join('#' + key, value) for key, value in parameters ]) else: query = None # Build destination path if query: return os.path.abspath( os.path.join( os.path.join(CURRENT_DIR, url.netloc) + url.path, query ) + '.json' ) return os.path.abspath( os.path.join(CURRENT_DIR, url.netloc) + url.path + '.json' )
def data(self): """ TODO: What is the right way to do this? """ if not self.body: return self.body elif self.body is EMPTY: return EMPTY elif self.content_type and self.content_type.startswith('application/json'): try: if isinstance(self.body, six.binary_type): return json.loads(self.body.decode('utf-8')) else: return json.loads(self.body) except ValueError as e: if isinstance(e, JSONDecodeError): # this will only be True for Python3+ raise e raise JSONDecodeError(str(e)) elif self.content_type == 'application/x-www-form-urlencoded': return dict(urlparse.parse_qsl(self.body)) else: raise NotImplementedError("No parser for content type")
def callback(request):
    """Fake paginated-collection endpoint: returns one page of ``collection``
    as JSON, with pagination metadata in the response headers."""
    if authenticated and not is_authenticated(request.headers):
        return 401, {}, ''

    # Extract pagination parameters from the query string.
    parsed = urlparse(request.url)
    params = dict(parse_qsl(parsed.query))
    page = int(params.get('page', 1))
    limit = int(params.get('limit', 10))

    # Slice out the requested page (pages are 1-based).
    offset = (page - 1) * limit
    items = collection[offset:offset + limit]
    if not items:
        return 404, {}, ''

    page_count = int(math.ceil(float(len(collection)) / limit))
    response_headers = {
        'Content-Type': 'application/json',
        'X-Pagination-Page': str(page),
        'X-Pagination-Limit': str(limit),
        'X-Pagination-Page-Count': str(page_count),
        'X-Pagination-Item-Count': str(len(collection)),
    }
    return 200, response_headers, json.dumps(items)
def get_content(netloc, path, query=None):
    """Look up a ``.json`` fixture under FIXTURES_DIR for the given URL parts.

    Query parameters are flattened into extra '#<key>' / '<value>' path
    components (sorted for determinism).  Returns the fixture's text, or
    None when no fixture matches.
    """
    # Build the component list: URL path pieces, then query-derived pieces.
    param_components = itertools.chain.from_iterable(
        ('#' + key, value)
        for key, value in sorted(parse_qsl(query or ''))
    )
    components = path.strip('/').split('/') + list(param_components)

    # Walk down the fixture tree, remembering the deepest '.json' match,
    # and stop as soon as a directory level is missing.
    match = None
    current = os.path.join(FIXTURES_DIR, netloc)
    for component in components:
        current = os.path.join(current, component)
        if os.path.exists(current + '.json'):
            match = current + '.json'
        if not os.path.exists(current):
            break

    if not match:
        return None

    with open(match, 'r') as fp:
        return fp.read()
def url_to_querydict(url):
    """ A shorthand for getting an url's query as a dict """
    raw = to_bytes(url)
    query = urlparse.urlparse(raw).query
    return dict(urlparse.parse_qsl(query))
def _generate_helper(
    self,
    expiration=DEFAULT_EXPIRATION,
    api_access_endpoint="",
    method="GET",
    content_type=None,
    content_md5=None,
    response_type=None,
    response_disposition=None,
    generation=None,
    headers=None,
    query_parameters=None,
):
    """Drive the V4 signed-URL generator with mocked credentials and
    assert the resulting URL's structure and query parameters.

    All keyword arguments are forwarded to ``self._call_fut``; each
    optional argument that is not None gets a matching assertion on the
    generated URL's query string.
    """
    # Freeze "now" so the date/timestamp assertions below are deterministic.
    now = datetime.datetime(2019, 2, 26, 19, 53, 27)
    resource = "/name/path"
    signer_email = "*****@*****.**"
    credentials = _make_credentials(signer_email=signer_email)
    # Canned signature bytes; hexlified and compared against the URL below.
    credentials.sign_bytes.return_value = b"DEADBEEF"
    # Patch the signing module's clock for the duration of the call.
    with mock.patch("google.cloud.storage._signing.NOW", lambda: now):
        url = self._call_fut(
            credentials,
            resource,
            expiration=expiration,
            api_access_endpoint=api_access_endpoint,
            method=method,
            content_type=content_type,
            content_md5=content_md5,
            response_type=response_type,
            response_disposition=response_disposition,
            generation=generation,
            headers=headers,
            query_parameters=query_parameters,
        )

    # Check the mock was called.
    credentials.sign_bytes.assert_called_once()

    # The generated URL must share scheme/netloc with the endpoint and
    # carry the resource as its path, with no fragment.
    scheme, netloc, path, qs, frag = urllib_parse.urlsplit(url)
    expected_scheme, expected_netloc, _, _, _ = urllib_parse.urlsplit(
        api_access_endpoint
    )
    self.assertEqual(scheme, expected_scheme)
    self.assertEqual(netloc, expected_netloc)
    self.assertEqual(path, resource)
    self.assertEqual(frag, "")

    # Check the URL parameters.
    params = dict(urllib_parse.parse_qsl(qs, keep_blank_values=True))
    self.assertEqual(params["X-Goog-Algorithm"], "GOOG4-RSA-SHA256")

    now_date = now.date().strftime("%Y%m%d")
    expected_cred = "{}/{}/auto/storage/goog4_request".format(
        signer_email, now_date
    )
    self.assertEqual(params["X-Goog-Credential"], expected_cred)

    now_stamp = now.strftime("%Y%m%dT%H%M%SZ")
    self.assertEqual(params["X-Goog-Date"], now_stamp)
    # NOTE(review): this always checks against self.DEFAULT_EXPIRATION,
    # not the ``expiration`` argument — presumably callers only pass
    # equivalent expirations; confirm against the test cases using this.
    self.assertEqual(params["X-Goog-Expires"], str(self.DEFAULT_EXPIRATION))

    # Signature is the hex encoding of the mocked sign_bytes output.
    signed = binascii.hexlify(credentials.sign_bytes.return_value).decode("ascii")
    self.assertEqual(params["X-Goog-Signature"], signed)

    # Optional arguments: each one asserted only when supplied.
    if response_type is not None:
        self.assertEqual(params["response-content-type"], response_type)

    if response_disposition is not None:
        self.assertEqual(
            params["response-content-disposition"], response_disposition
        )

    if generation is not None:
        self.assertEqual(params["generation"], str(generation))

    if query_parameters is not None:
        for key, value in query_parameters.items():
            # None/blank values are normalized to the empty string.
            value = value.strip() if value else ""
            self.assertEqual(params[key].lower(), value)
def _generate_helper(
    self,
    api_access_endpoint="",
    method="GET",
    content_md5=None,
    content_type=None,
    response_type=None,
    response_disposition=None,
    generation=None,
    headers=None,
    query_parameters=None,
):
    """Drive the V2 signed-URL generator with mocked credentials, rebuild
    the expected string-to-sign, and assert both the signing input and
    the generated URL's query parameters.
    """
    from six.moves.urllib.parse import urlencode

    resource = "/name/path"
    credentials = _make_credentials(signer_email="*****@*****.**")
    # Canned signature; V2 embeds it base64-encoded in the URL.
    credentials.sign_bytes.return_value = b"DEADBEEF"
    signed = base64.b64encode(credentials.sign_bytes.return_value)
    signed = signed.decode("ascii")
    expiration = 1000

    url = self._call_fut(
        credentials,
        resource,
        expiration=expiration,
        api_access_endpoint=api_access_endpoint,
        method=method,
        content_md5=content_md5,
        content_type=content_type,
        response_type=response_type,
        response_disposition=response_disposition,
        generation=generation,
        headers=headers,
        query_parameters=query_parameters,
    )

    # Check the mock was called.
    method = method.upper()

    # Normalize headers to a sorted list of (name, value) pairs so the
    # string-to-sign reconstruction below matches the implementation.
    if headers is None:
        headers = []
    elif isinstance(headers, dict):
        headers = sorted(headers.items())

    # Rebuild the expected V2 string-to-sign element by element; the
    # order of appends here is significant.
    elements = []
    expected_resource = resource
    if method == "RESUMABLE":
        # "RESUMABLE" signs as POST plus the x-goog-resumable header.
        elements.append("POST")
        headers.append(("x-goog-resumable", "start"))
    else:
        elements.append(method)

    if query_parameters is not None:
        # Keys are lower-cased and values stripped (None becomes "").
        normalized_qp = {
            key.lower(): value and value.strip() or ""
            for key, value in query_parameters.items()
        }
        expected_qp = urlencode(sorted(normalized_qp.items()))
        expected_resource = "{}?{}".format(resource, expected_qp)

    elements.append(content_md5 or "")
    elements.append(content_type or "")
    elements.append(str(expiration))
    elements.extend(["{}:{}".format(*header) for header in headers])
    elements.append(expected_resource)

    string_to_sign = "\n".join(elements)
    # The generator must have signed exactly this payload.
    credentials.sign_bytes.assert_called_once_with(string_to_sign)

    # The generated URL must share scheme/netloc with the endpoint and
    # carry the resource as its path, with no fragment.
    scheme, netloc, path, qs, frag = urllib_parse.urlsplit(url)
    expected_scheme, expected_netloc, _, _, _ = urllib_parse.urlsplit(
        api_access_endpoint
    )
    self.assertEqual(scheme, expected_scheme)
    self.assertEqual(netloc, expected_netloc)
    self.assertEqual(path, resource)
    self.assertEqual(frag, "")

    # Check the URL parameters.
    params = dict(urllib_parse.parse_qsl(qs, keep_blank_values=True))
    self.assertEqual(params["GoogleAccessId"], credentials.signer_email)
    self.assertEqual(params["Expires"], str(expiration))
    self.assertEqual(params["Signature"], signed)

    # Optional arguments: each one asserted only when supplied.
    if response_type is not None:
        self.assertEqual(params["response-content-type"], response_type)

    if response_disposition is not None:
        self.assertEqual(
            params["response-content-disposition"], response_disposition
        )

    if generation is not None:
        self.assertEqual(params["generation"], str(generation))

    if query_parameters is not None:
        for key, value in query_parameters.items():
            # None/blank values are normalized to the empty string.
            value = value.strip() if value else ""
            self.assertEqual(params[key].lower(), value)
def url_query_filter(obj):
    """Return the URL with its query string reduced to the (key, value)
    pairs accepted by ``pred``."""
    parsed = parse_url(obj)
    kept = [pair for pair in parse_qsl(parsed.query) if pred(pair)]
    return urlunparse(parsed._replace(query=urlencode(kept)))