Exemplo n.º 1
0
 def get_redirect_url(self):
     """Return ``self.service`` with the CAS ``ticket`` query parameter appended."""
     parts = urlparse(self.service)
     params = parse_qs(parts.query)
     params['ticket'] = [self.ticket]
     # Flatten single-valued parameters back to scalars before urlencode().
     flattened = [(key, vals) if len(vals) > 1 else (key, vals[0])
                  for key, vals in params.items()]
     rebuilt = ParseResult(parts.scheme, parts.netloc, parts.path,
                           parts.params, urlencode(flattened), parts.fragment)
     return rebuilt.geturl()
Exemplo n.º 2
0
 def __init__(self, username=None, password=None, site=None, email=None):
     """Store credentials and normalise *site* into an https URL string."""
     self.username = username
     self.password = password
     # Scheme-less input needs a leading '//' so urlparse puts the host
     # into netloc rather than path.
     raw = site if urlparse(site).scheme else '%s%s' % ('//', site)
     pieces = list(urlparse(raw))
     pieces[0] = b'https'  # force https; decoded back to str just below
     pieces = [p.decode('utf-8') if isinstance(p, bytes) else p for p in pieces]
     self.url = ParseResult(*pieces).geturl()
     self.email = email
Exemplo n.º 3
0
 def parse(self, url, respar):
     """Resolve *url* against *respar* (a previously parsed base URL).

     Returns a ``(url_string, ParseResult)`` pair; params, query and
     fragment are always emptied.
     """
     parsed = urlparse(url)
     new_path = parsed.path
     # Relative reference: prefix with the directory part of the base path.
     slash = respar.path.rfind("/")
     if slash >= 0:
         new_path = respar.path[:slash + 1] + parsed.path
     resolved = ParseResult(scheme=parsed.scheme or respar.scheme,
                            netloc=parsed.netloc or respar.netloc,
                            path=new_path,
                            params='', query='', fragment='')
     return (resolved.geturl(), resolved)
Exemplo n.º 4
0
def download_key_http(address, port):
    """Download key data over plain HTTP from ``address:port`` and return the bytes.

    Raises ``requests.exceptions.RequestException`` on network failure/timeout.
    """
    # IPv6 literals must be wrapped in *single* brackets in the authority
    # component (RFC 3986 section 3.2.2); the old "[[%s]]" form produced an
    # invalid URL, and IPv4/hostnames must not be bracketed at all.
    host = "[%s]" % address if ":" in str(address) else str(address)
    url = ParseResult(
        scheme='http',
        netloc="%s:%d" % (host, port),
        path='/',
        params='',
        query='',
        fragment='')
    log.debug("Starting HTTP request")
    data = requests.get(url.geturl(), timeout=5).content
    log.debug("finished downloading %d bytes", len(data))
    return data
Exemplo n.º 5
0
    def __init__(self, url):
        """Initialize the url object.

        Create a new Url object from either a well formed url string,
        a dict of key/values, or a ParseResult.

        Args:
            url (mixed): The value to generate the url from.

        Raises:
            TypeError: If *url* is not a str, dict, or ParseResult.
        """
        if isinstance(url, ParseResult):
            self._parts = url
        elif isinstance(url, dict):
            # Accept 'hostname' (+ optional 'port') as a convenience alias
            # for 'netloc'.
            if 'hostname' in url and 'netloc' not in url:
                netloc = url.pop('hostname')
                if 'port' in url:
                    netloc += ':' + str(url.pop('port'))
                url['netloc'] = netloc
            if 'scheme' not in url:
                url['scheme'] = 'http'
            # Credentials are folded into the netloc ("user:pass@host").
            if 'username' in url:
                url['netloc'] = '{0}:{1}@{2}'.format(
                    url.pop('username'), url.pop('password', ''), url['netloc'])
            # ParseResult requires all six fields; fill in the optional ones.
            if 'params' not in url:
                url['params'] = None
            if 'fragment' not in url:
                url['fragment'] = None
            if 'path' not in url:
                url['path'] = '/'
            if 'query' not in url:
                url['query'] = None
            self._parts = ParseResult(**url)
        elif isinstance(url, str):
            self._parts = urlparse(url)
        else:
            # Previously an unsupported type left the object half-initialised
            # (no _parts attribute); fail fast instead.
            raise TypeError('url must be a str, dict, or ParseResult, '
                            'got %r' % type(url).__name__)
Exemplo n.º 6
0
 def url(self) -> str:
     """Returns the full url requested."""
     parts = ParseResult(self.scheme, self.host, self.path, '',
                         self.query_string, self.fragment)
     return urlunparse(parts)
Exemplo n.º 7
0
def make_test_headers_and_path(
        app: 'Quart',
        path: str,
        headers: Optional[Union[dict, CIMultiDict]]=None,
        query_string: Optional[dict]=None,
) -> Tuple[CIMultiDict, str]:
    """Make the headers and path with defaults for testing.

    Arguments:
        app: The application to test against.
        path: The path to request.
        headers: Initial headers to send.
        query_string: To send as a dictionary.
    """
    # Normalise headers to a CIMultiDict. The previous elif chain had a
    # no-op `headers = headers` branch and an always-true `is not None`.
    if headers is None:
        headers = CIMultiDict()
    elif not isinstance(headers, CIMultiDict):
        headers = CIMultiDict(headers)
    headers.setdefault('Remote-Addr', '127.0.0.1')
    headers.setdefault('User-Agent', 'Quart')
    headers.setdefault('host', app.config['SERVER_NAME'] or 'localhost')
    if query_string is not None:
        path = urlunparse(
            ParseResult(
                scheme='', netloc='', params='', path=path,
                query=urlencode(query_string), fragment='',
            ),
        )
    return headers, path  # type: ignore
Exemplo n.º 8
0
    def __Select_Driver(self, drivers_link: ParseResult) -> ResultSet:
        """Fetch the driver listing page and return its anchor elements.

        :param drivers_link: parsed link to a specific driver-version page
        :return: ResultSet of ``<a>`` tags found under the page's ``<pre>`` block
        """
        html = requests.get(url=drivers_link.geturl())
        # Retry up to 3 more times while the response is not 200; note the
        # final response's status code is NOT re-checked after the loop.
        for retry in range(3):
            if html.status_code == 200:
                break
            html = requests.get(url=drivers_link.geturl())
        # Use the detected encoding so non-ASCII page content decodes correctly.
        html.encoding = html.apparent_encoding
        soup = BeautifulSoup(html.text, 'html.parser')
        return soup.pre.select(selector='a')
    def paginate_undl_results(self, items_per_page=25, limit=100):
        '''
        set a search term for digitallibrary
        and use the api (such as it is)
        to get total results back
        and to paginate through those
        results

        if @limit is set, stop after @limit
        '''
        count = 1
        # First page is the current URL itself.  (The old `if count == 1:`
        # guard was dead code: count is always 1 at this point.)
        yield self.url
        while count + items_per_page < limit:
            parsed = urlparse(self.url)
            query_dict = dict(parse_qsl(parsed.query))
            # 'jrec' is the index of the first record on the page; advance it
            # past the records already seen.
            if 'jrec' in query_dict:
                count = int(query_dict['jrec']) + items_per_page + 1
            else:
                count = items_per_page + 1
            query_dict.update({'jrec': count})
            encoded = urlencode(query_dict, doseq=True)
            new_url = ParseResult(parsed.scheme, parsed.netloc, parsed.path,
                                  parsed.params, encoded,
                                  parsed.fragment).geturl()
            yield new_url
            count += items_per_page
            self.url = new_url
            logger.debug(self.url)
Exemplo n.º 10
0
def clean_url(url: str) -> str:
    """Return *url* with the ``page`` query parameter removed.

    The URL is unquoted first so existing arguments are preserved, then the
    remaining query parameters are re-encoded.
    """
    url = unquote(url)
    # Extracting url info
    parsed_url = urlparse(url)
    # Converting URL arguments to dict.  parse_qsl always yields str
    # keys/values, so the old bool/dict-to-JSON pass was dead code and has
    # been removed.
    parsed_get_args = dict(parse_qsl(parsed_url.query))
    # Drop pagination.
    parsed_get_args.pop('page', '')

    # Converting URL argument to proper query string
    encoded_get_args = urlencode(parsed_get_args, doseq=True)
    # Creating new parsed result object based on provided with new
    # URL arguments. Same thing happens inside of urlparse.
    new_url = ParseResult(parsed_url.scheme, parsed_url.netloc,
                          parsed_url.path, parsed_url.params, encoded_get_args,
                          parsed_url.fragment).geturl()

    return new_url
Exemplo n.º 11
0
    def get_callback_url(self):
        """Build the callback URL with payment details appended as query args."""
        from urllib.parse import urlparse, ParseResult
        import urllib.request, urllib.parse, urllib.error

        parsed = urlparse(self.callback)
        query_args = {
            'fwd_fee': self.fwd_miners_fee or 0,
            'input_fee': self.input_miners_fee or 0,
            'value': self.value,
            'input_address': self.input_address,
            'confirmations': self.confirmations,
            'transaction_hash': self.transaction_hash,
            'input_transaction_hash': self.input_transaction_hash,
            'destination_address': self.destination_address,
            'payee_addresses': self.payee_addresses
        }
        encoded = urllib.parse.urlencode(query_args)
        # Preserve any query string already present on the callback URL.
        url_query = encoded if not parsed.query else parsed.query + '&' + encoded

        return ParseResult(
            scheme=parsed.scheme,
            netloc=parsed.netloc,
            path=parsed.path,
            params=parsed.params,
            query=url_query,
            fragment=parsed.fragment).geturl()
Exemplo n.º 12
0
    def _fields_to_pr(cls, fields):
        """Recompose back fields dict to ParseResult.

        Rebuilds ``netloc`` from username/password/hostname/port and quotes
        the path; query/fragment are passed through unchanged.
        """
        # Build the userinfo ("user[:pass]@") part separately from the host
        # so IPv6 bracketing applies to the host only -- the previous code
        # wrapped the whole "user:pass@host" string in brackets.
        userinfo = fields['username'] or ''
        if fields['password']:
            userinfo += ':' + fields['password']
        if userinfo:
            userinfo += '@'
        host = fields['hostname']
        if fields['port']:
            if fields['hostname'].count(':') >= 2:
                # ipv6 -- need to enclose the host (only) in []
                host = '[%s]' % host
            host += ':%s' % fields['port']
        netloc = userinfo + host

        pr_fields = {
            f: fields[f]
            for f in cls._FIELDS
            if f not in ('hostname', 'password', 'username', 'port')
        }
        pr_fields['netloc'] = netloc
        pr_fields['params'] = ''
        # We need to quote the path
        pr_fields['path'] = urlquote(pr_fields['path'])
        # TODO: figure out what to do with query/fragment... one step at a time
        return ParseResult(**pr_fields)
def device_auth(cmd):
    """Build the signed device-login URL for *cmd*."""
    timestamp = openapi.timestamp()
    encoded_query = urllib.parse.urlencode(device_login_query_str(timestamp, cmd))
    parts = ParseResult(scheme=URL_SCHEME, netloc=URL_NETLOC, path=URL_PATH,
                        query=encoded_query, params=URL_PARAMETER,
                        fragment=URL_FRAGMENT)
    return urlunparse(parts)
Exemplo n.º 14
0
def make_absolute_url(relative_url, absolute_base):
    """
    Create an absolute URL for selenium testing given a relative URL. This will also replace the host of absolute_base
    with the host IP of this instance.

    Args:
        relative_url (str): A relative URL
        absolute_base (str): An absolute URL which contains the http port and scheme we need

    Returns:
        str: An absolute URL pointing to the live server instance
    """
    # Swap out the hostname, which was set to 0.0.0.0 to allow external connections
    # Change it to use ip of this container instead
    absolute_pieces = urlparse(absolute_base)
    relative_pieces = urlparse(relative_url)
    host = socket.gethostbyname(socket.gethostname())
    # A base URL without an explicit port previously produced "host:None";
    # omit the port entirely in that case.
    if absolute_pieces.port is None:
        netloc = host
    else:
        netloc = "{host}:{port}".format(host=host, port=absolute_pieces.port)
    return ParseResult(
        absolute_pieces.scheme,
        netloc,
        relative_pieces.path,
        relative_pieces.params,
        relative_pieces.query,
        relative_pieces.fragment,
    ).geturl()
def get_chunk_tags(chunks: Dict, attrs: str):
    """
    Build HTML script/link tags for a list of webpack chunks.

    :param chunks: chunk descriptors, each with 'resource_type' and 'url' keys
    :param attrs: extra attribute string inserted verbatim into each tag
    :return: list of HTML tag strings
    """
    tags = []
    for chunk in chunks:
        kind = chunk['resource_type']
        parsed = urlparse(chunk['url'])
        path = parsed.path
        # If under STATIC_URL rewrite using static tag so that we respect static file storage
        # options, eg. ManifestStaticFileStorage
        if settings.STATIC_URL and path.startswith(settings.STATIC_URL):
            try:
                path = static(path[len(settings.STATIC_URL):])
            except ValueError:
                # Allow url's that aren't managed by static files - eg. this will happen
                # for ManifestStaticFileStorage if file is not in the manifest
                pass
        # Rebuild the URL with the (possibly rewritten) path, keeping every
        # other component unchanged.
        url = ParseResult(**dict(parsed._asdict(), path=path)).geturl()
        if kind == 'js':
            tags.append(f'<script type="text/javascript" src="{url}" {attrs}></script>')
        if kind == 'css':
            tags.append(f'<link type="text/css" href="{url}" rel="stylesheet" {attrs}/>')
    return tags
Exemplo n.º 16
0
    def apply_transformation(cls, parsed_url: ParseResult) -> ParseResult:
        """Apply the actual transformation process to the url."""
        assert parsed_url.hostname is not None  # mypy workaround

        # Strip the mobile-site subdomain marker from the host.
        desktop_host = parsed_url.hostname.replace(".m.wikipedia.org",
                                                   ".wikipedia.org")
        return parsed_url._replace(netloc=desktop_host)
Exemplo n.º 17
0
def fetch_resource_by_uuid(gatekeeper_endpoint: ParseResult,
                           workspace_dir: str, kind: Kind,
                           uuid: str) -> Dict[str, Any]:
    """Fetch a resource by its uuid. Return `None` if nothing is found.
    Raise a RuntimeError exception when a error is detected within the
    gatekeeper's API.

    :param gatekeeper_endpoint: parsed base URL of the gatekeeper service
    :param workspace_dir: workspace directory used to look up the auth token
    :param kind: resource kind, mapped to an API path by _get_path_from_kind
    :param uuid: identifier of the resource to fetch
    """
    url = urljoin(gatekeeper_endpoint.geturl(),
                  os.path.join(_get_path_from_kind(kind), uuid))
    _LOGGER.info('Fetching a resource by uuid at %s', url)
    auth = 'Bearer ' + _get_workspace_token(workspace_dir)
    res_resp = requests.get(url,
                            headers={
                                'content-type': 'application/json',
                                'Authorization': auth
                            })
    try:
        res_resp.raise_for_status()
    except requests.exceptions.HTTPError as exc_notfound:
        _LOGGER.exception(
            'Failed to retrieve a resource at %s '
            '(status code = %d)', res_resp.url, res_resp.status_code)
        # 404 means "not found" -- a valid outcome; anything else propagates.
        if exc_notfound.response.status_code == 404:
            return None
        else:
            raise
    tmp = res_resp.json()
    # The gatekeeper must return a non-empty JSON object.
    if not isinstance(tmp, dict) or len(tmp) <= 0:
        exc = RuntimeError('The returned json is malformed:  {}'.format(tmp))
        _LOGGER.error('Error while fetching a resource using an uuid: %s', exc)
        raise exc
    _LOGGER.info('Succeed to retrieve the resource %s (status code = %d)',
                 res_resp.url, res_resp.status_code)
    if kind.name in tmp:  # the resource is boxed
        tmp = tmp[kind.name]
    return tmp
Exemplo n.º 18
0
def get_safe_url(request, param_name, fallback_url=None):
    """Return the URL named by *param_name* (GET or POST) if it passes
    `is_safe_url` validation, otherwise *fallback_url*."""
    url = request.GET.get(param_name) or request.POST.get(param_name)

    allowed_hosts = settings.ALLOWED_HOSTS
    require_https = request.is_secure()

    if not url:
        return fallback_url

    if not settings.DEBUG:
        if is_safe_url(url, allowed_hosts, require_https):
            return url
        return fallback_url

    # In DEBUG mode the network location part `127.0.0.1:8000` contains
    # a port and fails the validation of `is_safe_url` since it's not a
    # member of `allowed_hosts`:
    # https://github.com/django/django/blob/23946bd/django/utils/http.py#L393
    # As a quick fix, we build a new URL without the port.
    # pylint: disable=C0415
    from urllib.parse import urlparse, ParseResult

    url_info = urlparse(url)
    url_without_port = ParseResult(
        scheme=url_info.scheme,
        netloc=url_info.hostname,
        path=url_info.path,
        params=url_info.params,
        query=url_info.query,
        fragment=url_info.fragment,
    ).geturl()
    if is_safe_url(url_without_port, allowed_hosts, require_https):
        return url

    return fallback_url
Exemplo n.º 19
0
    def validate_url(url, belongs_to=None):
        """Validate *url*; return its ParseResult, or False when rejected.

        With *belongs_to* (a parsed base URL) relative URLs are completed
        from the base, and URLs on a different domain are rejected.
        """
        try:
            parsed_url = urlparse(url.rstrip('/'))
        except Exception:
            return False
        # skipping not http related resources (e.g. file:// etc) and empty urls
        is_http = parsed_url.scheme in ('http', 'https', '')
        lacks_netloc = not belongs_to and not parsed_url.netloc
        is_empty = not parsed_url.netloc and not parsed_url.path
        if not is_http or lacks_netloc or is_empty:
            return False

        if belongs_to:
            # make complete url out of relative/not full one
            # skip query string
            parsed_url = ParseResult(
                scheme=parsed_url.scheme or belongs_to.scheme,
                netloc=parsed_url.netloc or belongs_to.netloc,
                path=parsed_url.path,
                params='', query='', fragment='')

            if belongs_to.netloc.replace('www.', '') != parsed_url.netloc.replace('www.', ''):
                # given url does not belong to the root domain
                # skipping also subdomains as they can be really different resources
                #   containing their own sitemaps
                return False

        return parsed_url
Exemplo n.º 20
0
def add_url_params(url, params):
    """
    Add GET params to provided URL being aware of existing.

    :param url: string of target URL
    :param params: dict containing requested params to be added
    :return: string with updated URL

    >> url = 'http://stackoverflow.com/test?answers=true'
    >> new_params = {'answers': False, 'data': ['some','values']}
    >> add_url_params(url, new_params)
    'http://stackoverflow.com/test?data=some&data=values&answers=false'
    """
    # Unquoting URL first so we don't loose existing args
    parsed_url = urlparse(unquote(url))
    # Existing query args merged with (and overridden by) the new params.
    merged_args = dict(parse_qsl(parsed_url.query))
    merged_args.update(params)
    # Order the args, then re-encode them into a proper query string.
    ordered_args = get_ordered_args(merged_args)
    encoded_get_args = urlencode(ordered_args, doseq=True)
    # Creating new parsed result object based on provided with new
    # URL arguments. Same thing happens inside of urlparse.
    return ParseResult(
        parsed_url.scheme, parsed_url.netloc, parsed_url.path,
        parsed_url.params, encoded_get_args, parsed_url.fragment
    ).geturl()
Exemplo n.º 21
0
def spoof_url(url: str, req_context: RequestContext) -> str:
    """Rewrite *url* so it is routed through our /Request proxy endpoint.

    Relative or schemeless URLs are completed from the request context's
    target scheme/domain before being embedded as the `targetUrl` query arg.
    """
    default_domain = req_context.target_parsed.netloc
    default_scheme = req_context.target_parsed.scheme

    def normalize_url(target_url: str):
        #? Remove www.  (Fixed: str.replace returns a new string; the old
        # code discarded the result, so the prefix was never stripped.)
        result = target_url.replace('www.', '')

        if result.split(
                '/'
        )[0] == default_domain:  #? Check if first segment of 'path' is the default_domain
            result = f'{default_scheme}://' + result  #? Add http(s):// in front if it is a url like (www.google.com/path)
        return result

    targetUrl = urlparse(normalize_url(url))

    if targetUrl.scheme == '':
        targetUrl = targetUrl._replace(scheme=default_scheme)
    if targetUrl.netloc == '':
        targetUrl = targetUrl._replace(netloc=default_domain)

    spoofedTargetURL = urlunparse(targetUrl)

    spoofedURL = ParseResult(
        scheme=
        'http',  #? Always use http cause' I can't be bothered to run out and get a certificate
        netloc=req_context.own_host,
        path='/Request',
        query=qs_encode({'targetUrl': escape_url_for_qs(spoofedTargetURL)}),
        params=targetUrl.params,
        fragment=targetUrl.fragment)

    return urlunparse(spoofedURL)
Exemplo n.º 22
0
 def suggestions():
     """GET /suggestions end-point.

     Accepted Query Parameters:
     q: the prefix.
     limit: the number of results by page.
     offset: the position of the first result in the list of all suggestions.
     """
     prefix = request.args.get('q', '')
     suggestions = autocompleter.suggest(prefix)
     total = len(suggestions)
     try:
         limit = int(request.args.get('limit', total))
         offset = int(request.args.get('offset', 0))
     except ValueError:
         # Non-numeric paging parameters -> bad request.
         abort(400)
     response = {
         'total': total,
         'limit': limit,
         'offset': offset,
         'results': suggestions[offset:offset + limit]
     }
     # Provide a link to the next page while more results remain.
     if offset < total - limit:
         next_query = urlencode({'limit': limit, 'offset': offset + limit})
         response['next'] = urlunparse(
             ParseResult(scheme='',
                         netloc='',
                         path='suggestions',
                         params='',
                         query=next_query,
                         fragment=''))
     return jsonify(response)
Exemplo n.º 23
0
def filter_query_params(url: str, parsed: up.ParseResult) -> up.ParseResult:
    r"""
    Remove the following query params from an URL:
     * ``sid=\w+``: SIDs are mostly used by magento to track the users when cookies are disabled
     * ``s=\w{32}``: same as SID, but from vBulletin sites
     * ``replytocom=\d+``: used by wordpress when clicking on "answer" from a comment

    :param url: the url
    :return: the url without sid and the likes
    """
    # Cheap substring test first so most URLs skip the re-encode entirely.
    if not any(marker in parsed.query for marker in ['s=', 'sid=', 'replytocom=']):
        return parsed

    # TODO: here, the behavior of parse is inconsistant/changes the URL
    # e.g.:
    #   >>> up.parse_qsl('a=%7E_%7E%3B')
    #   [('a', '~_~;')]
    #   >>> up.urlencode([('a', '~_~;')])
    #   'a=~_~%3B'
    def _keep(param):
        key, value = param
        if key == 'sid':  # magento
            return False
        if key == 's' and len(value) == 32:  # vBulletin
            return False
        if key == 'replytocom':  # wordpress
            return False
        return True

    filtered = up.urlencode([q for q in up.parse_qsl(parsed.query) if _keep(q)])
    return parsed._replace(query=filtered)
Exemplo n.º 24
0
    def _normalize_url(self, url):
        """Resolve *url* against ``self.current_url``.

        Missing scheme/netloc/path components are inherited from the current
        URL; params/query/fragment are never inherited.
        """
        inheritable = ['scheme', 'netloc', 'path']
        parsed_current_url = urlparse(self.current_url)
        parsed_url = urlparse(url)

        args = []
        for field in ['scheme', 'netloc', 'path', 'params', 'query', 'fragment']:
            value = getattr(parsed_url, field, None)
            if not value and field in inheritable:
                value = getattr(parsed_current_url, field, '')
            args.append(value)

        return ParseResult(*args).geturl()
Exemplo n.º 25
0
async def qrcode_api(
    request: Request,
    *,
    text: str,
    size: int = 200,
    logo: Optional[HostUrl] = None,
    encode: ReturnEncode = ReturnEncode.raw,
    level: QRCodeLevel = QRCodeLevel.M,
    bgcolor: Color = Color("FFFFFF"),
    fgcolor: Color = Color("000000"),
    fun: str = "qrcode",
):
    """Render a QR code for *text* and return it as JSON, a redirect, or JSONP.

    The generated image is written under the temp directory and exposed via a
    URL built from the incoming request's scheme and host.
    """
    qr = await QRInfo.new(
        text, size=size, logo=logo, level=level, bgcolor=bgcolor, fgcolor=fgcolor
    )
    # Publish the temp-file location as an absolute URL on this server.
    qr.url = ParseResult(  # type:ignore
        scheme=request.url.scheme,
        netloc=request.url.netloc,
        path=f"temp/{qr.path.relative_to(TempFile.path)}",
        params="",
        query="",
        fragment="",
    ).geturl()
    # json -> the QRInfo object itself; raw -> 302 redirect to the image;
    # anything else -> JSONP-style javascript callback named by `fun`.
    return (
        qr
        if encode == ReturnEncode.json
        else Response(headers={"Location": qr.url}, status_code=302)
        if encode == ReturnEncode.raw
        else Response(content=f"{fun}({qr.json()})", media_type="text/javascript")
    )
Exemplo n.º 26
0
 def url(self) -> str:
     """Returns the full url requested."""
     query = self.query_string.decode('ascii')
     return urlunparse(
         ParseResult(self.scheme, self.host, self.path, '', query, ''),
     )
Exemplo n.º 27
0
def fetch_resource_by_uuid(gatekeeper_endpoint: ParseResult,
                           workspace_dir: str, kind: Kind,
                           uuid: str) -> Dict[str, Any]:
    """Fetch a resource by its uuid. Return `None` if nothing is found.
    Raise a RuntimeError exception when a error is detected within the
    gatekeeper's API.

    NOTE(review): near-duplicate of the other fetch_resource_by_uuid in this
    file (formatting differences only); consider consolidating.
    """
    url = urljoin(gatekeeper_endpoint.geturl(),
                  os.path.join(_get_path_from_kind(kind), uuid))
    _LOGGER.info('Fetching a resource by uuid at %s', url)
    auth = 'Bearer ' + _get_workspace_token(workspace_dir)
    res_resp = requests.get(url, headers={'content-type': 'application/json',
                                          'Authorization': auth})
    try:
        res_resp.raise_for_status()
    except requests.exceptions.HTTPError as exc_notfound:
        _LOGGER.exception('Failed to retrieve a resource at %s '
                          '(status code = %d)', res_resp.url,
                          res_resp.status_code)
        # 404 means "not found" -- a valid outcome; anything else propagates.
        if exc_notfound.response.status_code == 404:
            return None
        else:
            raise
    tmp = res_resp.json()
    # The gatekeeper must return a non-empty JSON object.
    if not isinstance(tmp, dict) or len(tmp) <= 0:
        exc = RuntimeError('The returned json is malformed:  {}'.format(tmp))
        _LOGGER.error('Error while fetching a resource using an uuid: %s', exc)
        raise exc
    _LOGGER.info('Succeed to retrieve the resource %s (status code = %d)',
                 res_resp.url, res_resp.status_code)
    if kind.name in tmp:  # the resource is boxed
        tmp = tmp[kind.name]
    return tmp
Exemplo n.º 28
0
def handle_authcode(request, client, redirection_uri, state=None):
    """Issue an OAuth2 authorization code and redirect back to the client.

    The code (and optional *state*) are appended to the redirection URI's
    existing query string; any fragment on the URI is dropped.
    """
    parts = urlparse(redirection_uri.uri)
    qparams = dict(parse_qsl(parts.query))

    user_id = authenticated_userid(request)
    auth_code = Oauth2Code(client, user_id)
    db.add(auth_code)
    db.flush()  # ensure auth_code.authcode is populated

    qparams['code'] = auth_code.authcode
    if state:
        qparams['state'] = state
    redirect_parts = ParseResult(parts.scheme, parts.netloc, parts.path,
                                 parts.params, urlencode(qparams), '')
    return HTTPFound(location=redirect_parts.geturl())
Exemplo n.º 29
0
def fetch_gitlab_versions(url: ParseResult) -> List[Version]:
    """Query the GitLab tags API for *url* and return candidate versions.

    Returns [] when the URL is not a recognised GitLab project URL; raises
    VersionError when the project has no tags at all.
    """
    match = GITLAB_API.match(url.geturl())
    if not match:
        return []
    domain = match.group("domain")
    project_id = match.group("project_id")
    gitlab_url = f"https://{domain}/api/v4/projects/{project_id}/repository/tags"
    info(f"fetch {gitlab_url}")
    resp = urllib.request.urlopen(gitlab_url)
    json_tags = json.loads(resp.read())
    if len(json_tags) == 0:
        raise VersionError("No git tags found")
    releases = []
    tags = []
    for tag in json_tags:
        name = tag["name"]
        assert isinstance(name, str)
        if tag["release"]:
            # TODO: has gitlab preleases?
            releases.append(Version(name))
        else:
            tags.append(Version(name))
    # if no release is found, use latest tag
    return releases if releases else tags
Exemplo n.º 30
0
 def generate_provider_url(self,
                           schema="dubbo",
                           host="127.0.0.1",
                           port="20880",
                           sapplication=None,
                           service=None,
                           methods=None,
                           validate=True,
                           defaultFilter=None,
                           dubbo_v="2.6.8"):
     """Build a dubbo provider registry URL for *service*.

     :return: url string like ``dubbo://host:port/service?interface=...&side=provider``
     """
     if methods is None:
         methods = []
     query_map = {
         # Fixed typo: the dubbo registry expects "application"; the old
         # code emitted "applicaton".
         "application": sapplication,
         "bean.name": service,
         "default.service.filter": defaultFilter,
         "default.validation": validate,
         "dubbo": dubbo_v,
         "generic": False,
         "interface": service,
         "methods": ",".join(methods),
         "side": "provider",
         "timestamp": self.timestamp
     }
     query = urlencode(query_map)
     f = ParseResult(scheme=schema,
                     netloc='%s:%s' % (host, port),
                     path=service,
                     params='',
                     query=query,
                     fragment='')
     return urlunparse(f)
Exemplo n.º 31
0
def response_url_redirect(url, **kwargs):
    """Return a 302 skygear Response redirecting to *url* with the extra
    query parameters from **kwargs appended."""
    parsed_url = urlparse(url)

    query_list = parse_qsl(parsed_url.query)
    query_list.extend(kwargs.items())

    location = ParseResult(parsed_url.scheme,
                           parsed_url.netloc,
                           parsed_url.path,
                           parsed_url.params,
                           urlencode(query_list),
                           parsed_url.fragment).geturl()

    return skygear.Response(status=302, headers=[('Location', location)])
Exemplo n.º 32
0
    def apply_transformation(cls, parsed_url: ParseResult) -> ParseResult:
        """Apply the actual transformation process to the url."""
        # Drop Facebook's click-tracking parameter, keeping everything else
        # (including blank-valued parameters) intact.
        params = parse_qs(parsed_url.query, keep_blank_values=True)
        params.pop("fbclid", None)
        cleaned_query = urlencode(params, doseq=True)
        return parsed_url._replace(query=cleaned_query)
Exemplo n.º 33
0
def _limit_and_offset(uri, limit=None, offset=None):
    """
    Set limit and/or offset query parameters of the given URI.

    A value of ``None`` removes the corresponding parameter instead.
    All other query parameters, and the path/fragment, are preserved.
    """
    parts = urlparse(uri)
    query = parse_qs(parts.query)
    if limit is None:
        query.pop('limit', None)
    else:
        query['limit'] = limit
    if offset is None:
        query.pop('offset', None)
    else:
        query['offset'] = offset

    # in Python 2, urllib expects encoded byte-strings
    if six.PY2:
        new_query = {}
        for k, v in query.items():
            if isinstance(v, list):
                v = [unicode(element).encode('utf-8') for element in v]
            elif isinstance(v, str):
                v = unicode(v).encode('utf-8')
            new_query[unicode(k).encode('utf-8')] = v
        query = new_query

    new_query_string = urlencode(query, doseq=True)
    return urlunparse(
        ParseResult(scheme=parts.scheme,
                    netloc=parts.netloc,
                    path=parts.path,
                    params=parts.params,
                    query=new_query_string,
                    fragment=parts.fragment))
Exemplo n.º 34
0
def get_redacted_url(result: ParseResult) -> ParseResult:
    """Return *result* with any password in the netloc masked by asterisks."""
    if not result.password:
        return result
    masked = '*' * len(result.password)
    redacted_netloc = f"{result.username}:{masked}@{result.hostname}"
    return result._replace(netloc=redacted_netloc)
Exemplo n.º 35
0
    def __init__(self, hass: HomeAssistant, entry: ConfigEntry) -> None:
        """Initialize the data object."""
        super().__init__(hass,
                         _LOGGER,
                         name="SolarLog",
                         update_interval=timedelta(seconds=60))

        host_entry = entry.data[CONF_HOST]

        # Normalise the configured host into a full http URL: input without a
        # scheme parses with the host in `path`, so fall back accordingly.
        url = urlparse(host_entry, "http")
        netloc = url.netloc or url.path
        path = url.path if url.netloc else ""
        # Keep params/query/fragment from the original parse via *url[3:].
        url = ParseResult("http", netloc, path, *url[3:])
        self.unique_id = entry.entry_id
        self.name = entry.title
        self.host = url.geturl()
Exemplo n.º 36
0
Arquivo: url.py Projeto: beralt/caldav
    def join(self, path):
        """
        assumes this object is the base URL or base path.  If the path
        is relative, it should be appended to the base.  If the path
        is absolute, it should be added to the connection details of
        self.  If the path already contains connection details and the
        connection details differ from self, raise an error.
        """
        pathAsString = str(path)
        # Joining with an empty path is a no-op.
        if not path or not pathAsString:
            return self
        path = URL.objectify(path)
        # Conflicting connection details cannot be merged.
        if ((path.scheme and self.scheme and path.scheme != self.scheme)
                or (path.hostname and self.hostname
                    and path.hostname != self.hostname)
                or (path.port and self.port and path.port != self.port)):
            raise ValueError("%s can't be joined with %s" % (self, path))

        # NOTE(review): path.path is assumed non-empty here -- an objectified
        # URL with an empty path would raise IndexError; confirm upstream.
        if path.path[0] == '/':
            # Absolute path replaces the base path entirely.
            ret_path = uc2utf8(path.path)
        else:
            # Relative path is appended, avoiding a doubled slash.
            sep = "/"
            if self.path.endswith("/"):
                sep = ""
            ret_path = "%s%s%s" % (self.path, sep, uc2utf8(path.path))
        return URL(
            ParseResult(self.scheme or path.scheme, self.netloc or path.netloc,
                        ret_path, path.params, path.query, path.fragment))
Exemplo n.º 37
0
    def dispatch(self, request, *args, **kwargs):
        """Require a user email address on file before allowing the view."""
        if test_func_email_required(request.user):
            return super(EmailRequired, self).dispatch(request, *args, **kwargs)

        messages.add_message(
            request,
            messages.INFO,
            "You need to have an email address on file before you can do that.",
        )

        # Remember where they were trying to go, so we can redirect them
        # back after they agree. There's logic in TermsView to pick up on
        # this parameter.
        next_param = urlencode({REDIRECT_FIELD_NAME: request.path})
        redirect_to = ParseResult(
            scheme="",
            netloc="",
            path=str(reverse_lazy("users:email_change")),
            params="",
            query=str(next_param),
            fragment="",
        ).geturl()
        return HttpResponseRedirect(redirect_to)
Exemplo n.º 38
0
def get_direct_url(url, headers):
    """Gets the zip direct download link from the project download page"""
    download_url = href_from_link_text(url,
                                       headers,
                                       'Problems Downloading')
    parsed = urlparse(download_url)
    if parsed.scheme not in ('http', 'https'):
        # The link is relative: it lacks a scheme and netloc, so borrow
        # both from the parent page's URL and rebuild the absolute form.
        base = urlparse(url)
        parsed = parsed._replace(scheme=base.scheme, netloc=base.netloc)
        download_url = parsed.geturl()
    return href_from_link_text(download_url,
                               headers,
                               'direct link')
Exemplo n.º 39
0
def fetch_resource(gatekeeper_endpoint: ParseResult, workspace_dir: str,
                   kind: Kind, vendor: str, name: str,
                   version: str) -> Tuple[str, Dict[str, Any]]:
    """Fetch a resource and return its uuid and JSON as a dictionary.

    Returns ``None`` if no element matches the (vendor, name, version)
    triple.  Raises a RuntimeError when the gatekeeper API returns a
    payload that is not a list, and lets requests' HTTPError propagate
    on HTTP failures.
    """
    url = urljoin(gatekeeper_endpoint.geturl(), _get_path_from_kind(kind))
    _LOGGER.info('Fetching a %s resource by name at %s', kind, url)
    query_params_raw = {'vendor': vendor,  # Dict[Str, Str]
                        'name': name,
                        'version': version}
    # We force the order of the query's parameters to lower the impact on tests
    # when a key is added or removed
    query_params = collections.OrderedDict(sorted(query_params_raw.items()))
    auth = 'Bearer ' + _get_workspace_token(workspace_dir)
    res_resp = requests.get(url, params=query_params,
                            headers={'content-type': 'application/json',
                                     'Authorization': auth})
    try:
        res_resp.raise_for_status()
    except requests.exceptions.HTTPError:
        _LOGGER.exception('Failed to retrieve a resource at %s '
                          '(status code = %d)', res_resp.url,
                          res_resp.status_code)
        # REMARK: if nothing is found, then the API return an empty [] and
        # not 404
        raise
    tmp = res_resp.json()
    if not isinstance(tmp, list):
        exc = RuntimeError('The returned json is not boxed by a list')
        _LOGGER.error('The GK API must return a list of resources: %s', exc)
        raise exc
    _LOGGER.info('Succeed to retrieve the resource %s (status code = %d): %s',
                 res_resp.url, res_resp.status_code, tmp[:20])
    for elt in tmp:
        # Use .get() so an element lacking a 'uuid' key is skipped with a
        # warning (as the message below intends) instead of raising KeyError.
        this_uuid = elt.get('uuid')
        if not this_uuid:
            _LOGGER.warning('Ignoring an element without id or uuid: %s', elt)
            continue
        if kind.name in elt:  # the resource is boxed
            elt = elt[kind.name]
        if all([elt['vendor'] == vendor, elt['name'] == name,
                elt['version'] == version]):
            return (this_uuid, elt)
    return None
Exemplo n.º 40
0
    def __init__(self, parsed: ParseResult = None):
        """Classify a task-creation request from an optional parsed URL.

        With no URL at all the task is Empty; otherwise the creation type
        is derived from the URL's scheme and path suffix.
        """
        self.parsed = parsed
        if parsed is None:
            # No URL supplied: an empty creation request.
            self.kind = TaskCreationType.Empty
            self.url = None
            return

        scheme = parsed.scheme
        path = parsed.path

        # subtaskInfo is set by Model
        self.subtaskInfo = []

        self.url = parsed.geturl()
        self.path = None
        self.kind = None

        # Torrent files first: distinguish local files from remote ones.
        # An unrecognized scheme deliberately falls through to the generic
        # scheme-based classification below.
        if path.endswith(".torrent"):
            if scheme == "":
                self.kind = TaskCreationType.LocalTorrent
                return
            if scheme in ("http", "https", "ftp"):
                self.kind = TaskCreationType.RemoteTorrent
                return

        if path.endswith((".metalink", ".meta4")):
            if scheme in ("http", "https", "ftp"):
                self.kind = TaskCreationType.MetaLink
        elif scheme == "ed2k":
            self.kind = TaskCreationType.Emule
        elif scheme == "magnet":
            self.kind = TaskCreationType.Magnet
        elif scheme in ("http", "https", "ftp"):
            self.kind = TaskCreationType.Normal
Exemplo n.º 41
0
def batch_raw_query(prometheus_endpoint: ParseResult,
                    start_timestamp: int,
                    end_timestamp: int,
                    step: datetime.timedelta,
                    query: str,
                    maxpts=11000) -> Iterable[bytes]:
    """Retrieve metrics from a Prometheus database"""
    step_seconds = int(step.total_seconds())
    step_param = '{}s'.format(step_seconds)
    url = urljoin(prometheus_endpoint.geturl(), 'api/v1/query_range')

    def fetch_range(range_start, range_end):
        """Fetch one sub-range and return the raw response body."""
        params = [('start', range_start),
                  ('end', range_end),
                  ('step', step_param),
                  ('query', query)]
        return requests.get(url, params=params).content

    span = end_timestamp - start_timestamp
    # Cap the batch size so no single request asks for more than maxpts
    # data points.
    batch_size = min(span // step_seconds, maxpts)  # type: int
    for range_start, range_end in _create_batches(start_timestamp,
                                                  end_timestamp,
                                                  batch_size):
        yield fetch_range(range_start, range_end)
Exemplo n.º 42
0
class Url(object):

    """An object based representation of a Url.

    Wraps a :class:`urllib.parse.ParseResult` and exposes its components
    as read-only properties, plus convenience helpers (``subdomain``,
    ``path_index``, ``assemble``).
    """

    @property
    def scheme(self):
        """The URL scheme, e.g. ``'https'``."""
        return self._parts.scheme

    @property
    def netloc(self):
        """The full network location, including credentials and port."""
        return self._parts.netloc

    @property
    def hostname(self):
        """The lowercased host name, without credentials or port."""
        return self._parts.hostname

    @property
    def subdomain(self):
        """
        Returns the subdomain for the URL.
        With thanks: http://stackoverflow.com/questions/1189128/regex-to-extract-subdomain-from-url
        """
        regex = r'(?:http[s]*\:\/\/)*(.*?)\.(?=[^\/]*\..{2,5})'
        hostname = self.hostname
        if not hostname:
            return None
        matches = re.match(regex, hostname)
        return matches.group(1) if matches else None

    @property
    def port(self):
        """The port number as an int, or None when absent."""
        return self._parts.port

    @property
    def path(self):
        """The hierarchical path component."""
        return self._parts.path

    def path_index(self, index=0):
        """Return the path segment at *index*, or None when out of range.

        Example: for path ``/a/b``, ``path_index(1)`` returns ``'b'``.
        """
        # IndexError: index beyond the number of segments.
        # AttributeError: defensive, in case _parts carries a None path
        # (possible when constructed from a dict with an explicit None).
        try:
            split_path = self.path.strip('/').split('/')
            return split_path[index]
        except (IndexError, AttributeError):
            return None

    @property
    def params(self):
        """Parameters for the last path segment (after ``;``)."""
        return self._parts.params

    @property
    def query(self):
        """The raw query string (after ``?``)."""
        return self._parts.query

    @property
    def fragment(self):
        """The fragment identifier (after ``#``)."""
        return self._parts.fragment

    @property
    def username(self):
        """The username embedded in the netloc, or None."""
        return self._parts.username

    @property
    def password(self):
        """The password embedded in the netloc, or None."""
        return self._parts.password

    def __init__(self, url):
        """Initialize the url object.

        Create a new Url object from either a well formed url string,
        a dict of key/values, or a ParseResult.

        Args:
            url (mixed): The value to generate the url from.

        Raises:
            TypeError: if *url* is not a str, dict, or ParseResult.
        """
        if isinstance(url, ParseResult):
            self._parts = url
        elif isinstance(url, dict):
            # Accept 'hostname' (+ optional 'port') as a friendlier
            # alternative to a pre-built 'netloc'.
            if 'hostname' in url and 'netloc' not in url:
                netloc = url.pop('hostname')
                if 'port' in url:
                    netloc += ':' + str(url.pop('port'))
                url['netloc'] = netloc
            if 'scheme' not in url:
                url['scheme'] = 'http'
            if 'username' in url:
                url['netloc'] = '{0}:{1}@{2}'.format(
                    url.pop('username'), url.pop('password', ''), url['netloc'])
            # Fill the remaining ParseResult fields with inert defaults.
            if 'params' not in url:
                url['params'] = None
            if 'fragment' not in url:
                url['fragment'] = None
            if 'path' not in url:
                url['path'] = '/'
            if 'query' not in url:
                url['query'] = None
            self._parts = ParseResult(**url)
        elif isinstance(url, str):
            self._parts = urlparse(url)
        else:
            # Fail fast instead of leaving _parts unset, which would
            # surface later as a confusing AttributeError.
            raise TypeError(
                'url must be a str, dict, or ParseResult, not %r' % type(url))

    def assemble(self):
        """Return the URL as a string."""
        return self._parts.geturl()

    def __str__(self):
        return self.assemble()
Exemplo n.º 43
0
    def __init__(self,
                 username,
                 password,
                 site='https://tutor-qa.openstax.org',
                 email=None,
                 email_username=None,
                 email_password=None,
                 driver_type='chrome',
                 capabilities=None,
                 pasta_user=None,
                 wait_time=DEFAULT_WAIT_TIME,
                 opera_driver='',
                 existing_driver=None,
                 **kwargs):
        """
        Base user constructor.

        username (string): website username
        password (string): website password
        site (string): website URL
        driver_type (string): web browser type
        pasta_user (PastaSauce): optional API access for saucelabs
        capabilities (dict): browser settings; copy object to avoid overwrite
            Defaults:
                DesiredCapabilities.ANDROID.copy()
                DesiredCapabilities.CHROME.copy()
                DesiredCapabilities.EDGE.copy()
                DesiredCapabilities.FIREFOX.copy()
                DesiredCapabilities.HTMLUNIT.copy()
                DesiredCapabilities.HTMLUNITWITHJS.copy()
                DesiredCapabilities.INTERNETEXPLORER.copy()
                DesiredCapabilities.IPAD.copy()
                DesiredCapabilities.IPHONE.copy()
                DesiredCapabilities.OPERA.copy()
                DesiredCapabilities.PHANTOMJS.copy()
                DesiredCapabilities.SAFARI.copy()
            Keys:
                platform
                browserName
                version
                javascriptEnabled
        wait (int): standard time, in seconds, to wait for Selenium commands
        opera_driver (string): Chromium location
        """
        self.username = username
        self.password = password
        # A scheme-less site would be parsed as a bare path; prefixing '//'
        # makes urlparse treat it as a network location. The scheme is then
        # forced to https regardless of what the caller supplied.
        parse = urlparse(site if urlparse(site).scheme
                         else '%s%s' % ('//', site))
        parse = parse._replace(scheme='https')
        self.url = parse.geturl()
        self.email = email
        self.email_username = email_username
        self.email_password = email_password
        super().__init__(driver_type=driver_type,
                         capabilities=capabilities,
                         pasta_user=pasta_user,
                         wait_time=wait_time,
                         opera_driver=opera_driver,
                         existing_driver=existing_driver,
                         **kwargs)