Example #1
def get_canonical_and_alternates_urls(url, drop_ln=True, washed_argd=None, quote_path=False):
    """
    Given an Invenio URL, returns a tuple with two elements. The first is the
    canonical URL, that is, the original URL with the CFG_SITE_URL prefix and
    with the ln= argument stripped. The second element is a mapping,
    language code -> alternate URL.

    @param quote_path: if True, the path section of the given C{url}
                       is quoted according to RFC 2396
    """
    dummy_scheme, dummy_netloc, path, dummy_params, query, fragment = urlparse(url)
    canonical_scheme, canonical_netloc = urlparse(cfg.get("CFG_SITE_URL"))[0:2]
    parsed_query = washed_argd or parse_qsl(query)
    no_ln_parsed_query = [(key, value) for (key, value) in parsed_query if key != "ln"]
    if drop_ln:
        canonical_parsed_query = no_ln_parsed_query
    else:
        canonical_parsed_query = parsed_query
    if quote_path:
        path = urllib.quote(path)
    canonical_query = urlencode(canonical_parsed_query)
    canonical_url = urlunparse((canonical_scheme, canonical_netloc, path, dummy_params, canonical_query, fragment))
    alternate_urls = {}
    for ln in cfg.get("CFG_SITE_LANGS"):
        alternate_query = urlencode(no_ln_parsed_query + [("ln", ln)])
        alternate_url = urlunparse((canonical_scheme, canonical_netloc, path, dummy_params, alternate_query, fragment))
        alternate_urls[ln] = alternate_url
    return canonical_url, alternate_urls
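
The parse_qsl / urlencode / urlunparse round trip this helper relies on can be exercised with the standard library alone; a minimal sketch, with no Invenio configuration assumed:

from urllib.parse import urlparse, parse_qsl, urlencode, urlunparse

url = "http://example.org/search?p=test&ln=en#results"
scheme, netloc, path, params, query, fragment = urlparse(url)
no_ln = [(k, v) for (k, v) in parse_qsl(query) if k != "ln"]
print(urlunparse((scheme, netloc, path, params, urlencode(no_ln), fragment)))
# -> http://example.org/search?p=test#results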
Example #2
    def _secure_request(self, url, method, data=None, files=None, headers=None,
                        raw=False, send_as_json=True, content_type=None,
                        **request_kwargs):

        full_url = self.build_url(url)

        # Add token (if it's not already there)
        if self._token:
            parsed = list(urlparse(full_url))
            if not parsed[4]:  # query
                parsed[4] = 'token=%s' % self._token
                full_url = urlunparse(parsed)
            elif 'token' not in parse_qs(parsed[4]):
                parsed[4] += '&token=%s' % self._token
                full_url = urlunparse(parsed)
        headers = headers or {}

        # If files are being sent, we cannot encode data as JSON
        if send_as_json and not files:
            headers['content-type'] = 'application/json'
            data = json.dumps(data or {})
        else:
            if content_type:
                headers['content-type'] = content_type
            data = data or ''

        method = getattr(requests, method, None)
        response = method(full_url, data=data, files=files, headers=headers,
                          **request_kwargs)
        self.check_for_errors(response)  # Raise exception if something failed

        if raw or not response.content:
            return response.content
        return json.loads(response.text)
Example #3
def get_commit_url(commit, pkg):
    try:
        upstream_url = parse.urlsplit(pkg["upstream"])

        if upstream_url.netloc == "git.openstack.org":
            commit_url = ("http",
                          upstream_url.netloc,
                          "/cgit%s/commit/?id=" % upstream_url.path,
                          "", "", "")
            commit_url = parse.urlunparse(commit_url)
        elif upstream_url.netloc == "github.com":
            commit_url = ("https",
                          upstream_url.netloc,
                          "%s/commit/" % upstream_url.path,
                          "", "", "")
            commit_url = parse.urlunparse(commit_url)
        else:
            # Fallback when no cgit URL can be defined
            commit_url = pkg["upstream"]
    except KeyError:
        # This should not happen, but pkg['upstream'] may not be present
        # after some error in the gitrepo driver
        commit_url = ''

    return commit_url
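
A hedged usage sketch of the GitHub branch above; the pkg mapping is hypothetical, and the caller presumably appends the commit hash to the returned URL:

pkg = {"upstream": "https://github.com/openstack/nova"}
print(get_commit_url(None, pkg))
# -> https://github.com/openstack/nova/commit/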
Example #4
    def base_url(self, filters, auth_data=None):
        """Base URL from catalog

        Filters can be:
        - service: compute, image, etc
        - region: the service region
        - endpoint_type: adminURL, publicURL, internalURL
        - api_version: replace catalog version with this
        - skip_path: take just the base URL
        """
        if auth_data is None:
            auth_data = self.auth_data
        token, _auth_data = auth_data
        service = filters.get('service')
        region = filters.get('region')
        endpoint_type = filters.get('endpoint_type', 'publicURL')

        if service is None:
            raise exceptions.EndpointNotFound("No service provided")

        _base_url = None
        for ep in _auth_data['serviceCatalog']:
            if ep["type"] == service:
                for _ep in ep['endpoints']:
                    if region is not None and _ep['region'] == region:
                        _base_url = _ep.get(endpoint_type)
                if not _base_url:
                    # No region matching, use the first
                    _base_url = ep['endpoints'][0].get(endpoint_type)
                break
        if _base_url is None:
            raise exceptions.EndpointNotFound(
                "service: %s, region: %s, endpoint_type: %s" %
                (service, region, endpoint_type))

        parts = urlparse.urlparse(_base_url)
        if filters.get('api_version', None) is not None:
            version_path = '/%s' % filters['api_version']
            path = re.sub(r'(^|/)+v\d+(?:\.\d+)?',
                          version_path,
                          parts.path,
                          count=1)
            _base_url = urlparse.urlunparse((parts.scheme,
                                             parts.netloc,
                                             path or version_path,
                                             parts.params,
                                             parts.query,
                                             parts.fragment))
        if filters.get('skip_path', None) is not None and parts.path != '':
            _base_url = urlparse.urlunparse((parts.scheme,
                                             parts.netloc,
                                             '/',
                                             parts.params,
                                             parts.query,
                                             parts.fragment))

        return _base_url
Example #5
 def output(self, key, obj):
     try:
         data = to_marshallable_type(obj)
         endpoint = self.endpoint if self.endpoint is not None else request.endpoint
         o = urlparse(url_for(endpoint, _external=self.absolute, **data))
         if self.absolute:
             scheme = self.scheme if self.scheme is not None else o.scheme
             return urlunparse((scheme, o.netloc, o.path, "", "", ""))
         return urlunparse(("", "", o.path, "", "", ""))
     except TypeError as te:
         raise MarshallingError(te)
Example #6
    def decorate(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except exc.OAuthException as o_exc:

            # Extract the parameters
            error = o_exc.error
            error_description = o_exc.msg or _("No details available.")

            # If we have a redirect URL, build the error redirect.
            if o_exc.redirect_uri:
                # Split the redirect_url apart
                parts = urlparse(o_exc.redirect_uri)

                # Add the error and error_description
                if parts.query:
                    params = parse_qsl(parts.query)
                else:
                    params = []
                params.append(('error', error))
                params.append(('error_description', error_description))

                # Overwrite the old query params and reconstruct the URL
                parts_list = list(parts)
                parts_list[4] = urlencode(params)
                location = urlunparse(parts_list)

                redirect(location)
            else:
                error_body = {
                    'error': error,
                    'error_description': error_description
                }
                response.json = error_body
                abort(o_exc.code, error_description, json_body=error_body)
Example #7
    def POST(self, endpoint, data=None):
        if data is None:
            data = dict()

        response = requests.post(urlunparse((self.scheme, self.host, endpoint, '', '', '')), headers={'Content-Type': 'application/json'}, json=data, auth=self.auth)
        response.raise_for_status()
        return makeresponse(response)
Example #8
File: utils.py Project: ForkRepo/sentry
def url_add_parameters(url, params):
    """Adds parameters to URL, parameter will be repeated if already present"""
    if params:
        fragments = list(urlparse(url))
        fragments[4] = urlencode(parse_qsl(fragments[4]) + params.items())
        url = urlunparse(fragments)
    return url
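
Note that parse_qsl(...) + params.items() only works on Python 2, where dict.items() returns a list. A minimal Python 3 variant (the _py3 name is ours, not Sentry's):

from urllib.parse import urlparse, parse_qsl, urlencode, urlunparse

def url_add_parameters_py3(url, params):
    # list(...) is needed on Python 3, where dict.items() is a view
    if params:
        fragments = list(urlparse(url))
        fragments[4] = urlencode(parse_qsl(fragments[4]) + list(params.items()))
        url = urlunparse(fragments)
    return url

print(url_add_parameters_py3('http://example.com/?a=1', {'b': '2'}))
# -> http://example.com/?a=1&b=2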
Example #9
    def _parse_contents(self, response):
        # Wix pages aren't really parseable, so anytime we see them,
        # let's re-run it (depth-1) with an escaped-fragment to get the real html source
        if 'https://static.wixstatic.com/' in response.body and '_escaped_fragment_' not in response.url:
            parsed_url = urlparse(response.url)
            qs = parse_qs(parsed_url.query)
            qs['_escaped_fragment_'] = ''
            wix_scrapeable_url = urlunparse(
                (parsed_url.scheme, parsed_url.netloc, parsed_url.path, parsed_url.params, urlencode(qs), parsed_url.fragment)
            )
            response.meta['depth'] -= 1
            return [scrapy.Request(wix_scrapeable_url, self.parse)]

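        # NOTE: the bare "return" below ends the method early; the keyword
        # analysis code that follows is unreachable as written.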
        return
        if not hasattr(response, 'selector'):
            logging.info('Skipping unknown file from: %s', response.url)
            return
        # Get all text contents of tags (unless they are script or style tags)
        text_contents = ' '.join(response.selector.xpath('//*[not(self::script|self::style)]/text()').extract()).lower()

        processed_text = grammar_matcher.StringProcessor(text_contents, regex_keywords.WORD_BOUNDARIES)
        wrong = processed_text.get_tokens(all_styles.DANCE_WRONG_STYLE)
        good = processed_text.get_tokens(rules.STREET_STYLE)
        if (wrong or good):
            #print response.url, set(wrong), set(good)
            pass
Example #10
def _build_url(url, _params):
    """Build the actual URL to use."""

    # Support for unicode domain names and paths.
    scheme, netloc, path, params, query, fragment = urlparse(url)
    netloc = netloc.encode('idna').decode('utf-8')
    if not path:
        path = '/'

    if six.PY2:
        if isinstance(scheme, six.text_type):
            scheme = scheme.encode('utf-8')
        if isinstance(netloc, six.text_type):
            netloc = netloc.encode('utf-8')
        if isinstance(path, six.text_type):
            path = path.encode('utf-8')
        if isinstance(params, six.text_type):
            params = params.encode('utf-8')
        if isinstance(query, six.text_type):
            query = query.encode('utf-8')
        if isinstance(fragment, six.text_type):
            fragment = fragment.encode('utf-8')

    enc_params = _encode_params(_params)
    if enc_params:
        if query:
            query = '%s&%s' % (query, enc_params)
        else:
            query = enc_params
    url = (urlunparse([scheme, netloc, path, params, query, fragment]))
    return url
Example #11
File: url.py Project: AugustLONG/scrapy
def canonicalize_url(url, keep_blank_values=True, keep_fragments=False,
                     encoding=None):
    """Canonicalize the given url by applying the following procedures:

    - sort query arguments, first by key, then by value
    - percent encode paths and query arguments. non-ASCII characters are
      percent-encoded using UTF-8 (RFC-3986)
    - normalize all spaces (in query arguments) '+' (plus symbol)
    - normalize percent encodings case (%2f -> %2F)
    - remove query arguments with blank values (unless keep_blank_values is True)
    - remove fragments (unless keep_fragments is True)

    The url passed can be a str or unicode, while the url returned is always a
    str.

    For examples see the tests in tests/test_utils_url.py
    """

    scheme, netloc, path, params, query, fragment = parse_url(url)
    keyvals = parse_qsl(query, keep_blank_values)
    keyvals.sort()
    query = urlencode(keyvals)

    # XXX: copied from w3lib.url.safe_url_string to add encoding argument
    # path = to_native_str(path, encoding)
    # path = moves.urllib.parse.quote(path, _safe_chars, encoding='latin1') or '/'

    path = safe_url_string(_unquotepath(path)) or '/'
    fragment = '' if not keep_fragments else fragment
    return urlunparse((scheme, netloc.lower(), path, params, query, fragment))
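
A hedged usage sketch, assuming scrapy's helpers (parse_url, safe_url_string, _unquotepath) are importable; per the docstring, query arguments come back sorted first by key, then by value:

canonicalize_url('http://www.example.com/do?c=3&b=5&b=2&a=50')
# -> 'http://www.example.com/do?a=50&b=2&b=5&c=3'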
Example #12
File: oauth2.py Project: pashinin/restkit
    def to_url(self):
        """Serialize as a URL for a GET request."""
        base_url = urlparse.urlparse(self.url)
        try:
            query = base_url.query
        except AttributeError:
            # must be python <2.5
            query = base_url[4]
        query = parse_qs(query)
        for k, v in self.items():
            if isinstance(v, six.text_type):
                v = v.encode("utf-8")
            query.setdefault(k, []).append(v)

        try:
            scheme = base_url.scheme
            netloc = base_url.netloc
            path = base_url.path
            params = base_url.params
            fragment = base_url.fragment
        except AttributeError:
            # must be python <2.5
            scheme = base_url[0]
            netloc = base_url[1]
            path = base_url[2]
            params = base_url[3]
            fragment = base_url[5]

        url = (scheme, netloc, path, params,
               urllib.urlencode(query, True), fragment)
        return urlparse.urlunparse(url)
Example #13
    def get(self, request, *args, **kwargs):
        validate_session_for_mturk(request, self.session)
        mturk_settings = self.session.config['mturk_hit_settings']
        initial = {
            'title': mturk_settings['title'],
            'description': mturk_settings['description'],
            'keywords': ', '.join(mturk_settings['keywords']),
            'money_reward': self.session.config['participation_fee'],
            'in_sandbox': settings.DEBUG,
            'minutes_allotted_per_assignment': (
                mturk_settings['minutes_allotted_per_assignment']
            ),
            'expiration_hours': mturk_settings['expiration_hours'],
            'assignments': self.session.mturk_num_participants,
        }
        form = self.get_form(initial=initial)
        context = self.get_context_data(form=form)
        context['mturk_enabled'] = (
            bool(settings.AWS_ACCESS_KEY_ID) and
            bool(settings.AWS_SECRET_ACCESS_KEY)
        )
        context['runserver'] = 'runserver' in sys.argv
        url = self.request.build_absolute_uri(
            reverse('MTurkCreateHIT', args=(self.session.code,))
        )
        secured_url = urlunparse(urlparse(url)._replace(scheme='https'))
        context['secured_url'] = secured_url

        return self.render_to_response(context)
Example #14
    def get_temp_url(self, container, object, timeout):
        """Returns the temp url for the given Swift object.

        :param container: The name of the container in which Swift object
            is placed.
        :param object: The name of the Swift object.
        :param timeout: The timeout in seconds after which the generated url
            should expire.
        :returns: The temp url for the object.
        :raises: SwiftOperationError, if any operation with Swift fails.
        """
        try:
            account_info = self.connection.head_account()
        except swift_exceptions.ClientException as e:
            operation = _("head account")
            raise exception.SwiftOperationError(operation=operation,
                                                error=e)

        storage_url, token = self.connection.get_auth()
        parse_result = parse.urlparse(storage_url)
        swift_object_path = '/'.join((parse_result.path, container, object))
        temp_url_key = account_info['x-account-meta-temp-url-key']
        url_path = swift_utils.generate_temp_url(swift_object_path, timeout,
                                                 temp_url_key, 'GET')
        return parse.urlunparse((parse_result.scheme,
                                 parse_result.netloc,
                                 url_path,
                                 None,
                                 None,
                                 None))
Example #15
def update_content( article, content, siteurl ):
    self = article
    
    stripped_content = content.strip()
    if not stripped_content.startswith( PREFIX ):
        return content
    
    # remove {filename}
    url = stripped_content[ len( PREFIX ): ]
    
    parse_result = urlparse( url )
    path = parse_result.path
    
    if path.startswith( '/' ):
        path = path[ 1: ]
    else:
        # relative link
        path = self.get_relative_source_path( os.path.join( self.relative_dir, path ) )
    
    # unescape spaces if necessary
    if path not in self._context[ 'filenames' ]:
        path = path.replace( '%20', ' ' )

        if path not in self._context[ 'filenames' ]:
            logger.warning(
                "Unable to find `%s`, skipping url replacement.", url,
                extra = { 'limit_msg': ( "Other resources were not found and their urls not replaced" ) }
            )
            return content
    
    linked_content = self._context[ 'filenames' ][ path ]
    parts = list( parse_result )
    parts[ 2 ] = '/'.join( [ siteurl, linked_content.url ] )
    return urlunparse( parts )
Example #16
    def __init__(self, info_yaml, basedir=''):
        """Import the solution's info.yaml file."""
        f, url_parts = self._open(info_yaml, basedir)
        solution_yaml = f.read().decode('utf-8')
        self.basedir = urlunparse((url_parts.scheme, url_parts.netloc,
                                   os.path.dirname(url_parts.path),
                                   None, None, None))

        # create a markdown converter and modify it to rebase image links
        markdown = Markdown()
        markdown.inlinePatterns['image_link'] = _RebasedImageLinkPattern(
            self.basedir, IMAGE_LINK_RE, markdown)
        markdown.inlinePatterns['image_reference'] = _RebasedImageRefPattern(
            self.basedir, IMAGE_REFERENCE_RE, markdown)

        # import the solution's metadata
        info = yaml.load(solution_yaml)
        self.id = hashlib.md5(solution_yaml.encode('utf-8')).hexdigest()
        self.title = info['name']
        self.release = str(info['release'])
        if 'logo' in info:
            self.logo = self._make_absolute_path(info.get('logo'),
                                                 self.basedir)[0]
        # in all the following fields, newlines are suppressed because they
        # are not rendered properly in Javascript strings by Django
        self.short_description = \
            markdown.convert(info['short_desc']).replace('\n', '')
        self.long_description = \
            markdown.convert(info['long_desc']).replace('\n', '')
        self.architecture = \
            markdown.convert(info['architecture']).replace('\n', '')
        self.design_specs = info.get('design_specs', [])
        self.heat_template = info['heat_template']
        self.env_file = info.get('env_file')  # environments are optional
Example #17
    def methodNext(self, previous_request, previous_response):
        """Retrieves the next page of results.

Args:
  previous_request: The request for the previous page. (required)
  previous_response: The response from the request for the previous page. (required)

Returns:
  A request object that you can call 'execute()' on to request the next
  page. Returns None if there are no more items in the collection.
    """
        # Retrieve nextPageToken from previous_response
        # Use as pageToken in previous_request to create new request.

        if "nextPageToken" not in previous_response or not previous_response["nextPageToken"]:
            return None

        request = copy.copy(previous_request)

        pageToken = previous_response["nextPageToken"]
        parsed = list(urlparse(request.uri))
        q = parse_qsl(parsed[4])

        # Find and remove old 'pageToken' value from URI
        newq = [(key, value) for (key, value) in q if key != "pageToken"]
        newq.append(("pageToken", pageToken))
        parsed[4] = urlencode(newq)
        uri = urlunparse(parsed)

        request.uri = uri

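        # NOTE: methodName is not defined in this excerpt; in the original
        # googleapiclient source it comes from the enclosing method factory.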
        logger.info("URL being requested: {0!s} {1!s}".format(methodName, uri))

        return request
Example #18
def configure_content(config):
    config.add_view(favicon_view, name='favicon.ico',
                    http_cache=(86400, {'public': True}))
    config.registry.skip_logging.add('/favicon.ico')
    config.add_view(robotstxt_view, name='robots.txt',
                    http_cache=(86400, {'public': True}))
    config.registry.skip_logging.add('/robots.txt')
    config.add_view(touchicon_view, name='apple-touch-icon-precomposed.png',
                    http_cache=(86400, {'public': True}))
    config.registry.skip_logging.add('/apple-touch-icon-precomposed.png')
    config.add_static_view(
        name='static', path='ichnaea.content:static', cache_max_age=86400)

    # BBB: leaders/leaders_weekly redirect to new service
    config.add_route('leaders_weekly', '/leaders/weekly')
    config.add_route('leaders', '/leaders')
    config.add_route('stats_regions', '/stats/regions')
    config.add_route('stats', '/stats')

    config.scan('ichnaea.content.views')

    assets_url = config.registry.settings.get('assets', {}).get('url', None)
    config.registry.tiles_url = tiles_url = map_tiles_url(assets_url)
    result = urlparse.urlsplit(tiles_url)
    tiles = urlparse.urlunparse((result.scheme, result.netloc, '', '', '', ''))
    config.registry.csp = CSP_POLICY.format(base=CSP_BASE, tiles=tiles)
Example #19
File: url.py Project: elacuesta/scrapy
def strip_url(url, strip_credentials=True, strip_default_port=True, origin_only=False, strip_fragment=True):

    """Strip URL string from some of its components:

    - ``strip_credentials`` removes "user:password@"
    - ``strip_default_port`` removes ":80" (resp. ":443", ":21")
      from http:// (resp. https://, ftp://) URLs
    - ``origin_only`` replaces path component with "/", also dropping
      query and fragment components ; it also strips credentials
    - ``strip_fragment`` drops any #fragment component
    """

    parsed_url = urlparse(url)
    netloc = parsed_url.netloc
    if (strip_credentials or origin_only) and (parsed_url.username or parsed_url.password):
        netloc = netloc.split('@')[-1]
    if strip_default_port and parsed_url.port:
        if (parsed_url.scheme, parsed_url.port) in (('http', 80),
                                                    ('https', 443),
                                                    ('ftp', 21)):
            netloc = netloc.replace(':{p.port}'.format(p=parsed_url), '')
    return urlunparse((
        parsed_url.scheme,
        netloc,
        '/' if origin_only else parsed_url.path,
        '' if origin_only else parsed_url.params,
        '' if origin_only else parsed_url.query,
        '' if strip_fragment else parsed_url.fragment
    ))
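
Exercised directly (standard library only), the flags behave as documented:

print(strip_url('http://user:pass@www.example.com:80/index.html#frag'))
# -> http://www.example.com/index.html
print(strip_url('https://www.example.com/page?q=1', origin_only=True))
# -> https://www.example.com/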
Example #20
    def __init__(self, requirement_string):
        try:
            req = REQUIREMENT.parseString(requirement_string)
        except ParseException as e:
            raise InvalidRequirement(
                'Parse error at "{0!r}": {1}'.format(
                    requirement_string[e.loc : e.loc + 8], e.msg
                )
            )

        self.name = req.name
        if req.url:
            parsed_url = urlparse.urlparse(req.url)
            if parsed_url.scheme == "file":
                if urlparse.urlunparse(parsed_url) != req.url:
                    raise InvalidRequirement("Invalid URL given")
            elif not (parsed_url.scheme and parsed_url.netloc) or (
                not parsed_url.scheme and not parsed_url.netloc
            ):
                raise InvalidRequirement("Invalid URL: {0}".format(req.url))
            self.url = req.url
        else:
            self.url = None
        self.extras = set(req.extras.asList() if req.extras else [])
        self.specifier = SpecifierSet(req.specifier)
        self.marker = req.marker if req.marker else None
Example #21
def relative_uri(source, target):
    """
    Make a relative URI from source to target.
    """
    su = urlparse.urlparse(source)
    tu = urlparse.urlparse(target)
    extra = list(tu[3:])
    relative = None
    if tu[0] == '' and tu[1] == '':
        if tu[2] == su[2]:
            relative = ''
        elif not tu[2].startswith('/'):
            relative = tu[2]
    elif su[0:2] != tu[0:2]:
        return target

    if relative is None:
        if tu[2] == su[2]:
            relative = ''
        else:
            relative = os.path.relpath(tu[2], os.path.dirname(su[2]))
    if relative == '.':
        relative = ''
    relative = urlparse.urlunparse(["", "", relative] + extra)
    return relative
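
A usage sketch, assuming the module-style urlparse import used above (e.g. from six.moves) and POSIX semantics for os.path.relpath:

print(relative_uri('/docs/a/index.html', '/docs/b/page.html'))
# -> ../b/page.html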
Example #22
    def create_plugin(self, session, version, url, raw_status=None):
        """Handle default Keystone endpoint configuration

        Build the actual API endpoint from the scheme, host and port of the
        original auth URL and the rest from the returned version URL.
        """

        ver_u = urlparse.urlparse(url)

        # Only hack this if it is the default setting
        if ver_u.netloc.startswith('localhost'):
            auth_u = urlparse.urlparse(self.auth_url)
            # from original auth_url: scheme, netloc
            # from api_url: path, query (basically, the rest)
            url = urlparse.urlunparse((
                auth_u.scheme,
                auth_u.netloc,
                ver_u.path,
                ver_u.params,
                ver_u.query,
                ver_u.fragment,
            ))
            LOG.debug('Version URL updated: %s' % url)

        return super(OSCGenericPassword, self).create_plugin(
            session=session,
            version=version,
            url=url,
            raw_status=raw_status,
        )
Example #23
def merge(request, pageTree, removePanelLinks=False, removeLayoutLink=True):
    """Perform panel merging for the given page.

    Returns None if the page has no layout.
    """

    # Find layout node
    layoutHref = utils.xpath1(utils.layoutXPath, pageTree)
    if layoutHref is None:
        return None

    # Resolve layout tree
    baseURL = request.getURL()
    if request.getVirtualRoot():
        # plone.subrequest deals with VHM requests
        baseURL = ''
    layoutHref = parse.urljoin(baseURL, layoutHref)  # noqa: make the link absolute
    # Pass special ajax_load parameter forward to allow layout indirection
    # views to select, for example, default AJAX layout instead of full layout.
    if request.form.get('ajax_load'):
        parts = list(parse.urlparse(layoutHref))
        query = parse.parse_qs(parts[4])
        query['ajax_load'] = request.form.get('ajax_load')
        parts[4] = parse.urlencode(query)
        layoutHref = parse.urlunparse(parts)
    layoutTree = utils.resolve(layoutHref)
    if layoutTree is None:
        return None

    # Map page panels onto the layout

    pagePanels = dict(
        (node.attrib['data-panel'], node)
        for node in utils.panelXPath(pageTree)
    )

    layoutPanels = dict(
        (node.attrib['data-panel'], node)
        for node in utils.panelXPath(layoutTree)
    )

    # Site layout should always have element with data-panel="content"
    # Note: This could be more generic, but that would empower editors too much
    if 'content' in pagePanels and 'content' not in layoutPanels:
        for node in layoutTree.xpath('//*[@id="content"]'):
            node.attrib['data-panel'] = 'content'
            layoutPanels['content'] = node
            break

    for panelId, layoutPanelNode in layoutPanels.items():
        pagePanelNode = pagePanels.get(panelId, None)
        if pagePanelNode is not None:
            utils.replace_content(layoutPanelNode, pagePanelNode)
        if removePanelLinks:
            del layoutPanelNode.attrib['data-panel']

    if removeLayoutLink:
        del pageTree.getroot().attrib[utils.layoutAttrib]

    return layoutTree
Example #24
File: oauth.py Project: mozilla/PyFxA
 def get_redirect_url(self, state="", redirect_uri=None, scope=None,
                      action=None, email=None, client_id=None,
                      code_challenge=None, code_challenge_method=None,
                      access_type=None, keys_jwk=None):
     """Get the URL to redirect to to initiate the oauth flow."""
     if client_id is None:
         client_id = self.client_id
     params = {
         "client_id": client_id,
         "state": state,
     }
     if redirect_uri is not None:
         params["redirect_uri"] = redirect_uri
     if scope is not None:
         params["scope"] = scope
     if action is not None:
         params["action"] = action
     if email is not None:
         params["email"] = email
     if code_challenge is not None:
         params["code_challenge"] = code_challenge
     if code_challenge_method is not None:
         params["code_challenge_method"] = code_challenge_method
     if keys_jwk is not None:
         params["keys_jwk"] = keys_jwk
     if access_type is not None:
         params["access_type"] = access_type
     query_str = urlencode(params)
     authorization_url = urlparse(self.server_url + "/authorization")
     return urlunparse(authorization_url._replace(query=query_str))
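
The ParseResult._replace idiom on the last line can be shown in isolation; the hostname here is illustrative:

from urllib.parse import urlparse, urlunparse, urlencode

base = urlparse('https://accounts.example.com/authorization')
print(urlunparse(base._replace(query=urlencode({'client_id': 'abc', 'state': 'xyz'}))))
# -> https://accounts.example.com/authorization?client_id=abc&state=xyz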
Example #25
def build_url(base, additional_params=None):
    """Construct a URL based off of base containing all parameters in
    the query portion of base plus any additional parameters.

    :param base: Base URL
    :type base: str
    :param additional_params: Additional query parameters to include.
    :type additional_params: dict
    :rtype: str
    """
    url = urlparse(base)
    query_params = {}
    query_params.update(parse_qsl(url.query, True))
    if additional_params is not None:
        query_params.update(additional_params)
        for k, v in six.iteritems(additional_params):
            if v is None:
                query_params.pop(k)

    return urlunparse((url.scheme,
                       url.netloc,
                       url.path,
                       url.params,
                       urlencode(query_params),
                       url.fragment))
Example #26
 def check(self, instance):
     if not self.cadvisor_url:
         cadvisor_url = instance.get("cadvisor_url", None)
         detect_cadvisor_url = instance.get("kubernetes_detect_cadvisor", False)
         if not cadvisor_url:
             if detect_cadvisor_url:
                 kubernetes_connector = utils.KubernetesConnector(self.connection_timeout)
                 host = kubernetes_connector.get_agent_pod_host()
                 cadvisor_url = "http://{}:4194".format(host)
             else:
                 exception_message = "Either cAdvisor url or kubernetes " \
                                     "detect cAdvisor must be set when " \
                                     "monitoring a Kubernetes Node."
                 self.log.error(exception_message)
                 raise Exception(exception_message)
         self.cadvisor_url = "{}/{}".format(cadvisor_url, "api/v2.0/stats?count=1")
     dimensions = self._set_dimensions(None, instance)
     try:
         host_metrics = requests.get(self.cadvisor_url, timeout=self.connection_timeout).json()
     except Exception as e:
         self.log.error("Error communicating with cAdvisor to collect data - {}".format(e))
     else:
         # Retrieve machine info only once
         if not self.cadvisor_machine_url:
             # Replace path in current cadvisor_url
             result = urlparse(self.cadvisor_url)
             self.cadvisor_machine_url = urlunparse(result._replace(path="api/v2.0/machine"))
             try:
                 machine_info = requests.get(self.cadvisor_machine_url).json()
             except Exception as ex:
                 self.log.error(
                     "Error communicating with cAdvisor to collect machine data - {}".format(ex))
             else:
                 self._parse_machine_info(machine_info)
         self._parse_send_metrics(host_metrics, dimensions)
Example #27
File: filters.py Project: adamchainz/vcrpy
def replace_query_parameters(request, replacements):
    """
    Replace query parameters in request according to replacements. The
    replacements should be a list of (key, value) pairs where the value can be
    any of:
      1. A simple replacement string value.
      2. None to remove the given header.
      3. A callable which accepts (key, value, request) and returns a string
         value or None.
    """
    query = request.query
    new_query = []
    replacements = dict(replacements)
    for k, ov in query:
        if k not in replacements:
            new_query.append((k, ov))
        else:
            rv = replacements[k]
            if callable(rv):
                rv = rv(key=k, value=ov, request=request)
            if rv is not None:
                new_query.append((k, rv))
    uri_parts = list(urlparse(request.uri))
    uri_parts[4] = urlencode(new_query)
    request.uri = urlunparse(uri_parts)
    return request
Example #28
def _parseURL(url):
    try:
        url = urinorm.urinorm(url)
    except ValueError:
        return None
    proto, netloc, path, params, query, frag = urlparse(url)
    if not path:
        # Python <2.4 does not parse URLs with no path properly
        if not query and '?' in netloc:
            netloc, query = netloc.split('?', 1)

        path = '/'

    path = urlunparse(('', '', path, params, query, frag))

    if ':' in netloc:
        try:
            host, port = netloc.split(':')
        except ValueError:
            return None

        if not re.match(r'\d+$', port):
            return None
    else:
        host = netloc
        port = ''

    host = host.lower()
    if not host_segment_re.match(host):
        return None

    return proto, host, port, path
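
A hedged usage sketch; it assumes the python-openid urinorm module imported by the surrounding file, which lowercases the scheme and host:

_parseURL('HTTP://Example.COM:8000/a/b?x=1')
# -> ('http', 'example.com', '8000', '/a/b?x=1')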
Example #29
File: util.py Project: meteogrid/OWSLib
def clean_ows_url(url):
    """
    clean an OWS URL of basic service elements

    source: https://stackoverflow.com/a/11640565
    """

    if url is None or not url.startswith('http'):
        return url

    filtered_kvp = {}
    basic_service_elements = ('service', 'version', 'request')

    parsed = urlparse(url)
    qd = parse_qs(parsed.query, keep_blank_values=True)

    for key, value in qd.items():
        if key.lower() not in basic_service_elements:
            filtered_kvp[key] = value

    newurl = urlunparse([
        parsed.scheme,
        parsed.netloc,
        parsed.path,
        parsed.params,
        urlencode(filtered_kvp, doseq=True),
        parsed.fragment
    ])

    return newurl
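
Since this helper only needs urllib.parse, it can be exercised directly; note that urlencode percent-encodes the retained values:

print(clean_ows_url('http://example.com/wms?SERVICE=WMS&request=GetCapabilities&map=/a.map'))
# -> http://example.com/wms?map=%2Fa.map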
Example #30
    def get_sample_data(self, meter_name, parse_url, params, cache):

        extractor = self._get_extractor(meter_name)
        if extractor is None:
            # Getting this meter is not implemented in this driver, or the
            # OpenDaylight REST API has no endpoint for it.
            return None

        iter = self._get_iter(meter_name)
        if iter is None:
            # Getting this meter is not implemented in this driver, or the
            # OpenDaylight REST API has no endpoint for it.
            return None

        parts = urlparse.ParseResult(
            params.get("scheme", ["http"])[0], parse_url.netloc, parse_url.path, None, None, None
        )
        endpoint = urlparse.urlunparse(parts)

        data = self._prepare_cache(endpoint, params, cache)

        samples = []
        for name, value in six.iteritems(data):
            timestamp = value["timestamp"]
            for sample in iter(extractor, value):
                if sample is not None:
                    # set controller name and container name
                    # to resource_metadata
                    sample[2]["controller"] = "OpenDaylight"
                    sample[2]["container"] = name

                    samples.append(sample + (timestamp,))

        return samples
Example #31
 def urlunparse(self, url_tuple):
     """Puts a url back together again, in the manner that
     urlparse breaks it.
     """
     return parse.urlunparse(url_tuple)
Example #32
def spec_url(spec_file, schema='file'):
    directory = path.abspath(path.dirname(__file__))
    full_path = path.join(directory, spec_file)
    return urlunparse((schema, None, full_path, None, None, None))
Example #33
    def post(self, request, *args, **kwargs):
        form = self.get_form(data=request.POST, files=request.FILES)
        if not form.is_valid():
            return self.form_invalid(form)
        session = self.session
        use_sandbox = 'use_sandbox' in form.data
        # session can't be created
        if (not self.in_public_domain(request, *args, **kwargs)
                and not use_sandbox):
            msg = ('<h1>Error: '
                   'oTree must run on a public domain for Mechanical Turk'
                   '</h1>')
            return HttpResponseServerError(msg)
        mturk_settings = session.config['mturk_hit_settings']
        qualification_id = mturk_settings.get('grant_qualification_id', None)
        # verify that specified qualification type
        # for preventing retakes exists on mturk server

        url_landing_page = self.request.build_absolute_uri(
            reverse('MTurkLandingPage', args=(session.code, )))

        # updating scheme from http to https
        # this is compulsory for MTurk ExternalQuestion
        # TODO: validate, that the server support https
        #       (heroku does support by default)
        secured_url_landing_page = urlunparse(
            urlparse(url_landing_page)._replace(scheme='https'))

        # TODO: validate that there is enough money for the hit
        money_reward = form.cleaned_data['money_reward']

        # assign back to participation_fee, in case it was changed
        # in the form
        # need to convert back to RealWorldCurrency, because easymoney
        # MoneyFormField returns a decimal, not Money (not sure why)
        # see views.admin.EditSessionProperties
        session.config['participation_fee'] = RealWorldCurrency(money_reward)

        external_question = '''
        <ExternalQuestion xmlns="http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/ExternalQuestion.xsd">
          <ExternalURL>{}</ExternalURL>
          <FrameHeight>{}</FrameHeight>
        </ExternalQuestion>
        '''.format(secured_url_landing_page, mturk_settings['frame_height'])

        qualifications = mturk_settings.get('qualification_requirements')

        if qualifications and not isinstance(qualifications[0], dict):
            raise ValueError(
                'settings.py: You need to upgrade your MTurk qualification_requirements '
                'to the boto3 format. See the documentation.')

        mturk_hit_parameters = {
            'Title':
            form.cleaned_data['title'],
            'Description':
            form.cleaned_data['description'],
            'Keywords':
            form.cleaned_data['keywords'],
            'Question':
            external_question,
            'MaxAssignments':
            form.cleaned_data['assignments'],
            'Reward':
            str(float(money_reward)),
            'QualificationRequirements':
            qualifications,
            'AssignmentDurationInSeconds':
            60 * form.cleaned_data['minutes_allotted_per_assignment'],
            'LifetimeInSeconds':
            int(60 * 60 * form.cleaned_data['expiration_hours']),
            # prevent duplicate HITs
            'UniqueRequestToken':
            'otree_{}'.format(session.code),
        }

        with MTurkClient(use_sandbox=use_sandbox,
                         request=request) as mturk_client:
            if qualification_id:
                try:
                    mturk_client.get_qualification_type(
                        QualificationTypeId=qualification_id)
                # it's a RequestError, but catch Exception broadly to be safe
                except Exception as exc:
                    if use_sandbox:
                        sandbox_note = (
                            'You are currently using the sandbox, so you '
                            'can only grant qualifications that were '
                            'also created in the sandbox.')
                    else:
                        sandbox_note = (
                            'You are using the MTurk live site, so you '
                            'can only grant qualifications that were '
                            'also created on the live site, and not the '
                            'MTurk sandbox.')
                    msg = (
                        "In settings.py you specified qualification ID '{qualification_id}' "
                        "MTurk returned the following error: [{exc}] "
                        "Note: {sandbox_note}".format(
                            qualification_id=qualification_id,
                            exc=exc,
                            sandbox_note=sandbox_note))
                    messages.error(request, msg)
                    return HttpResponseRedirect(
                        reverse('MTurkCreateHIT', args=(session.code, )))

            hit = mturk_client.create_hit(**mturk_hit_parameters)['HIT']

            session.mturk_HITId = hit['HITId']
            session.mturk_HITGroupId = hit['HITGroupId']
            session.mturk_use_sandbox = use_sandbox
            session.save()

        return HttpResponseRedirect(
            reverse('MTurkCreateHIT', args=(session.code, )))
Example #34
File: depot.py Project: jahau/solaris-ips
                if not netloc:
                    raise OptionError("Unable to "
                                      "determine the hostname from "
                                      "the provided URL; please use a "
                                      "fully qualified URL.")

                scheme = scheme.lower()
                if scheme not in ("http", "https"):
                    raise OptionError("Invalid URL; http "
                                      "and https are the only supported "
                                      "schemes.")

                # Rebuild the url with the sanitized components.
                ivalues["pkg"]["proxy_base"] = \
                    urlunparse((scheme, netloc, path,
                    params, query, fragment))
            elif opt == "--readonly":
                ivalues["pkg"]["readonly"] = True
            elif opt == "--rebuild":
                rebuild = True
            elif opt == "--refresh-index":
                # Note: This argument is for internal use
                # only.
                #
                # This flag is purposefully omitted in usage.
                # The supported way to forcefully reindex is to
                # kill any pkg.depot using that directory,
                # remove the index directory, and restart the
                # pkg.depot process. The index will be rebuilt
                # automatically on startup.
                reindex = True
Example #35
 def _create_endpoint(self, scheme, host, port, service_path):
     """Creates end point url for the service."""
     netloc = '%s:%s' % (host, port)
     self._endpoint = urlparse.urlunparse(
         (scheme, netloc, service_path, None, None, None))
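
The None placeholders work because urlunparse only appends params, query and fragment when they are truthy; a quick standard-library check:

from urllib.parse import urlunparse

print(urlunparse(('http', 'example.com:8774', '/v2.1', None, None, None)))
# -> http://example.com:8774/v2.1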
Example #36
File: auth.py Project: ssundari/tempest
    def base_url(self, filters, auth_data=None):
        """Base URL from catalog

        Filters can be:
        - service: compute, image, etc
        - region: the service region
        - endpoint_type: adminURL, publicURL, internalURL
        - api_version: replace catalog version with this
        - skip_path: take just the base URL
        """
        if auth_data is None:
            auth_data = self.auth_data
        token, _auth_data = auth_data
        service = filters.get('service')
        region = filters.get('region')
        endpoint_type = filters.get('endpoint_type', 'public')

        if service is None:
            raise exceptions.EndpointNotFound("No service provided")

        if 'URL' in endpoint_type:
            endpoint_type = endpoint_type.replace('URL', '')
        _base_url = None
        catalog = _auth_data['catalog']
        # Select entries with matching service type
        service_catalog = [ep for ep in catalog if ep['type'] == service]
        if len(service_catalog) > 0:
            service_catalog = service_catalog[0]['endpoints']
        else:
            # No matching service
            raise exceptions.EndpointNotFound(service)
        # Filter by endpoint type (interface)
        filtered_catalog = [
            ep for ep in service_catalog if ep['interface'] == endpoint_type
        ]
        if len(filtered_catalog) == 0:
            # No matching type, keep all and try matching by region at least
            filtered_catalog = service_catalog
        # Filter by region
        filtered_catalog = [
            ep for ep in filtered_catalog if ep['region'] == region
        ]
        if len(filtered_catalog) == 0:
            # No matching region, take the first endpoint
            filtered_catalog = [service_catalog[0]]
        # There should be only one match. If not take the first.
        _base_url = filtered_catalog[0].get('url', None)
        if _base_url is None:
            raise exceptions.EndpointNotFound(service)

        parts = urlparse.urlparse(_base_url)
        if filters.get('api_version', None) is not None:
            version_path = '/%s' % filters['api_version']
            path = re.sub(r'(^|/)+v\d+(?:\.\d+)?',
                          version_path,
                          parts.path,
                          count=1)
            _base_url = urlparse.urlunparse(
                (parts.scheme, parts.netloc, path
                 or version_path, parts.params, parts.query, parts.fragment))
        if filters.get('skip_path', None) is not None:
            _base_url = urlparse.urlunparse(
                (parts.scheme, parts.netloc, '/', parts.params, parts.query,
                 parts.fragment))

        return _base_url
Example #37
File: util.py Project: njisrawi/eph
def addparams2url(url, params):
    if urlparse(url).query:
        return url + '&' + urlencode(params)
    else:
        return urlunparse(urlparse(url)) + '?' + urlencode(params)
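
Exercised with illustrative URLs (the bare urlparse/urlencode/urlunparse names are assumed to be imported from urllib.parse):

print(addparams2url('http://example.com/api', {'a': '1'}))
# -> http://example.com/api?a=1
print(addparams2url('http://example.com/api?a=1', {'b': '2'}))
# -> http://example.com/api?a=1&b=2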
Example #38
def _DeriveCrmRegionalEndpoint(endpoint, location):
    scheme, netloc, path, params, query, fragment = parse.urlparse(endpoint)
    netloc = '{}-{}'.format(location, netloc)
    return parse.urlunparse((scheme, netloc, path, params, query, fragment))
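
A usage sketch with hypothetical values, showing how the location is spliced into the hostname:

print(_DeriveCrmRegionalEndpoint('https://cloudresourcemanager.googleapis.com/', 'us-central1'))
# -> https://us-central1-cloudresourcemanager.googleapis.com/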
Example #39
    def _open_url(self, url):
        """
        Open an URL 'url' and return the file-like object of the opened URL.
        """
        def _print_warning(timeout):
            """
            This is a small helper function for printing a warning if we cannot
            open the URL for some time.
            """
            _log.warning("failed to open the URL with %d sec timeout, is the "
                         "proxy configured correctly? Keep trying ..." %
                         timeout)

        import socket

        from six.moves import http_client as httplib
        from six.moves.urllib import request as urllib
        from six.moves.urllib.error import URLError

        parsed_url = urlparse.urlparse(url)

        if parsed_url.scheme == "ssh":
            # Unfortunately, urllib2 does not handle "ssh://" URLs
            self._open_url_ssh(parsed_url)
            return

        username = parsed_url.username
        password = parsed_url.password

        if username and password:
            # Unfortunately, in order to handle URLs which contain a user name
            # and password (e.g., http://user:password@example.com), we need
            # to do a few extra things.
            new_url = list(parsed_url)
            if parsed_url.port:
                new_url[1] = "%s:%s" % (parsed_url.hostname, parsed_url.port)
            else:
                new_url[1] = parsed_url.hostname
            url = urlparse.urlunparse(new_url)

            # Build an URL opener which will do the authentication
            password_manager = urllib.HTTPPasswordMgrWithDefaultRealm()
            password_manager.add_password(None, url, username, password)
            auth_handler = urllib.HTTPBasicAuthHandler(password_manager)
            opener = urllib.build_opener(auth_handler)
        else:
            opener = urllib.build_opener()

        opener.addheaders = [('User-Agent', 'Mozilla/5.0')]
        urllib.install_opener(opener)

        # Open the URL. First try with a short timeout, and print a message
        # which should give the user a clue that something may be going
        # wrong.
        # The overall purpose of this is to improve user experience. For
        # example, if one tries to open a file but did not set up the proxy
        # environment variables properly, there will be a very long delay before
        # the failure message. And it is much nicer to pre-warn the user early
        # about something possibly being wrong.
        for timeout in (10, None):
            try:
                f_obj = opener.open(url, timeout=timeout)
            # Handling the timeout case in Python 2.7
            except socket.timeout as err:
                if timeout is not None:
                    _print_warning(timeout)
                else:
                    raise Error("cannot open URL '%s': %s" % (url, err))
            except URLError as err:
                # Handling the timeout case in Python 2.6
                if timeout is not None and \
                   isinstance(err.reason, socket.timeout):
                    _print_warning(timeout)
                else:
                    raise Error("cannot open URL '%s': %s" % (url, err))
            except (IOError, ValueError, httplib.InvalidURL) as err:
                raise Error("cannot open URL '%s': %s" % (url, err))
            except httplib.BadStatusLine:
                raise Error("cannot open URL '%s': server responds with an "
                            "HTTP status code that we don't understand" % url)

        self.is_url = True
        self._f_objs.append(f_obj)
Example #40
def db_clean_legacy():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([
        cfg.StrOpt('confirm-drop-table',
                   short='n',
                   help='confirm to drop the legacy tables')
    ])
    if not conf.confirm_drop_table:
        confirm = moves.input("Do you really want to drop the legacy "
                              "alarm and event tables? This will destroy "
                              "data definitively if it exists. Please type "
                              "'YES' to confirm: ")
        if confirm != 'YES':
            print("DB legacy cleanup aborted!")
            return

    service.prepare_service(conf=conf)

    url = (getattr(conf.database, "metering_connection")
           or conf.database.connection)
    parsed = urlparse.urlparse(url)

    if parsed.password:
        masked_netloc = '****'.join(parsed.netloc.rsplit(parsed.password))
        masked_url = parsed._replace(netloc=masked_netloc)
        masked_url = urlparse.urlunparse(masked_url)
    else:
        masked_url = url
    LOG.info(
        _LI('Starting to drop event, alarm and alarm history tables in '
            'backend: %s'), masked_url)

    connection_scheme = parsed.scheme
    conn = storage.get_connection_from_config(conf)
    if connection_scheme in ('mysql', 'mysql+pymysql', 'postgresql', 'sqlite'):
        engine = conn._engine_facade.get_engine()
        meta = sa.MetaData(bind=engine)
        for table_name in ('alarm', 'alarm_history', 'trait_text', 'trait_int',
                           'trait_float', 'trait_datetime', 'event',
                           'event_type'):
            if engine.has_table(table_name):
                table = sa.Table(table_name, meta, autoload=True)
                table.drop()
                LOG.info(
                    _LI("Legacy %s table of SQL backend has been "
                        "dropped."), table_name)
            else:
                LOG.info(_LI('%s table does not exist.'), table_name)

    elif connection_scheme == 'hbase':
        with conn.conn_pool.connection() as h_conn:
            tables = h_conn.tables()
            table_name_mapping = {
                'alarm': 'alarm',
                'alarm_h': 'alarm history',
                'event': 'event'
            }
            for table_name in ('alarm', 'alarm_h', 'event'):
                try:
                    if table_name in tables:
                        h_conn.disable_table(table_name)
                        h_conn.delete_table(table_name)
                        LOG.info(
                            _LI("Legacy %s table of Hbase backend "
                                "has been dropped."),
                            table_name_mapping[table_name])
                    else:
                        LOG.info(_LI('%s table does not exist.'),
                                 table_name_mapping[table_name])
                except Exception as e:
                    LOG.error(
                        _LE('Error occurred while dropping alarm '
                            'tables of Hbase, %s'), e)

    elif connection_scheme == 'mongodb':
        for table_name in ('alarm', 'alarm_history', 'event'):
            if table_name in conn.db.conn.collection_names():
                conn.db.conn.drop_collection(table_name)
                LOG.info(
                    _LI("Legacy %s table of Mongodb backend has been "
                        "dropped."), table_name)
            else:
                LOG.info(_LI('%s table does not exist.'), table_name)
    LOG.info('Legacy alarm and event tables cleanup done.')
Example #41
    def _StartAction(self, mediaProvider):
        if not mediaProvider:
            raise RuntimeError('invalid mediaProvider')

        # if we are already connected check if something important changed in the media provider
        if self._connected:
            if kodi.Api.compareMediaProviders(self._mediaProvider,
                                              mediaProvider):
                # update the media provider and settings anyway
                self._mediaProvider = mediaProvider
                self._settings = self._mediaProvider.prepareSettings()
                return True

        self._StopAction(restart=True)

        self._mediaProvider = mediaProvider

        self._settings = self._mediaProvider.prepareSettings()
        if not self._settings:
            raise RuntimeError('cannot prepare media provider settings')

        try:
            # create emby server instance
            self._server = Server(self._mediaProvider)

            # authenticate with the Emby server
            authenticated = self._server.Authenticate()
        except:
            authenticated = False

        if not authenticated:
            ProviderObserver.log(
                'failed to authenticate with {}'.format(
                    mediaProvider2str(self._mediaProvider)), xbmc.LOGERROR)
            self._Reset()
            return False

        # analyze the media provider's URL
        urlParts = urlparse(self._mediaProvider.getBasePath())
        # determine the proper scheme for the websocket (ws:// or wss://)
        websocketScheme = 'wss' if urlParts.scheme == 'https' else 'ws'
        # put the URL back together
        url = urlunparse(
            urlParts._replace(scheme=websocketScheme, path='embywebsocket'))
        url = Url.addOptions(
            url, {
                URL_QUERY_API_KEY: self._server.AccessToken(),
                URL_QUERY_DEVICE_ID: self._server.DeviceId()
            })

        # create the websocket
        self._websocket = websocket.WebSocket()

        # connect the websocket
        try:
            self._websocket.connect(url)
        except Exception as err:
            ProviderObserver.log(
                'failed to connect to {} using a websocket. {}'.format(
                    url, err), xbmc.LOGERROR)
            self._Reset()
            return False

        # reduce the timeout
        self._websocket.settimeout(1.0)

        ProviderObserver.log(
            'successfully connected to {} to observe media imports'.format(
                mediaProvider2str(self._mediaProvider)))
        self._connected = True
        return True
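
A minimal sketch of the scheme swap above, assuming a hypothetical base path (the hostname is made up; 'embywebsocket' is the path this example actually uses):

from urllib.parse import urlparse, urlunparse

parts = urlparse('https://emby.example.com:8920/')
scheme = 'wss' if parts.scheme == 'https' else 'ws'
print(urlunparse(parts._replace(scheme=scheme, path='embywebsocket')))
# -> wss://emby.example.com:8920/embywebsocket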
Example #42
    def get_canonicalized_asset_path(course_key,
                                     path,
                                     base_url,
                                     excluded_exts,
                                     encode=True):
        """
        Returns a fully-qualified path to a piece of static content.

        If a static asset CDN is configured, this path will include it.
        Otherwise, the path will simply be relative.

        Args:
            course_key: key to the course which owns this asset
            path: the path to said content
            base_url: the base URL (e.g. a CDN) to prepend for unlocked assets
            excluded_exts: file extensions that should never be served via CDN
            encode: whether to percent-encode the resulting asset path

        Returns:
            string: fully-qualified path to asset
        """

        # Break down the input path.
        _, _, relative_path, params, query_string, _ = urlparse(path)

        # Convert our path to an asset key if it isn't one already.
        asset_key = StaticContent.get_asset_key_from_path(
            course_key, relative_path)

        # Check the status of the asset to see if this can be served via CDN aka publicly.
        serve_from_cdn = False
        content_digest = None
        try:
            content = AssetManager.find(asset_key, as_stream=True)
            serve_from_cdn = not getattr(content, "locked", True)
            content_digest = getattr(content, "content_digest", None)
        except (ItemNotFoundError, NotFoundError):
            # If we can't find the item, just treat it as if it's locked.
            serve_from_cdn = False

        # Do a generic check to see if anything about this asset disqualifies it from being CDN'd.
        is_excluded = False
        if StaticContent.is_excluded_asset_type(relative_path, excluded_exts):
            serve_from_cdn = False
            is_excluded = True

        # Update any query parameter values that have asset paths in them. This is for assets that
        # require their own after-the-fact values, like a Flash file that needs the path of a config
        # file passed to it e.g. /static/visualization.swf?configFile=/static/visualization.xml
        query_params = parse_qsl(query_string)
        updated_query_params = []
        for query_name, query_val in query_params:
            if query_val.startswith("/static/"):
                new_val = StaticContent.get_canonicalized_asset_path(
                    course_key,
                    query_val,
                    base_url,
                    excluded_exts,
                    encode=False)
                updated_query_params.append((query_name, new_val))
            else:
                # Make sure we're encoding Unicode strings down to their byte string
                # representation so that `urlencode` can handle it.
                updated_query_params.append(
                    (query_name, query_val.encode('utf-8')))

        serialized_asset_key = StaticContent.serialize_asset_key_with_slash(
            asset_key)
        base_url = base_url if serve_from_cdn else ''
        asset_path = serialized_asset_key

        # If the content has a digest (i.e. md5sum) value specified, create a versioned path to the asset using it.
        if not is_excluded and content_digest:
            asset_path = StaticContent.add_version_to_asset_path(
                serialized_asset_key, content_digest)

        # Only encode this if told to.  Important so that we don't double encode
        # when working with paths that are in query parameters.
        asset_path = asset_path.encode('utf-8')
        if encode:
            asset_path = quote_plus(asset_path, '/:+@')

        return urlunparse(('', base_url.encode('utf-8'), asset_path, params,
                           urlencode(updated_query_params), ''))
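
The query-parameter rewriting above can be shown in isolation. A minimal sketch, with a hypothetical '/assets/' prefix standing in for the recursive get_canonicalized_asset_path call:

from urllib.parse import parse_qsl, urlencode

query_string = 'configFile=/static/visualization.xml&width=640'
updated = [(k, '/assets/' + v[len('/static/'):])  # hypothetical rewrite
           if v.startswith('/static/') else (k, v)
           for k, v in parse_qsl(query_string)]
print(urlencode(updated))
# -> configFile=%2Fassets%2Fvisualization.xml&width=640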
Example #43
def build_api_serving_url(spec_dict, origin_url=None, preferred_scheme=None):
    """The URL used to service API requests does not necessarily have to be the
    same URL that was used to retrieve the API spec_dict.

    The existence of three fields in the root of the specification govern
    the value of the api_serving_url:

    - host string
        The host (name or ip) serving the API. This MUST be the host only and
        does not include the scheme nor sub-paths. It MAY include a port.
        If the host is not included, the host serving the documentation is to
        be used (including the port). The host does not support path templating.

    - basePath string
        The base path on which the API is served, which is relative to the
        host. If it is not included, the API is served directly under the host.
        The value MUST start with a leading slash (/). The basePath does not
        support path templating.

    - schemes [string]
        The transfer protocol of the API. Values MUST be from the list:
        "http", "https", "ws", "wss". If the schemes is not included,
        the default scheme to be used is the one used to access the
        specification.

    See https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#swagger-object-   # noqa

    :param spec_dict: the Swagger spec in json-like dict form
    :param origin_url: the URL from which the spec was retrieved, if any. This
        is only used in Swagger clients.
    :param preferred_scheme: preferred scheme to use if more than one scheme is
        supported by the API.
    :return: base url which services api requests
    :raises: SwaggerSchemaError
    """
    origin_url = origin_url or 'http://localhost/'
    origin = urlparse(origin_url)

    def pick_a_scheme(schemes):
        if not schemes:
            return origin.scheme

        if preferred_scheme:
            if preferred_scheme in schemes:
                return preferred_scheme
            raise SwaggerSchemaError(
                "Preferred scheme {0} not supported by API. Available schemes "
                "include {1}".format(preferred_scheme, schemes))

        if origin.scheme in schemes:
            return origin.scheme

        if len(schemes) == 1:
            return schemes[0]

        raise SwaggerSchemaError(
            "Origin scheme {0} not supported by API. Available schemes "
            "include {1}".format(origin.scheme, schemes))

    netloc = spec_dict.get('host', origin.netloc)
    path = spec_dict.get('basePath', '/')
    scheme = pick_a_scheme(spec_dict.get('schemes'))
    return urlunparse((scheme, netloc, path, None, None, None))
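
A usage sketch with a made-up spec and origin URL:

spec = {'host': 'api.example.com', 'basePath': '/v2',
        'schemes': ['http', 'https']}
print(build_api_serving_url(spec,
                            origin_url='http://docs.example.com/swagger.json',
                            preferred_scheme='https'))
# -> https://api.example.com/v2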
Example #44
    def get_repo_url(self):
        return urlunparse(("file", "", pathname2url(self.__dir), "", "", ""))
Example #45
def encode_url_path(url, safe=SAFE_CHARS):
    from six.moves.urllib.parse import urlparse, urlunparse  # pylint: disable=import-error
    url_parts = urlparse(url)
    quoted_path = encode_for_url(url_parts.path, safe)
    return urlunparse(url_parts[:2] + (quoted_path, ) + url_parts[3:])
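
A hand-rolled equivalent, with quote() standing in for encode_for_url; only the path component is percent-encoded, the query is left untouched:

from six.moves.urllib.parse import urlparse, urlunparse, quote

parts = urlparse('https://example.com/path with spaces?q=a b')
print(urlunparse(parts[:2] + (quote(parts.path),) + parts[3:]))
# -> https://example.com/path%20with%20spaces?q=a b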
Example #46
File: path.py Project: populse/soma-base
def update_query_string(path,
                        params,
                        params_update_mode=QueryStringParamUpdateMode.REPLACE):
    '''
    Update the query string parameters in a path.

    Parameters
    ----------
    path: string
          The path to update parameters within.

    params: dict|list
          A dictionary that contains the keys and values to set in the query
          string

    params_update_mode: dict|string|list|int
          The default value is QueryStringParamUpdateMode.REPLACE, which
          replaces values in the query string with the ones given in the
          params dictionary.

          The default behaviour can be changed by giving the value
          QueryStringParamUpdateMode.APPEND, which always appends values of
          the params dictionary to the values already in the query string.
          It can also be changed by specifying a parameter name as a string,
          in which case only values for that parameter name will be appended,
          or a list or tuple of parameter names for which values will be
          appended.

          Finally, this parameter can be a dictionary that specifies, for
          each parameter, whether its values are appended or replaced. The
          dictionary maps parameter names to QueryStringParamUpdateMode
          values.

    Returns
    -------
    path: string
          The path updated with given parameters

    Example
    -------
    A path containing a query string is::

        /dir1/file1?param1=val1&param2=val2&paramN=valN

    the params dictionary contains::

        {'param1': 'newval1', 'param2': 'newval2', 'param3': 'newval3'}

    ::

        update_query_string('/dir1/file1?param1=val1&param2=val2&paramN=valN',
                            {'param1':'newval1', 'param2':'newval2', 'param3':'newval3'})

    would return::

        '/dir1/file1?param1=newval1&param2=newval2&paramN=valN&param3=newval3'

    ::

        update_query_string('/dir1/file1?param1=val1&param2=val2&paramN=valN',
                            {'param1':'newval1', 'param2':'newval2', 'param3':'newval3'},
                            QueryStringParamUpdateMode.APPEND)

    would return::

        '/dir1/file1?param1=val1&param1=newval1&param2=val2&param2=newval2&paramN=valN&param3=newval3'

    ::

        update_query_string('/dir1/file1?param1=val1&param2=val2&paramN=valN',
                            {'param1':'newval1', 'param2':'newval2', 'param3':'newval3'},
                            'param2')

    would return::

        '/dir1/file1?param1=newval1&param2=val2&param2=newval2&paramN=valN&param3=newval3'

    ::

        update_query_string('/dir1/file1?param1=val1&param2=val2&paramN=valN',
                            {'param1':'newval1', 'param2':'newval2', 'param3':'newval3'},
                            ('param1', 'param2'))

    would return::

        '/dir1/file1?param1=val1&param1=newval1&param2=val2&param2=newval2&paramN=valN&param3=newval3'

    ::

        update_query_string('/dir1/file1?param1=val1&param2=val2&paramN=valN',
                            {'param1':'newval1', 'param2':'newval2', 'param3':'newval3'},
                            {'param1': QueryStringParamUpdateMode.APPEND,
                            'param2': QueryStringParamUpdateMode.REPLACE})

    would return::

        '/dir1/file1?param1=val1&param1=newval1&param2=val2&param2=newval2&paramN=valN&param3=newval3'
    '''
    from six.moves.urllib import parse as urllib

    # Convert params_update_mode to a dictionary that contains the update mode
    # for each parameter
    if type(params_update_mode) in (list, tuple):
        # Update mode is specified using a list of parameter names
        default_update_mode = QueryStringParamUpdateMode.REPLACE
        params_update = params_update_mode
        params_update_mode = dict()

        for p in params_update:
            if (type(p) in (list, tuple)):
                if (len(p) > 1):
                    params_update_mode[p[0]] = p[1]
                elif (len(p) > 0):
                    params_update_mode[
                        p[0]] = QueryStringParamUpdateMode.APPEND
            else:
                params_update_mode[p] = QueryStringParamUpdateMode.APPEND

    elif isinstance(params_update_mode, six.string_types):
        # A parameter name was given directly
        default_update_mode = QueryStringParamUpdateMode.REPLACE
        params_update_mode = dict(
            ((params_update_mode, QueryStringParamUpdateMode.APPEND), ))

    elif params_update_mode in (QueryStringParamUpdateMode.APPEND,
                                QueryStringParamUpdateMode.REPLACE,
                                QueryStringParamUpdateMode.REMOVE):
        # Update mode was specified for all parameters
        default_update_mode = params_update_mode
        params_update_mode = dict()

    elif type(params_update_mode) is dict:
        default_update_mode = QueryStringParamUpdateMode.REPLACE

    else:
        raise RuntimeError(
            'params_update_mode is not specified correctly. '
            'It must be either a dictionary that maps parameter names to '
            'the corresponding QueryStringParamUpdateMode, a list of '
            'parameter names, or a QueryStringParamUpdateMode value.')

    url_parsed = strict_urlparse(path)
    url_params = urllib.parse_qs(url_parsed.query)

    if isinstance(params, (list, tuple)):
        params = dict([(p, '') for p in params])

    # Update parameters dictionary
    for p, v in six.iteritems(params):
        update_mode = params_update_mode.get(p, default_update_mode)

        if update_mode == QueryStringParamUpdateMode.REPLACE:
            url_params[p] = v

        elif update_mode == QueryStringParamUpdateMode.APPEND:
            if type(v) in (list, tuple):
                # setdefault avoids a KeyError when the parameter is not
                # already present in the query string
                url_params.setdefault(p, list()).extend(v)
            else:
                url_params.setdefault(p, list()).append(v)

        elif update_mode == QueryStringParamUpdateMode.REMOVE:
            # ignore parameters that are not present
            url_params.pop(p, None)

        else:
            raise RuntimeError(
                'params_update_mode is not specified correctly. %s is '
                'not a valid value for parameter %s. Valid values are '
                'QueryStringParamUpdateMode.APPEND, '
                'QueryStringParamUpdateMode.REPLACE and '
                'QueryStringParamUpdateMode.REMOVE.' % (update_mode, p))

    url_new = list(url_parsed)
    url_new[4] = urllib.urlencode(url_params, doseq=True)

    return urllib.urlunparse(url_new)
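
The core mechanism (parse the query into a dict of value lists, mutate it, and re-encode with doseq=True) can be sketched on its own:

from six.moves.urllib import parse as urllib

parsed = urllib.urlparse('/dir1/file1?a=1&a=2&b=3')
params = urllib.parse_qs(parsed.query)   # {'a': ['1', '2'], 'b': ['3']}
params['b'] = ['9']                      # a REPLACE-style update
parts = list(parsed)
parts[4] = urllib.urlencode(params, doseq=True)
print(urllib.urlunparse(parts))
# -> /dir1/file1?a=1&a=2&b=9 (parameter order may differ on older Pythons)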
Example #47
def url(tgpath, tgparams=None, **kwargs):
    '''Computes URLs.

    This is a replacement for :func:`turbogears.controllers.url` (aka
    :func:`tg.url` in the template).  In addition to the functionality that
    :func:`tg.url` provides, it adds a token to prevent :term:`CSRF` attacks.

    :arg tgpath:  a list or a string. If the path is absolute (starts
        with a "/"), the :attr:`server.webpath`, :envvar:`SCRIPT_NAME` and
        the approot of the application are prepended to the path. In order for
        the approot to be detected properly, the root object should extend
        :class:`turbogears.controllers.RootController`.
    :kwarg tgparams: Query parameters for the URL, given as a dictionary.
        See ``kwargs``.
    :kwarg kwargs: Query parameters for the URL can be passed in as a
        dictionary in the second argument *or* as keyword parameters.
        Values which are a list or a tuple are used to create multiple
        key-value pairs.
    :returns: The changed path

    .. versionadded:: 0.3.10
       Modified from turbogears.controllers.url for :ref:`CSRF-Protection`
    '''
    if not isinstance(tgpath, six.string_types):
        tgpath = '/'.join(list(tgpath))
    if tgpath.startswith('/'):
        webpath = (config.get('server.webpath') or '').rstrip('/')
        if tg_util.request_available():
            check_app_root()
            tgpath = request.app_root + tgpath
            try:
                webpath += request.wsgi_environ['SCRIPT_NAME'].rstrip('/')
            except (AttributeError, KeyError):  # pylint: disable-msg=W0704
                # :W0704: Lack of wsgi environ is fine... we still have
                # server.webpath
                pass
        tgpath = webpath + tgpath
    if tgparams is None:
        tgparams = kwargs
    else:
        try:
            tgparams = tgparams.copy()
            tgparams.update(kwargs)
        except AttributeError:
            raise TypeError('url() expects a dictionary for query parameters')
    args = []
    # Add the _csrf_token
    try:
        if identity.current.csrf_token:
            tgparams.update({'_csrf_token': identity.current.csrf_token})
    except RequestRequiredException:  # pylint: disable-msg=W0704
        # :W0704: If we are outside of a request (called from non-controller
        # methods/ templates) just don't set the _csrf_token.
        pass

    # Check for query params in the current url
    query_params = six.iteritems(tgparams)
    scheme, netloc, path, params, query_s, fragment = urlparse(tgpath)
    if query_s:
        query_params = chain(
            (p for p in cgi.parse_qsl(query_s) if p[0] != '_csrf_token'),
            query_params)

    for key, value in query_params:
        if value is None:
            continue
        if isinstance(value, (list, tuple)):
            pairs = [(key, v) for v in value]
        else:
            pairs = [(key, value)]
        for key, value in pairs:
            if value is None:
                continue
            if isinstance(value, six.text_type):
                value = value.encode('utf8')
            args.append((key, str(value)))
    query_string = urlencode(args, True)
    tgpath = urlunparse((scheme, netloc, path, params, query_string, fragment))
    return tgpath
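
The token merging can be sketched standalone ('abc123' is a placeholder for identity.current.csrf_token):

from itertools import chain
from six.moves.urllib.parse import parse_qsl, urlencode, urlparse, urlunparse

scheme, netloc, path, params, query_s, fragment = urlparse(
    '/app/list?page=2&_csrf_token=stale')
merged = chain(((k, v) for k, v in parse_qsl(query_s) if k != '_csrf_token'),
               [('_csrf_token', 'abc123')])
print(urlunparse((scheme, netloc, path, params,
                  urlencode(list(merged)), fragment)))
# -> /app/list?page=2&_csrf_token=abc123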
Example #48
def test_is_valid_url(url):
    unparsed_url = urllib_parse.urlunparse(url)
    assume(not unparsed_url.startswith("file://"))
    assert vistir.path.is_valid_url(unparsed_url)
Example #49
def get_logged_in_program_certificate_url(certificate_url):
    parsed_url = urlparse(certificate_url)
    query_string = 'next=' + parsed_url.path
    url_parts = (parsed_url.scheme, parsed_url.netloc, '/login/', '',
                 query_string, '')
    return urlunparse(url_parts)
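
For example (hostname made up):

print(get_logged_in_program_certificate_url(
    'https://courses.example.com/certificates/abc'))
# -> https://courses.example.com/login/?next=/certificates/abc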
Example #50
def prepare_service(args=None, conf=None,
                    default_config_files=None,
                    log_to_std=False, logging_level=None):
    if conf is None:
        conf = cfg.ConfigOpts()
    opts.set_defaults()
    # FIXME(jd) Use the pkg_entry info to register the options of these libs
    db_options.set_defaults(conf)
    policy_opts.set_defaults(conf)

    # Register our own Gnocchi options
    for group, options in opts.list_opts():
        conf.register_opts(list(options),
                           group=None if group == "DEFAULT" else group)

    conf.register_cli_opts(opts._cli_options)

    conf.set_default("workers", utils.get_default_workers(), group="metricd")

    conf(args, project='gnocchi', validate_default_values=True,
         default_config_files=default_config_files,
         version=pbr.version.VersionInfo('gnocchi').version_string())

    if not log_to_std and (conf.log_dir or conf.log_file):
        outputs = [daiquiri.output.File(filename=conf.log_file,
                                        directory=conf.log_dir)]
    else:
        outputs = [daiquiri.output.STDERR]

    if conf.use_syslog:
        outputs.append(
            daiquiri.output.Syslog(facility=conf.syslog_log_facility))

    if conf.use_journal:
        outputs.append(daiquiri.output.Journal())

    daiquiri.setup(outputs=outputs)
    if logging_level is None:
        if conf.debug:
            logging_level = logging.DEBUG
        elif conf.verbose:
            logging_level = logging.INFO
        else:
            logging_level = logging.WARNING
    logging.getLogger("gnocchi").setLevel(logging_level)

    # HACK(jd) I'm not happy about that, fix AP class to handle a conf object?
    archive_policy.ArchivePolicy.DEFAULT_AGGREGATION_METHODS = (
        conf.archive_policy.default_aggregation_methods
    )

    # If no coordination URL is provided, default to using the indexer as
    # coordinator
    if conf.storage.coordination_url is None:
        if conf.storage.driver == "redis":
            conf.set_default("coordination_url",
                             conf.storage.redis_url,
                             "storage")
        elif conf.incoming.driver == "redis":
            conf.set_default("coordination_url",
                             conf.incoming.redis_url,
                             "storage")
        else:
            parsed = urlparse.urlparse(conf.indexer.url)
            proto, _, _ = parsed.scheme.partition("+")
            parsed = list(parsed)
            # Set proto without the + part
            parsed[0] = proto
            conf.set_default("coordination_url",
                             urlparse.urlunparse(parsed),
                             "storage")

    cfg_path = conf.oslo_policy.policy_file
    if not os.path.isabs(cfg_path):
        cfg_path = conf.find_file(cfg_path)
    if cfg_path is None or not os.path.exists(cfg_path):
        cfg_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                'rest', 'policy.json'))
    conf.set_default('policy_file', cfg_path, group='oslo_policy')

    conf.log_opt_values(LOG, logging.DEBUG)

    return conf
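
The coordination-URL fallback above strips the SQLAlchemy driver suffix from the indexer scheme. A standalone sketch with a made-up DSN:

from six.moves.urllib import parse as urlparse

parsed = urlparse.urlparse('mysql+pymysql://gnocchi:secret@db.example.com/gnocchi')
proto, _, _ = parsed.scheme.partition('+')
parts = list(parsed)
parts[0] = proto  # set proto without the + part
print(urlparse.urlunparse(parts))
# -> mysql://gnocchi:secret@db.example.com/gnocchi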
Example #51
File: util.py Project: sailfish009/clearml
def quote_url(url):
    parsed = urlparse(url)
    if parsed.scheme not in ('http', 'https'):
        return url
    parsed = parsed._replace(path=quote(parsed.path))
    return urlunparse(parsed)
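
For example (URLs made up):

print(quote_url('https://example.com/files/my report.pdf'))
# -> https://example.com/files/my%20report.pdf
print(quote_url('s3://bucket/my report.pdf'))
# -> s3://bucket/my report.pdf (non-HTTP schemes are returned unchanged)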
Example #52
    def replacer(siteurl, m):
        what = m.group('what')
        value = urlparse(m.group('value'))
        path = value.path
        origin = m.group('path')

        # XXX Put this in a different location.
        if what in {'filename', 'attach'}:
            if path.startswith('/'):
                path = path[1:]
            else:
                # relative to the source path of this content
                path = content.get_relative_source_path(
                    os.path.join(content.relative_dir, path))

            if path not in content._context['filenames']:
                unquoted_path = path.replace('%20', ' ')

                if unquoted_path in content._context['filenames']:
                    path = unquoted_path

            linked_content = content._context['filenames'].get(path)
            if linked_content:
                if what == 'attach':
                    if isinstance(linked_content, Static):
                        linked_content.attach_to(content)
                    else:
                        logger.warning(
                            "%s used {attach} link syntax on a "
                            "non-static file. Use {filename} instead.",
                            content.get_relative_source_path())
                origin = '/'.join((siteurl, linked_content.url))
                origin = origin.replace('\\', '/')  # for Windows paths.
            else:
                logger.warning(
                    "Unable to find `%s`, skipping url replacement.",
                    value.geturl(),
                    extra={
                        'limit_msg': ("Other resources were not found "
                                      "and their urls not replaced")
                    })
        elif what == 'category':
            origin = '/'.join((siteurl, Category(path, content.settings).url))
        elif what == 'tag':
            origin = '/'.join((siteurl, Tag(path, content.settings).url))
        elif what == 'index':
            origin = '/'.join((siteurl, content.settings['INDEX_SAVE_AS']))
        elif what == 'author':
            origin = '/'.join((siteurl, Author(path, content.settings).url))
        else:
            logger.warning(
                "Replacement Indicator '%s' not recognized, "
                "skipping replacement", what)

        # keep all other parts, such as query, fragment, etc.
        parts = list(value)
        parts[2] = origin
        origin = urlunparse(parts)

        return ''.join(
            (m.group('markup'), m.group('quote'), origin, m.group('quote')))
Example #53
def canonicalize_url(url,
                     keep_blank_values=True,
                     keep_fragments=False,
                     encoding=None):
    r"""Canonicalize the given url by applying the following procedures:

    - sort query arguments, first by key, then by value
    - percent encode paths ; non-ASCII characters are percent-encoded
      using UTF-8 (RFC-3986)
    - percent encode query arguments ; non-ASCII characters are percent-encoded
      using passed `encoding` (UTF-8 by default)
    - normalize all spaces (in query arguments) to '+' (plus symbol)
    - normalize percent encodings case (%2f -> %2F)
    - remove query arguments with blank values (unless `keep_blank_values` is True)
    - remove fragments (unless `keep_fragments` is True)

    The url passed can be bytes or unicode, while the url returned is
    always a native str (bytes in Python 2, unicode in Python 3).

    >>> import w3lib.url
    >>>
    >>> # sorting query arguments
    >>> w3lib.url.canonicalize_url('http://www.example.com/do?c=3&b=5&b=2&a=50')
    'http://www.example.com/do?a=50&b=2&b=5&c=3'
    >>>
    >>> # UTF-8 conversion + percent-encoding of non-ASCII characters
    >>> w3lib.url.canonicalize_url(u'http://www.example.com/r\u00e9sum\u00e9')
    'http://www.example.com/r%C3%A9sum%C3%A9'
    >>>

    For more examples, see the tests in `tests/test_url.py`.
    """
    # If supplied `encoding` is not compatible with all characters in `url`,
    # fallback to UTF-8 as safety net.
    # UTF-8 can handle all Unicode characters,
    # so we should be covered regarding URL normalization,
    # if not for proper URL expected by remote website.
    try:
        scheme, netloc, path, params, query, fragment = _safe_ParseResult(
            parse_url(url), encoding=encoding)
    except UnicodeEncodeError:
        scheme, netloc, path, params, query, fragment = _safe_ParseResult(
            parse_url(url), encoding='utf8')

    # 1. decode query-string as UTF-8 (or keep raw bytes),
    #    sort values,
    #    and percent-encode them back
    if six.PY2:
        keyvals = parse_qsl(query, keep_blank_values)
    else:
        # Python3's urllib.parse.parse_qsl does not work as wanted
        # for percent-encoded characters that do not match passed encoding,
        # they get lost.
        #
        # e.g., 'q=b%a3' becomes [('q', 'b\ufffd')]
        # (ie. with 'REPLACEMENT CHARACTER' (U+FFFD),
        #      instead of \xa3 that you get with Python2's parse_qsl)
        #
        # what we want here is to keep raw bytes, and percent encode them
        # so as to preserve whatever encoding what originally used.
        #
        # See https://tools.ietf.org/html/rfc3987#section-6.4:
        #
        # For example, it is possible to have a URI reference of
        # "http://www.example.org/r%E9sum%E9.xml#r%C3%A9sum%C3%A9", where the
        # document name is encoded in iso-8859-1 based on server settings, but
        # where the fragment identifier is encoded in UTF-8 according to
        # [XPointer]. The IRI corresponding to the above URI would be (in XML
        # notation)
        # "http://www.example.org/r%E9sum%E9.xml#r&#xE9;sum&#xE9;".
        # Similar considerations apply to query parts.  The functionality of
        # IRIs (namely, to be able to include non-ASCII characters) can only be
        # used if the query part is encoded in UTF-8.
        keyvals = parse_qsl_to_bytes(query, keep_blank_values)
    keyvals.sort()
    query = urlencode(keyvals)

    # 2. decode percent-encoded sequences in path as UTF-8 (or keep raw bytes)
    #    and percent-encode path again (this normalizes to upper-case %XX)
    uqp = _unquotepath(path)
    path = quote(uqp, _safe_chars) or '/'

    fragment = '' if not keep_fragments else fragment

    # every part should be safe already
    return urlunparse(
        (scheme, netloc.lower().rstrip(':'), path, params, query, fragment))
Example #54
    def _load(self, path, crawl=True, originaluri=None, includelogs=False,\
                        init=True, loadtype='href', loadcomplete=False, \
                                                rel=False, prevpath=None):
        """Helper function to main load function.

        :param path: path to start load from.
        :type path: str.
        :param crawl: flag to determine if load should traverse found links.
        :type crawl: boolean.
        :param originaluri: variable to assist in determining originating path.
        :type originaluri: str.
        :param includelogs: flag to determine if logs should be downloaded also.
        :type includelogs: boolean.
        :param init: flag to determine if first run of load.
        :type init: boolean.
        :param loadtype: flag to determine if load is meant for only href items.
        :type loadtype: str.
        :param loadcomplete: flag to download the entire monolith
        :type loadcomplete: boolean
        :param rel: flag to determine if the path is a relative path.
        :type rel: boolean
        :param prevpath: previous path, used to track the resource tree.
        :type prevpath: str.

        """

        if path.endswith("?page=1") and not loadcomplete:
            return
        elif not includelogs and not crawl:
            if "/Logs" in path:
                return

        #TODO: need to find a better way to support non ascii characters
        path = path.replace("|", "%7C")
        #remove fragments
        newpath = urlparse(path)
        newpath = list(newpath[:])
        newpath[-1] = ''
        path = urlunparse(tuple(newpath))

        if prevpath and prevpath != path:
            self.ctree[prevpath].update([path])
        if not rel:
            if path.lower() in self.visited_urls:
                return
        LOGGER.debug('_loading %s', path)

        resp = self._client().get(path)

        if resp.status != 200 and path.lower() == self.typepath.defs.biospath:
            raise BiosUnregisteredError()
        elif resp.status == 401:
            raise SessionExpired("Invalid session. Please logout and "\
                                    "log back in or include credentials.")
        elif resp.status not in (201, 200):
            self.removepath(path)
            return

        if loadtype == "ref":
            try:
                if resp.status in (201, 200):
                    self.update_member(resp=resp, path=path, init=init)
                self.parse_schema(resp)
            except jsonpointer.JsonPointerException:
                raise SchemaValidationError()

        self.update_member(resp=resp, path=path, init=init)

        fpath = lambda pa, path: path if pa.endswith(self.typepath.defs.hrefstring) and \
            pa.startswith((self.collstr, 'Entries')) else None

        if loadtype == 'href':
            #follow all the href attributes
            if self.is_redfish:
                jsonpath_expr = jsonpath_rw.parse("$..'@odata.id'")
            else:
                jsonpath_expr = jsonpath_rw.parse('$..href')
            matches = jsonpath_expr.find(resp.dict)

            if 'links' in resp.dict and 'NextPage' in resp.dict['links']:
                if originaluri:
                    next_link_uri = originaluri + '?page=' + \
                                    str(resp.dict['links']['NextPage']['page'])
                    href = '%s' % next_link_uri

                    self._load(href, originaluri=originaluri, \
                               includelogs=includelogs, crawl=crawl, \
                               init=init, prevpath=None, loadcomplete=loadcomplete)
                else:
                    next_link_uri = path + '?page=' + \
                                    str(resp.dict['links']['NextPage']['page'])

                    href = '%s' % next_link_uri
                    self._load(href, originaluri=path, includelogs=includelogs,\
                        crawl=crawl, init=init, prevpath=None, loadcomplete=loadcomplete)

            matchrdirpath = next((match for match in matches if match.value == \
                                                    self._resourcedir), None)
            if not matchrdirpath and crawl:
                for match in matches:
                    if path == "/rest/v1" and not loadcomplete:
                        if str(match.full_path) == "links.Schemas.href" or \
                                str(match.full_path) == "links.Registries.href":
                            continue
                    elif not loadcomplete:
                        if str(match.full_path) == "*****@*****.**" or \
                                str(match.full_path) == "*****@*****.**":
                            continue

                    if match.value == path:
                        continue
                    elif not isinstance(match.value, six.string_types):
                        continue

                    href = '%s' % match.value
                    self._load(href, crawl=crawl, \
                       originaluri=originaluri, includelogs=includelogs, \
                       init=init, prevpath=fpath(str(match.full_path), path), \
                       loadcomplete=loadcomplete)
            elif crawl:
                href = '%s' % matchrdirpath.value
                self._load(href, crawl=crawl, originaluri=originaluri, \
                    includelogs=includelogs, init=init, prevpath=path, loadcomplete=loadcomplete)
            if loadcomplete:
                if path == '/rest/v1':
                    schemamatch = jsonpath_rw.parse('$..extref')
                else:
                    schemamatch = jsonpath_rw.parse('$..Uri')
                smatches = schemamatch.find(resp.dict)
                matches = matches + smatches
                for match in matches:
                    if isinstance(match.value, six.string_types):
                        self._load(match.value, crawl=crawl, originaluri=originaluri,\
                        includelogs=includelogs, init=init, loadcomplete=loadcomplete,\
                                     prevpath=fpath(str(match.full_path), path))
Example #55
def DeriveRegionalEndpoint(endpoint, region):
    scheme, netloc, path, params, query, fragment = urlparse.urlparse(endpoint)
    netloc = '{}-{}'.format(region, netloc)
    return urlparse.urlunparse((scheme, netloc, path, params, query, fragment))
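
For example (endpoint made up):

print(DeriveRegionalEndpoint('https://service.example.com/v1', 'us-central1'))
# -> https://us-central1-service.example.com/v1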
Example #56
File: models.py Project: haijohn/rabix
    def geturl(self):
        return urlunparse((self.scheme, self.netloc, self.path, self.params,
                           self.query, self.fragment))
Example #57
def _marshal_uri(target_uri, origin_uri):
    """
    Translate the URL string representation into a new string which could be used as JSON keys.
    This method is needed because many JSON parsers and reference resolvers are using '/' as
    indicator of object nesting.

    To workaround this limitation we can re-write the url representation in a way that the parsers
    will accept it, for example "#/definitions/data_type" could become "|..definitions..data_type"

    Example: Assume that you have the following JSON document
        {
            "definitions": {
                "a/possible/def": {
                    "type": "object"
                },
                "a": {
                    "possible": {
                        "def": {
                            "type": "string"
                        }
                    }
                },
                "def": {
                    "$ref": "#/definitions/a/possible/def"
                }
            }
        }

    Assuming that the JSON parser is not raising exception the dereferenced value of
    "#/definitions/def" could be {"type": "object"} or {"type": "string"} which is
    an undetermined condition which can lead to weird errors.
    Let's assume instead that the JSON parser will raise an exception in this case
    the JSON object will not be usable.

    To prevent this conditions we are removing possible '/' from the JSON keys.

    :param target_uri: URI to marshal
    :type target_uri: ParseResult
    :param origin_uri: URI of the root swagger spec file
    :type origin_uri: ParseResult

    :return: a string representation of the URL which could be used into the JSON keys
    :rtype: str
    """

    marshalled_target = urlunparse(target_uri)

    if marshalled_target and target_uri.scheme == '':  # scheme is empty for relative paths. It should NOT happen!
        target_uri = ParseResult('file', *target_uri[1:])
        marshalled_target = urlunparse(target_uri)

    if not marshalled_target or target_uri.scheme not in {
            'file', 'http', 'https'
    }:
        raise ValueError('Invalid target: \'{target_uri}\''.format(
            target_uri=urlunparse(target_uri)))

    if origin_uri and target_uri.scheme == 'file':
        scheme, netloc, path, params, query, fragment = target_uri

        # Masquerade the absolute file path on the "local" server using
        # relative paths from the root swagger spec file
        spec_dir = os.path.dirname(origin_uri.path)
        scheme = 'lfile'
        path = os.path.relpath(path, spec_dir)
        marshalled_target = urlunparse(
            (scheme, netloc, path, params, query, fragment))

    for src, dst in iteritems(MARSHAL_REPLACEMENT_PATTERNS):
        marshalled_target = marshalled_target.replace(src, dst)
    return marshalled_target
Example #58
import pytest

from cloudmanager.setup_db import setup_db

import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from settings import TEST_DB_PATH
#from utils.migrate import run_migrations

from six.moves.urllib.parse import urlparse, urlunparse

test_uri = list(urlparse(TEST_DB_PATH))

DB_NAME = test_uri[2].replace('/', '')
# Connection URI used for creating the test database
# Strip the database name from the URI since the DB doesn't exist yet
test_uri[2] = ''
PG_URI = urlunparse(test_uri)

DB_CONN = None       # Connection for creating the test database
DB_TEST_CONN = None  # Connection to the test database used by tests

def pytest_configure(config):
    """Create test database and initialize connection"""
    global DB_CONN, DB_TEST_CONN
    try:
        DB_CONN = psycopg2.connect(PG_URI)
        DB_CONN.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        cur = DB_CONN.cursor()
        try:
            cur.execute('CREATE DATABASE %s;' % DB_NAME)
        except Exception:
            cur.execute('DROP DATABASE %s;' % DB_NAME)
Example #59
def _get_unversioned_endpoint(base_url):
    endpoint_parts = urlparse.urlparse(base_url)
    new_path = _remove_version_project(endpoint_parts.path)
    endpoint_parts = endpoint_parts._replace(path=new_path)
    endpoint = urlparse.urlunparse(endpoint_parts)
    return endpoint
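
A runnable sketch; _remove_version_project below is a hypothetical stand-in for the real helper, assumed to strip a trailing version (and project id) segment from the path:

import re
from six.moves.urllib import parse as urlparse

def _remove_version_project(path):
    # hypothetical: drop a trailing '/v<N>[/<segment>]' from the path
    return re.sub(r'/v[0-9.]+(/[^/]*)?/?$', '/', path)

parts = urlparse.urlparse('https://metric.example.com/v1/project123')
parts = parts._replace(path=_remove_version_project(parts.path))
print(urlparse.urlunparse(parts))
# -> https://metric.example.com/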
Example #60
    def _finalize_core(self, **defaults):
        """
        Complete initialization of standard IPA environment.

        This method will perform the following steps:

            1. Call `Env._bootstrap()` if it hasn't already been called.

            2. Merge-in variables from the configuration file ``self.conf``
               (if it exists) by calling `Env._merge_from_file()`.

            3. Merge-in variables from the defaults configuration file
               ``self.conf_default`` (if it exists) by calling
               `Env._merge_from_file()`.

            4. Intelligently fill-in the *in_server*, *logdir*, *log*, and
               *jsonrpc_uri* variables if they haven't already been set.

            5. Merge-in the variables in ``defaults`` by calling `Env._merge()`.
               In normal circumstances ``defaults`` will simply be those
               specified in `constants.DEFAULT_CONFIG`.

        After this method is called, all the environment variables used by all
        the built-in plugins will be available.  As such, this method should be
        called *before* any plugins are loaded.

        After this method has finished, the `Env` instance is still writable
        so that 3rd-party plugins can set variables they may require as the
        plugins are registered.

        Also see `Env._finalize()`, the final method in the bootstrap sequence.

        :param defaults: Internal defaults for all built-in variables.
        """
        self.__doing('_finalize_core')
        self.__do_if_not_done('_bootstrap')

        # Merge in context config file and then default config file:
        if self.__d.get('mode', None) != 'dummy':
            self._merge_from_file(self.conf)
            self._merge_from_file(self.conf_default)

        # Determine if in_server:
        if 'in_server' not in self:
            self.in_server = (self.context == 'server')

        # Set logdir:
        if 'logdir' not in self:
            if self.in_tree or not self.in_server:
                self.logdir = self._join('dot_ipa', 'log')
            else:
                self.logdir = path.join('/', 'var', 'log', 'ipa')

        # Set log file:
        if 'log' not in self:
            self.log = self._join('logdir', '%s.log' % self.context)

        # Workaround for ipa-server-install --uninstall. When no config file
        # is available, we set realm, domain, and basedn to RFC 2606 reserved
        # suffix to suppress attribute errors during uninstallation.
        if (self.in_server and self.context == 'installer' and
                not getattr(self, 'config_loaded', False)):
            if 'realm' not in self:
                self.realm = 'UNCONFIGURED.INVALID'
            if 'domain' not in self:
                self.domain = self.realm.lower()

        if 'basedn' not in self and 'domain' in self:
            self.basedn = DN(*(('dc', dc) for dc in self.domain.split('.')))

        # Derive xmlrpc_uri from server
        # (Note that this is done before deriving jsonrpc_uri from xmlrpc_uri
        # and server from jsonrpc_uri so that when only server or xmlrpc_uri
        # is specified, all 3 keys have a value.)
        if 'xmlrpc_uri' not in self and 'server' in self:
            self.xmlrpc_uri = 'https://{}/ipa/xml'.format(self.server)

        # Derive ldap_uri from server
        if 'ldap_uri' not in self and 'server' in self:
            self.ldap_uri = 'ldap://{}'.format(self.server)

        # Derive jsonrpc_uri from xmlrpc_uri
        if 'jsonrpc_uri' not in self:
            if 'xmlrpc_uri' in self:
                xmlrpc_uri = self.xmlrpc_uri
            else:
                xmlrpc_uri = defaults.get('xmlrpc_uri')
            if xmlrpc_uri:
                (scheme, netloc, uripath, params, query, fragment
                        ) = urlparse(xmlrpc_uri)
                uripath = uripath.replace('/xml', '/json', 1)
                self.jsonrpc_uri = urlunparse((
                        scheme, netloc, uripath, params, query, fragment))

        if 'server' not in self:
            if 'jsonrpc_uri' in self:
                jsonrpc_uri = self.jsonrpc_uri
            else:
                jsonrpc_uri = defaults.get('jsonrpc_uri')
            if jsonrpc_uri:
                parsed = urlparse(jsonrpc_uri)
                self.server = parsed.netloc

        self._merge(**defaults)

        # set the best known TLS version if min/max versions are not set
        if 'tls_version_min' not in self:
            self.tls_version_min = TLS_VERSIONS[-1]
        elif self.tls_version_min not in TLS_VERSIONS:
            raise errors.EnvironmentError(
                "Unknown TLS version '{ver}' set in tls_version_min."
                .format(ver=self.tls_version_min))

        if 'tls_version_max' not in self:
            self.tls_version_max = TLS_VERSIONS[-1]
        elif self.tls_version_max not in TLS_VERSIONS:
            raise errors.EnvironmentError(
                "Unknown TLS version '{ver}' set in tls_version_max."
                .format(ver=self.tls_version_max))

        if self.tls_version_max < self.tls_version_min:
            raise errors.EnvironmentError(
                "tls_version_min is set to a higher TLS version than "
                "tls_version_max.")