コード例 #1
0
ファイル: case.py プロジェクト: cdent/gabbi
    def _parse_url(self, url):
        """Create a url from test data.

        If provided with a full URL, just return that. If SSL is requested
        set the scheme appropriately.

        Scheme and netloc are saved for later use in comparisons.
        """
        query_params = self.test_data['query_parameters']
        ssl = self.test_data['ssl']

        parsed_url = urlparse.urlsplit(url)
        if not parsed_url.scheme:
            full_url = utils.create_url(url, self.host, port=self.port,
                                        prefix=self.prefix, ssl=ssl)
            # parse again to set updated netloc and scheme
            parsed_url = urlparse.urlsplit(full_url)

        self.scheme = parsed_url.scheme
        self.netloc = parsed_url.netloc

        if query_params:
            query_string = self._update_query_params(parsed_url.query,
                                                     query_params)
        else:
            query_string = parsed_url.query

        return urlparse.urlunsplit((parsed_url.scheme, parsed_url.netloc,
                                    parsed_url.path, query_string, ''))
コード例 #2
0
def test_authentication_disabled_app(authentication_disabled_app):
    """With authentication disabled, every auth view redirects to '/'."""
    # The fixture app must have auth switched off.
    assert mg_globals
    assert mg_globals.app.auth is False

    def assert_redirects_home(path):
        # Visiting an auth view should bounce straight to the root page.
        template.clear_test_template_context()
        response = authentication_disabled_app.get(path)
        response.follow()
        assert urlparse.urlsplit(response.location)[2] == '/'
        assert 'mediagoblin/root.html' in template.TEMPLATE_TEST_CONTEXT

    # Register page
    assert_redirects_home('/auth/register/')

    # Login page
    assert_redirects_home('/auth/login/')

    # check_login_simple should return None when auth is disabled.
    assert auth_tools.check_login_simple('test', 'simple') is None

    # Forgot-password page.
    # NOTE(review): the original comment said "forgot password" but the
    # URL visited was '/auth/register/' again; the URL is kept as-is --
    # confirm whether '/auth/forgot_password/' was intended.
    assert_redirects_home('/auth/register/')
コード例 #3
0
ファイル: migrate4.0.py プロジェクト: webrecorder/webrecorder
    def copy_rec_files_s3(self, user, collection, recording, warc_files):
        """Copy a recording's files to the collection's S3 layout.

        For each entry in ``warc_files`` (name -> ``s3://`` URL): WARC
        files are copied under the collection's ``warcs/`` directory and
        registered in redis; the index file (``recording.INDEX_FILE_KEY``)
        goes under ``indexes/`` and is recorded on the recording itself.
        Non-s3 or missing sources are skipped; copies are suppressed when
        ``self.dry_run`` is set.

        Returns the total byte size of the copied WARCs (the index file
        is not counted).

        NOTE(review): the ``user`` parameter is unused in this body --
        presumably kept for call-site symmetry; confirm.
        """
        # Redis keys tracking the collection's WARC map and recording's set.
        coll_warc_key = recording.COLL_WARC_KEY.format(coll=collection.my_id)
        rec_warc_key = recording.REC_WARC_KEY.format(rec=recording.my_id)

        # Copy WARCs
        total_size = 0

        coll_root = self.s3_root + collection.get_dir_path() + '/'

        for n, url in warc_files.items():
            if not url.startswith('s3://'):
                print('Skipping: ' + url)
                continue

            src_parts = urlsplit(url)
            src_bucket = src_parts.netloc
            src_key = src_parts.path.lstrip('/')

            # Verify the source object exists (and get its size) first.
            try:
                res = self.s3.head_object(Bucket=src_bucket,
                                          Key=src_key)
            except Exception as e:
                print('Skipping: ' + url)
                print(e)
                continue

            size = res['ContentLength']

            if n != recording.INDEX_FILE_KEY:
                target_file = coll_root + 'warcs/' + n

                self.redis.hset(coll_warc_key, n, target_file)
                self.redis.sadd(rec_warc_key, n)
                total_size += size
            else:
                # Index file: stored under indexes/, tracked on recording.
                target_file = coll_root + 'indexes/' + os.path.basename(url)
                recording.set_prop(n, target_file, update_ts=False)

            # target
            target_parts = urlsplit(target_file)
            target_bucket = target_parts.netloc
            target_key = target_parts.path.lstrip('/')

            params = dict(Bucket=target_bucket,
                          Key=target_key,
                          CopySource=dict(Bucket=src_bucket, Key=src_key))

            print('    Copying:')
            print('      From: s3://' + src_bucket + '/' + src_key)
            print('      To: s3://' + target_bucket + '/' + target_key)
            print('      Size: ' + str(size))

            # Copy failures are reported but do not abort the migration.
            try:
                if not self.dry_run:
                    res = self.s3.copy_object(**params)
            except Exception as e:
                print('    ERROR:')
                print(e)

        return total_size
コード例 #4
0
ファイル: common.py プロジェクト: openstack/manila
 def _update_link_prefix(self, orig_url, prefix):
     if not prefix:
         return orig_url
     url_parts = list(parse.urlsplit(orig_url))
     prefix_parts = list(parse.urlsplit(prefix))
     url_parts[0:2] = prefix_parts[0:2]
     return parse.urlunsplit(url_parts)
コード例 #5
0
ファイル: base.py プロジェクト: Xaroth/plex-export
 def __init__(self, base, relative=None):
     """Build the request URL from *base*, optionally joined with *relative*.

     :param base: either a URL string, or another RequestBase whose URL
         parts, headers, config and token state are inherited.
     :param relative: optional relative URL; its path is joined onto the
         base path and its query string is merged.  When *relative* has
         no query string, all base query parameters except
         ``X-Plex-Token`` are stripped.
     """
     self._has_token = False
     self._url = None
     self._url_parts = None
     self._loaded = False
     self._xml = None
     # NOTE(review): _url_parts is assigned twice -- harmless duplication
     # kept from the original.
     self._url_parts = None
     self._headers = None
     self._config = None
     if isinstance(base, six.string_types):
         base_url = base
         self._url_parts = list(parse.urlsplit(base_url))
     elif isinstance(base, RequestBase):
         base_url = base.url
         self._has_token = base.has_token
         # Copy the list so mutations below don't affect the parent.
         self._url_parts = base._url_parts[:]
         self._headers = base._headers
         self._config = base.config
     if relative:
         scheme, netloc, path, qs, fragment = parse.urlsplit(relative)
         if path:
             self._url_parts[2] = _join_plex(self._url_parts[2], path)
         if qs:
             # Merge base and relative query parameters.
             data = parse.parse_qsl(self._url_parts[3]) + parse.parse_qsl(qs)
             self._url_parts[3] = parse.urlencode(data)
         else:
             # Strip of all non-token parts
             data = parse.parse_qsl(self._url_parts[3])
             self._url_parts[3] = parse.urlencode([(x, y) for x, y in data if x == 'X-Plex-Token'])
     if not self._has_token:
         self._has_token = 'X-Plex-Token' in parse.parse_qs(self._url_parts[3])
     self._url = parse.urlunsplit(self._url_parts)
コード例 #6
0
ファイル: test_notifier.py プロジェクト: openstack/aodh
 def test_notify_alarm_with_batch_listener(self, logger):
     """Two alarm updates sent together must arrive as one batch of 2."""
     data1 = {
         "actions": ["test://"],
         "alarm_id": "foobar",
         "alarm_name": "testalarm",
         "severity": "critical",
         "previous": "OK",
         "current": "ALARM",
         "reason": "Everything is on fire",
         "reason_data": {"fire": "everywhere"},
     }
     data2 = {
         "actions": ["test://"],
         "alarm_id": "foobar2",
         "alarm_name": "testalarm2",
         "severity": "low",
         "previous": "ALARM",
         "current": "OK",
         "reason": "Everything is fine",
         "reason_data": {"fine": "fine"},
     }
     # Replace the default service with one configured for batches of 2.
     self.service.terminate()
     self.CONF.set_override("batch_size", 2, "notifier")
     # Init a new service with new configuration
     self.svc = notifier.AlarmNotifierService(0, self.CONF)
     self.addCleanup(self.svc.terminate)
     self._msg_notifier.sample({}, "alarm.update", data1)
     self._msg_notifier.sample({}, "alarm.update", data2)
     # Give the listener a moment to consume both messages.
     time.sleep(1)
     notifications = self.svc.notifiers["test"].obj.notifications
     self.assertEqual(2, len(notifications))
     # The test notifier records each alarm as a tuple of its fields.
     self.assertEqual(
         (
             urlparse.urlsplit(data1["actions"][0]),
             data1["alarm_id"],
             data1["alarm_name"],
             data1["severity"],
             data1["previous"],
             data1["current"],
             data1["reason"],
             data1["reason_data"],
         ),
         notifications[0],
     )
     self.assertEqual(
         (
             urlparse.urlsplit(data2["actions"][0]),
             data2["alarm_id"],
             data2["alarm_name"],
             data2["severity"],
             data2["previous"],
             data2["current"],
             data2["reason"],
             data2["reason_data"],
         ),
         notifications[1],
     )
     self.assertEqual(mock.call("Received %s messages in batch.", 2), logger.call_args_list[0])
コード例 #7
0
ファイル: httpclient.py プロジェクト: LongXQ/tricircle
def get_bottom_url(t_ver, t_url, b_ver, b_endpoint):
    """get_bottom_url

    convert url received by Tricircle service to bottom OpenStack
    request url through the configured endpoint in the KeyStone

    :param t_ver: version of top service
    :param t_url: request url to the top service
    :param b_ver: version of bottom service
    :param b_endpoint: endpoint registered in keystone for bottom service
    :return: request url to bottom service ('' when t_url lacks t_ver)
    """
    top = urlparse.urlsplit(t_url)

    # Strip the top service's version segment from the front of the path.
    stripped = top.path
    for marker in ('/' + t_ver + '/', t_ver + '/'):
        if stripped.startswith(marker):
            stripped = stripped[len(marker):]
            break

    if stripped == top.path:
        # wrong t_url: the version segment was not found
        return ''

    bottom = urlparse.urlsplit(b_endpoint)

    # Prepend the bottom service version (if any) to the remaining path.
    path = '/' + stripped if b_ver == '' else '/' + b_ver + '/' + stripped

    # Remove availability_zone filter since it is handled by VolumeController.
    # VolumeController will send GET request only to bottom pods whose AZ
    # is specified in availability_zone filter.
    query = urlparse.urlencode(
        [(k, v) for k, v in urlparse.parse_qsl(top.query)
         if k != 'availability_zone'])

    return urlparse.urlunsplit(
        (bottom.scheme, bottom.netloc, path, query, top.fragment))
コード例 #8
0
ファイル: misc.py プロジェクト: hasgeek/coaster
def __clean_external_url(url):
    """Return *url* if it points at the current site (or a subdomain of
    it), otherwise return ''.

    Only absolute URLs (``http://``, ``https://`` or protocol-relative
    ``//``) are checked; anything else (relative paths) passes through
    untouched.
    """
    if url.startswith(('http://', 'https://', '//')):
        # Do the domains and ports match?
        pnext = urlsplit(url)
        preq = urlsplit(request.url)
        if pnext.port != preq.port:
            return ''
        # BUGFIX: a malformed URL such as '//' has no hostname at all;
        # previously this raised AttributeError on .endswith below.
        if not pnext.hostname:
            return ''
        if not (pnext.hostname == preq.hostname or pnext.hostname.endswith('.' + preq.hostname)):
            return ''
    return url
コード例 #9
0
ファイル: biostar.py プロジェクト: ImmPortDB/immport-galaxy
def create_cookie(trans, key_name, key, email, age=DEFAULT_BIOSTAR_COOKIE_AGE, override_never_authenticate=False):
    """Set the BioStar login cookie (email + HMAC digest) on the response.

    Does nothing when ``biostar_never_authenticate`` is configured,
    unless *override_never_authenticate* is passed.  The cookie domain is
    chosen so a BioStar instance on a subdomain can read it.
    """
    if trans.app.config.biostar_never_authenticate and not override_never_authenticate:
        log.debug('A BioStar link was clicked, but never authenticate has been enabled, so we will not create the login cookie.')
        return
    # NOTE(review): hmac.new without an explicit digestmod defaults to MD5
    # on Python 2 and is an error on modern Python 3 -- confirm the
    # intended runtime and digest.
    digest = hmac.new(key, email).hexdigest()
    value = "%s:%s" % (email, digest)
    trans.set_cookie(value, name=key_name, path='/', age=age, version='1')
    # We need to explicitly set the domain here, in order to allow for biostar in a subdomain to work
    galaxy_hostname = urlsplit(url_for('/', qualified=True)).hostname
    biostar_hostname = urlsplit(trans.app.config.biostar_url).hostname
    trans.response.cookies[key_name]['domain'] = determine_cookie_domain(galaxy_hostname, biostar_hostname)
コード例 #10
0
ファイル: api_common.py プロジェクト: AradhanaSingh/neutron
def prepare_url(orig_url):
    """Takes a link and swaps in network_link_prefix if set."""
    prefix = cfg.CONF.network_link_prefix
    # Copied directly from nova/api/openstack/common.py
    if not prefix:
        return orig_url
    pieces = list(parse.urlsplit(orig_url))
    prefix_pieces = list(parse.urlsplit(prefix))
    # Scheme and netloc come from the prefix; its path is prepended.
    pieces[0], pieces[1] = prefix_pieces[0], prefix_pieces[1]
    pieces[2] = prefix_pieces[2] + pieces[2]
    return parse.urlunsplit(pieces).rstrip('/')
コード例 #11
0
ファイル: test_persona.py プロジェクト: ausbin/mediagoblin
        def _test_edit_persona():
            """Exercise the Persona edit/add views: rejected deletions
            (only address, missing field, unowned address), adding an
            address, then a successful deletion."""
            # Try and delete only Persona email address
            template.clear_test_template_context()
            res = persona_plugin_app.post(
                '/edit/persona/',
                {'email': '*****@*****.**'})

            assert 'mediagoblin/plugins/persona/edit.html' in template.TEMPLATE_TEST_CONTEXT
            context = template.TEMPLATE_TEST_CONTEXT['mediagoblin/plugins/persona/edit.html']
            form = context['form']

            assert form.email.errors == [u"You can't delete your only Persona email address unless you have a password set."]

            # Submitting with no email at all must fail validation.
            template.clear_test_template_context()
            res = persona_plugin_app.post(
                '/edit/persona/', {})

            assert 'mediagoblin/plugins/persona/edit.html' in template.TEMPLATE_TEST_CONTEXT
            context = template.TEMPLATE_TEST_CONTEXT['mediagoblin/plugins/persona/edit.html']
            form = context['form']

            assert form.email.errors == [u'This field is required.']

            # Try and delete Persona not owned by the user
            template.clear_test_template_context()
            res = persona_plugin_app.post(
                '/edit/persona/',
                {'email': '*****@*****.**'})

            assert 'mediagoblin/plugins/persona/edit.html' in template.TEMPLATE_TEST_CONTEXT
            context = template.TEMPLATE_TEST_CONTEXT['mediagoblin/plugins/persona/edit.html']
            form = context['form']

            assert form.email.errors == [u'That Persona email address is not registered to this account.']

            # GET on the add view just redirects back to the edit page.
            res = persona_plugin_app.get('/edit/persona/add/')

            assert urlparse.urlsplit(res.location)[2] == '/edit/persona/'

            # Add Persona email address
            template.clear_test_template_context()
            res = persona_plugin_app.post(
                '/edit/persona/add/')
            res.follow()

            assert urlparse.urlsplit(res.location)[2] == '/edit/account/'

            # Delete a Persona
            res = persona_plugin_app.post(
                '/edit/persona/',
                {'email': '*****@*****.**'})
            res.follow()

            assert urlparse.urlsplit(res.location)[2] == '/edit/account/'
コード例 #12
0
    def assert_valid_temp_url(self, name):
        """Check that the backend's temp URL for *name* is well-formed.

        The temp URL must share scheme and netloc with the plain
        container URL and carry Swift's temp-URL signature and expiry
        query parameters.

        NOTE(review): the inline comment below claims the path is also
        compared, but the ``[0:2]`` slice covers only scheme and netloc
        -- confirm whether ``[0:3]`` was intended.
        """
        url = self.backend.url(name)
        split_url = urlparse.urlsplit(url)
        query_params = urlparse.parse_qs(split_url[3])
        split_base_url = urlparse.urlsplit(base_url(container=self.backend.container_name, path=name))

        # ensure scheme, netloc, and path are same as to non-temporary URL
        self.assertEqual(split_base_url[0:2], split_url[0:2])

        # ensure query string contains signature and expiry
        self.assertIn('temp_url_sig', query_params)
        self.assertIn('temp_url_expires', query_params)
コード例 #13
0
ファイル: html.py プロジェクト: SmartTeleMax/iktomi
    def extra_clean(self, doc):
        """Post-process *doc* (an lxml tree) after generic cleaning.

        Unwraps elements whose href uses a disallowed protocol, strips
        unsafe URI attributes, unwraps anchors without href, filters
        class attributes against ``allow_classes``, runs the registered
        DOM callbacks, and finally wraps inline tags, splits paragraphs
        and drops configured empty tags.
        """
        # Unwrap any element whose href has a disallowed protocol.
        for el in doc.xpath('//*[@href]'):
            scheme, netloc, path, query, fragment = urlsplit(el.attrib['href'])
            if scheme and scheme not in self.allowed_protocols:
                el.drop_tag()

        for attr in self.attr_val_is_uri:
            if attr == 'href':
                continue
            for el in doc.xpath('//*[@'+attr+']'):
                scheme, netloc, path, query, fragment = urlsplit(el.attrib[attr])
                scheme_fail = scheme and scheme not in self.allowed_protocols
                netloc_fail = not self.allow_external_src and netloc
                if scheme_fail or netloc_fail:
                    if attr == 'src':
                        el.drop_tag()
                    else:
                        el.attrib.pop(attr)

        if self.a_without_href:
            for link in doc.xpath('//a[not(@href)]'):
                link.drop_tag()

        if self.allow_classes is not None:
            for el in doc.xpath('//*[@class]'):
                # BUGFIX: materialize the filters into lists.  On Python 3
                # filter() returns a lazy object that is always truthy, so
                # the `if classes:` test below never removed emptied class
                # attributes.
                classes = [cls for cls in el.attrib['class'].split() if cls]
                if el.tag in self.allow_classes:
                    allowed = self.allow_classes[el.tag]
                    condition = allowed if callable(allowed) else \
                            (lambda cls: cls in allowed)
                    classes = [cls for cls in classes if condition(cls)]
                else:
                    classes = []

                if classes:
                    el.attrib['class'] = ' '.join(classes)
                else:
                    el.attrib.pop('class')


        for callback in self.dom_callbacks:
            callback(doc)

        if self.wrap_inline_tags is not False and self.tags_to_wrap:
            self.clean_top(doc)

        if self.split_paragraphs_by_br:
            self.remove_brs_from_pars(doc)

        for tag in self.drop_empty_tags:
            for el in doc.xpath('//'+tag):
                if not el.attrib and self.is_element_empty(el):
                    el.drop_tree()
コード例 #14
0
ファイル: remote.py プロジェクト: cbowman0/graphite-web
    def parse_host(host):
        """Normalise *host* into its netloc, base URL and query params.

        Accepts either a full http(s) URL or a bare ``host[:port]``; a
        bare host gets a scheme chosen from INTRACLUSTER_HTTPS.  Only
        the last value of each repeated query parameter is kept.
        """
        if host.startswith(('http://', 'https://')):
            pieces = urlsplit(host)
        else:
            prefix = 'https://' if settings.INTRACLUSTER_HTTPS else 'http://'
            pieces = urlsplit(prefix + host)

        # parse_qs yields lists; keep only the last value per key.
        params = {}
        for key, values in parse_qs(pieces.query).items():
            params[key] = values[-1]

        return {
          'host': pieces.netloc,
          'url': '%s://%s%s' % (pieces.scheme, pieces.netloc, pieces.path),
          'params': params,
        }
コード例 #15
0
ファイル: test_notifier.py プロジェクト: ISCAS-VDI/aodh-base
 def test_notify_alarm_with_batch_listener(self, logger):
     """Two alarm updates sent together must arrive as one batch of 2."""
     data1 = {
         'actions': ['test://'],
         'alarm_id': 'foobar',
         'alarm_name': 'testalarm',
         'severity': 'critical',
         'previous': 'OK',
         'current': 'ALARM',
         'reason': 'Everything is on fire',
         'reason_data': {'fire': 'everywhere'}
     }
     data2 = {
         'actions': ['test://'],
         'alarm_id': 'foobar2',
         'alarm_name': 'testalarm2',
         'severity': 'low',
         'previous': 'ALARM',
         'current': 'OK',
         'reason': 'Everything is fine',
         'reason_data': {'fine': 'fine'}
     }
     # Restart the notifier service with batching enabled (size 2).
     self.service.stop()
     self.CONF.set_override("batch_size", 2, 'notifier')
     # Init a new service with new configuration
     self.svc = notifier.AlarmNotifierService(self.CONF)
     self.svc.start()
     self._msg_notifier.sample({}, 'alarm.update', data1)
     self._msg_notifier.sample({}, 'alarm.update', data2)
     # Give the listener a moment to consume both messages.
     time.sleep(1)
     notifications = self.svc.notifiers['test'].obj.notifications
     self.assertEqual(2, len(notifications))
     # The test notifier records each alarm as a tuple of its fields.
     self.assertEqual((urlparse.urlsplit(data1['actions'][0]),
                       data1['alarm_id'],
                       data1['alarm_name'],
                       data1['severity'],
                       data1['previous'],
                       data1['current'],
                       data1['reason'],
                       data1['reason_data']),
                      notifications[0])
     self.assertEqual((urlparse.urlsplit(data2['actions'][0]),
                       data2['alarm_id'],
                       data2['alarm_name'],
                       data2['severity'],
                       data2['previous'],
                       data2['current'],
                       data2['reason'],
                       data2['reason_data']),
                      notifications[1])
     self.assertEqual(mock.call('Received %s messages in batch.', 2),
                      logger.call_args_list[0])
     self.svc.stop()
コード例 #16
0
ファイル: Address.py プロジェクト: netvigator/myPyPacks
def UrlMustHaveSchemeAndPath( sURL ):
    """Return *sURL* normalised to always carry a scheme and a path.

    URLs without a recognised scheme are re-parsed with 'http://'
    prepended (urlsplit misparses scheme-less URLs); an empty path
    becomes '/'.
    """
    parts = urlsplit( sURL )
    if parts[0] not in ( 'http', 'https', 'ftp' ):
        # No usable scheme -- reparse with an explicit one.
        parts = urlsplit( 'http://' + sURL )
    sScheme, sLocation, sPath, sQuery, sFragmentID = parts
    if not sPath:
        sPath = '/'
    return urlunsplit( ( sScheme, sLocation, sPath, sQuery, sFragmentID ) )
コード例 #17
0
ファイル: test_openid.py プロジェクト: ausbin/mediagoblin
        def _test_new_user():
            """Register a new user via OpenID, log out, then log back in
            and check the session holds the same user."""
            openid_plugin_app.post(
                '/auth/openid/login/', {
                    'openid': u'http://real.myopenid.com'})

            # Right place?
            assert 'mediagoblin/auth/register.html' in template.TEMPLATE_TEST_CONTEXT
            context = template.TEMPLATE_TEST_CONTEXT['mediagoblin/auth/register.html']
            register_form = context['register_form']

            # Register User
            res = openid_plugin_app.post(
                '/auth/openid/register/', {
                    'openid': register_form.openid.data,
                    'username': u'chris',
                    'email': u'*****@*****.**'})
            res.follow()

            # Correct place?
            assert urlparse.urlsplit(res.location)[2] == '/u/chris/'
            assert 'mediagoblin/user_pages/user_nonactive.html' in template.TEMPLATE_TEST_CONTEXT

            # No need to test if user is in logged in and verification email
            # awaits, since openid uses the register_user function which is
            # tested in test_auth

            # Logout User
            openid_plugin_app.get('/auth/logout')

            # Get user and detach from session
            test_user = mg_globals.database.LocalUser.query.filter(
                LocalUser.username==u'chris'
            ).first()
            Session.expunge(test_user)

            # Log back in
            # Could not get it to work by 'POST'ing to /auth/openid/login/
            template.clear_test_template_context()
            res = openid_plugin_app.post(
                '/auth/openid/login/finish/', {
                    'openid': u'http://real.myopenid.com'})
            res.follow()

            assert urlparse.urlsplit(res.location)[2] == '/'
            assert 'mediagoblin/root.html' in template.TEMPLATE_TEST_CONTEXT

            # Make sure user is in the session
            context = template.TEMPLATE_TEST_CONTEXT['mediagoblin/root.html']
            session = context['request'].session
            assert session['user_id'] == six.text_type(test_user.id)
コード例 #18
0
    def _get_version_match(self, endpoint, profile_version, service_type):
        """Return the best matching version

        Look through each version trying to find the best match for
        the version specified in this profile.
         * The best match will only ever be found within the same
           major version, meaning a v2 profile will never match if
           only v3 is available on the server.
         * The search for the best match is fuzzy if needed.
           * If the profile specifies v2 and the server has
             v2.0, v2.1, and v2.2, the match will be v2.2.
           * When an exact major/minor is specified, e.g., v2.0,
             it will only match v2.0.

        :raises exceptions.EndpointNotFound: when no version matches.
        """

        match_version = None

        for version in endpoint.versions:
            api_version = self._parse_version(version["id"])
            if profile_version.major != api_version.major:
                # Never match across major versions.
                continue

            if profile_version.minor <= api_version.minor:
                # Candidate: remember the path of its "self" link.  Later
                # qualifying entries overwrite earlier ones, which is how
                # the fuzzy search settles on an acceptable higher minor.
                for link in version["links"]:
                    if link["rel"] == "self":
                        resp_link = link['href']
                        match_version = parse.urlsplit(resp_link).path

            # Only break out of the loop on an exact match,
            # otherwise keep trying.
            if profile_version.minor == api_version.minor:
                break

        if match_version is None:
            raise exceptions.EndpointNotFound(
                "Unable to determine endpoint for %s" % service_type)

        # Make sure the root endpoint has no overlap with match_version
        # (strip the root path once so urljoin doesn't duplicate it).
        root_parts = parse.urlsplit(endpoint.uri)
        match_version = match_version.replace(root_parts.path, "", 1)
        match = utils.urljoin(endpoint.uri, match_version)

        # For services that require the project id in the request URI,
        # add them in here.
        if endpoint.needs_project_id:
            match = utils.urljoin(match, endpoint.project_id)

        return match
コード例 #19
0
ファイル: common.py プロジェクト: anjoah/nova
def remove_version_from_href(href):
    """Removes the first api version from the href.

    Given: 'http://www.nova.com/v1.1/123'
    Returns: 'http://www.nova.com/123'

    Given: 'http://www.nova.com/v1.1'
    Returns: 'http://www.nova.com'

    :raises ValueError: if *href* does not contain a version segment.
    """
    parsed_url = urlparse.urlsplit(href)
    url_parts = parsed_url.path.split('/', 2)

    # NOTE: this should match vX.X or vX
    expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)')
    # BUGFIX: guard the index -- an empty path (e.g. 'http://host')
    # previously raised IndexError here instead of the documented
    # ValueError below.
    if len(url_parts) > 1 and expression.match(url_parts[1]):
        del url_parts[1]

    new_path = '/'.join(url_parts)

    if new_path == parsed_url.path:
        # Lazy %-args so the debug message is only built when emitted.
        LOG.debug('href %s does not contain version', href)
        raise ValueError(_('href %s does not contain version') % href)

    parsed_url = list(parsed_url)
    parsed_url[2] = new_path
    return urlparse.urlunsplit(parsed_url)
コード例 #20
0
 def _get_parameters_from_request(self, request, exception=False):
     """Get parameters to log in OPERATION_LOG."""
     user = request.user
     referer_url = None
     # Best-effort extraction of the referer path (+ query string); any
     # failure (missing header, parse/decode error) is swallowed and the
     # value built so far is kept.
     try:
         referer_dic = urlparse.urlsplit(
             urlparse.unquote(request.META.get('HTTP_REFERER')))
         referer_url = referer_dic[2]
         if referer_dic[3]:
             referer_url += "?" + referer_dic[3]
         # NOTE(review): str.decode exists only on Python 2; on Python 3
         # this raises AttributeError, which the except below swallows,
         # leaving the already-built referer_url in place.
         if isinstance(referer_url, str):
             referer_url = referer_url.decode('utf-8')
     except Exception:
         pass
     # Missing attributes default to None so logging never fails here.
     return {
         'domain_name': getattr(user, 'domain_name', None),
         'domain_id': getattr(user, 'domain_id', None),
         'project_name': getattr(user, 'project_name', None),
         'project_id': getattr(user, 'project_id', None),
         'user_name': getattr(user, 'username', None),
         'user_id': request.session.get('user_id', None),
         'request_scheme': request.scheme,
         'referer_url': referer_url,
         'request_url': urlparse.unquote(request.path),
         'method': request.method if not exception else None,
         'param': self._get_request_param(request),
     }
コード例 #21
0
ファイル: intermanual.py プロジェクト: fviolette/docs-tools
def _get_safe_url(url):
    """Gets version of *url* with basic auth credentials removed. This
    function returns results suitable for printing and logging.

    E.g.: https://user:[email protected] => https://[email protected]

    .. note::

       The password is dropped from the URL entirely rather than being
       replaced with asterisks, so no information about it is leaked.
       NOTE(review): because only ``hostname`` is used to rebuild the
       netloc, an explicit port is also dropped -- confirm intent.

    :param url: a url
    :type url: ``str``

    :return: *url* with password removed
    :rtype: ``str``
    """
    safe_url = url
    url, username, _ = _strip_basic_auth(url)
    if username is not None:
        # case: url contained basic auth creds; obscure password
        url_parts = parse.urlsplit(url)
        safe_netloc = '{0}@{1}'.format(username, url_parts.hostname)
        # replace original netloc w/ obscured version
        frags = list(url_parts)
        frags[1] = safe_netloc
        safe_url = parse.urlunsplit(frags)

    return safe_url
コード例 #22
0
ファイル: intermanual.py プロジェクト: fviolette/docs-tools
def _strip_basic_auth(url):
    """Returns *url* with basic auth credentials removed. Also returns the
    basic auth username and password if they're present in *url*.

    E.g.: https://user:[email protected] => https://example.com

    *url* need not include basic auth credentials.

    :param url: url which may or may not contain basic auth credentials
    :type url: ``str``

    :return: 3-``tuple`` of:

      * (``str``) -- *url* with any basic auth creds removed
      * (``str`` or ``NoneType``) -- basic auth username or ``None`` if basic
        auth username not given
      * (``str`` or ``NoneType``) -- basic auth password or ``None`` if basic
        auth password not given

    :rtype: ``tuple``
    """
    url_parts = parse.urlsplit(url)
    username = url_parts.username
    password = url_parts.password
    frags = list(url_parts)
    # swap out "user[:pass]@hostname" for "hostname"
    frags[1] = url_parts.hostname
    url = parse.urlunsplit(frags)
    return (url, username, password)
コード例 #23
0
ファイル: models.py プロジェクト: johnfelipe/sayit
    def save(self, *args, **kwargs):
        """Save the speaker, first caching ``self.image`` locally.

        The remote image (if any) is downloaded and stored in
        ``image_cache``, with the filename truncated so the resulting
        storage path fits the field's max_length.  Download failures
        (HTTPError) are ignored and the model is saved regardless.
        """
        if self.image:
            try:
                tmp_filename, headers = urlretrieve(self.image)

                max_filename_length = self.image_cache.field.max_length  # Usually 100
                template_needs = len(self.image_cache_file_path_template % '')

                # We'll use the actual instance label size rather than the maximum that it could be
                # instance_label_length = self.instance._meta.get_field_by_name('label')[0].max_length
                instance_label_length = len(self.instance.label)
                # NOTE(review): the constant 8 (extra slack subtracted from
                # the budget) is unexplained in the original -- confirm.
                truncate_to = max_filename_length - template_needs - instance_label_length - 8

                image_filename = os.path.basename(urlsplit(self.image).path)
                image_filename = url_to_unicode(image_filename)
                filename_root, extension = os.path.splitext(image_filename)

                # Truncate the root but always keep the full extension.
                truncated_filename = filename_root[:(truncate_to - len(extension))] + extension

                self.image_cache.save(
                    truncated_filename,
                    File(open(tmp_filename, 'rb')),

                    # Calling save on a FieldFile causes a save on the model instance
                    # we don't need that as we're about to do it below (without this
                    # we get an infinite recursion).
                    save=False,
                    )
            except HTTPError:
                pass

        return super(Speaker, self).save(*args, **kwargs)
コード例 #24
0
ファイル: test_notifier.py プロジェクト: openstack/aodh
 def test_notify_alarm(self):
     """A single alarm update must reach the test notifier unchanged."""
     data = {
         "actions": ["test://"],
         "alarm_id": "foobar",
         "alarm_name": "testalarm",
         "severity": "critical",
         "previous": "OK",
         "current": "ALARM",
         "reason": "Everything is on fire",
         "reason_data": {"fire": "everywhere"},
     }
     self._msg_notifier.sample({}, "alarm.update", data)
     # Give the listener a moment to consume the message.
     time.sleep(1)
     notifications = self.service.notifiers["test"].obj.notifications
     self.assertEqual(1, len(notifications))
     # The test notifier records the alarm as a tuple of its fields.
     self.assertEqual(
         (
             urlparse.urlsplit(data["actions"][0]),
             data["alarm_id"],
             data["alarm_name"],
             data["severity"],
             data["previous"],
             data["current"],
             data["reason"],
             data["reason_data"],
         ),
         notifications[0],
     )
コード例 #25
0
ファイル: common.py プロジェクト: openstack/manila
def remove_version_from_href(href):
    """Removes the first api version from the href.

    Given: 'http://manila.example.com/v1.1/123'
    Returns: 'http://manila.example.com/123'

    Given: 'http://www.manila.com/v1.1'
    Returns: 'http://www.manila.com'

    Given: 'http://manila.example.com/share/v1.1/123'
    Returns: 'http://manila.example.com/share/123'

    :raises ValueError: when no version segment is present.
    """
    split = parse.urlsplit(href)
    segments = split.path.split('/')

    # NOTE: this should match vX.X or vX
    version_re = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)')
    # Drop only the first path segment that looks like a version.
    for idx, segment in enumerate(segments):
        if version_re.match(segment):
            del segments[idx]
            break

    stripped_path = '/'.join(segments)

    if stripped_path == split.path:
        msg = 'href %s does not contain version' % href
        LOG.debug(msg)
        raise ValueError(msg)

    rebuilt = list(split)
    rebuilt[2] = stripped_path
    return parse.urlunsplit(rebuilt)
コード例 #26
0
ファイル: mercurial.py プロジェクト: halvorlu/rbtools
        def _info(r):
            # Search svn_info (multiline) for pattern r; return the split
            # URL from the first capture group, or None when no match.
            found = re.search(r, svn_info, re.M)
            return urlsplit(found.group(1)) if found else None
コード例 #27
0
ファイル: processor.py プロジェクト: Kayle009/sentry
def generate_module(src):
    """
    Converts a url into a made-up module name by doing the following:
     * Extract just the path name ignoring querystrings
     * Trimming off the initial /
     * Trimming off the file extension
     * Removes off useless folder prefixes

    e.g. http://google.com/js/v1.0/foo/bar/baz.js -> foo/bar/baz
    """
    if not src:
        return UNKNOWN_MODULE

    path_name, _ = splitext(urlsplit(src).path)
    if path_name.endswith('.min'):
        # Drop a trailing '.min' marker left after the extension was removed.
        path_name = path_name[:-4]

    # TODO(dcramer): replace CLEAN_MODULE_RE with tokenizer completely
    segments = path_name.split('/')
    for pos, segment in enumerate(segments):
        # a SHA
        if VERSION_RE.match(segment):
            return '/'.join(segments[pos + 1:])

    return CLEAN_MODULE_RE.sub('', path_name) or UNKNOWN_MODULE
コード例 #28
0
ファイル: case.py プロジェクト: ravichandrasadineni/gabbi
    def _parse_url(self, url, ssl=False):
        """Create a url from test data.

        If provided with a full URL, just return that. If SSL is requested
        set the scheme appropriately.

        Scheme and netloc are saved for later use in comparisons.
        """
        parsed_url = urlparse.urlsplit(url)
        url_scheme = parsed_url[0]
        scheme = 'http'
        netloc = self.host

        if not url_scheme:
            if self.port:
                netloc = '%s:%s' % (self.host, self.port)
            if ssl:
                scheme = 'https'
            path = parsed_url[2]
            if self.prefix:
                path = '%s%s' % (self.prefix, path)
            full_url = urlparse.urlunsplit((scheme, netloc, path,
                                            parsed_url[3], ''))
            self.scheme = scheme
            self.netloc = netloc
        else:
            full_url = url
            self.scheme = url_scheme
            self.netloc = parsed_url[1]

        return full_url
コード例 #29
0
ファイル: core.py プロジェクト: nharraud/jsonresolver
 def resolve(self, url):
     """Resolve given URL and use the registered loader.

     Lazily builds the URL map on first use, matches the URL's hostname
     and path against it, and invokes the matched loader with the
     extracted match arguments.
     """
     if self.url_map is None:
         self._build_url_map()
     parts = urlsplit(url)
     loader, args = self.url_map.bind(parts.hostname).match(parts.path)
     return loader(**args)
コード例 #30
0
ファイル: opener.py プロジェクト: Tarrasch/luigi
    def open(self, target_uri, **kwargs):
        """Open target uri.

        :param target_uri: Uri to open
        :type target_uri: string

        :returns: Target object

        """
        # Fall back to the default opener's scheme when the uri has none.
        parts = urlsplit(target_uri, scheme=self.default_opener)
        opener = self.get_opener(parts.scheme)
        conformed_query = opener.conform_query(parts.query)

        result = opener.get_target(
            parts.scheme,
            parts.path,
            parts.fragment,
            parts.username,
            parts.password,
            parts.hostname,
            parts.port,
            conformed_query,
            **kwargs
        )
        # Remember the original uri on the target for later reference.
        result.opener_path = target_uri

        return result
コード例 #31
0
 def __init__(self, url, client_dir, client_cert=None):
     # Cache the raw URL and its server portion, remember the client
     # configuration, then select the URL's path component as active.
     self._url = url
     self._client_dir = client_dir
     self._client_cert = client_cert
     self._server_url = self._extract_server_url(url)
     self.use(urlsplit(url).path)
コード例 #32
0
 def _extract_server_url(self, url):
     parts = urlsplit(url)
     return urlunsplit((parts.scheme, parts.netloc, '', '', ''))
コード例 #33
0
def url_path_append(url, suffix):
    """Append *suffix* to the path of *url*, collapsing double slashes."""
    parts = urlparse.urlsplit(url)
    joined_path = (parts.path + suffix).replace('//', '/')
    return urlparse.urlunsplit(
        (parts.scheme, parts.netloc, joined_path, parts.query,
         parts.fragment))
コード例 #34
0
def check_parsed(repo_root, path, f):
    """Lint a parseable test file and return a list of rule errors.

    :param repo_root: Root directory of the repository checkout.
    :param path: Repo-relative path of the file being checked.
    :param f: Open file object whose contents are parsed as a SourceFile.
    :returns: List of error objects produced by the ``rules`` classes;
        empty when the file passes (or is exempt from) all checks.
    """
    source_file = SourceFile(repo_root, path, "/", contents=f.read())

    errors = []

    # css/ has stricter layout rules: support files must live in a support
    # directory and testcases must link to a spec.
    if path.startswith("css/"):
        if (source_file.type == "support" and not source_file.name_is_non_test
                and not source_file.name_is_reference):
            return [rules.SupportWrongDir.error(path)]

        if (source_file.type != "support" and not source_file.name_is_reference
                and not source_file.spec_links):
            return [rules.MissingLink.error(path)]

    # Non-test and non-markup files are exempt from the remaining checks.
    if source_file.name_is_non_test:
        return []

    if source_file.markup_type is None:
        return []

    if source_file.root is None:
        return [rules.ParseFailed.error(path)]

    if source_file.type == "manual" and not source_file.name_is_manual:
        errors.append(rules.ContentManual.error(path))

    if source_file.type == "visual" and not source_file.name_is_visual:
        errors.append(rules.ContentVisual.error(path))

    # Reftest references must be relative (about:blank excepted), must not
    # point at the test itself, and must exist on disk.
    about_blank_parts = urlsplit("about:blank")
    for reftest_node in source_file.reftest_nodes:
        href = reftest_node.attrib.get("href", "").strip(space_chars)
        parts = urlsplit(href)

        if parts == about_blank_parts:
            continue

        if (parts.scheme or parts.netloc):
            errors.append(rules.AbsoluteUrlRef.error(path, (href, )))
            continue

        ref_url = urljoin(source_file.url, href)
        ref_parts = urlsplit(ref_url)

        if source_file.url == ref_url:
            errors.append(rules.SameFileRef.error(path))
            continue

        assert ref_parts.path != ""

        reference_file = os.path.join(repo_root, ref_parts.path[1:])
        reference_rel = reftest_node.attrib.get("rel", "")

        if not os.path.isfile(reference_file):
            errors.append(
                rules.NonexistentRef.error(path, (reference_rel, href)))

    # At most one <meta name=timeout>, and its only valid value is "long".
    if len(source_file.timeout_nodes) > 1:
        errors.append(rules.MultipleTimeout.error(path))

    for timeout_node in source_file.timeout_nodes:
        timeout_value = timeout_node.attrib.get("content", "").lower()
        if timeout_value != "long":
            errors.append(rules.InvalidTimeout.error(path, (timeout_value, )))

    # testharness tests: exactly one testharness.js and one
    # testharnessreport.js, no explicit testharness.css link, well-formed
    # variants, and correct relative ordering of the key elements.
    if source_file.testharness_nodes:
        if len(source_file.testharness_nodes) > 1:
            errors.append(rules.MultipleTestharness.error(path))

        testharnessreport_nodes = source_file.root.findall(
            ".//{http://www.w3.org/1999/xhtml}script[@src='/resources/testharnessreport.js']"
        )
        if not testharnessreport_nodes:
            errors.append(rules.MissingTestharnessReport.error(path))
        else:
            if len(testharnessreport_nodes) > 1:
                errors.append(rules.MultipleTestharnessReport.error(path))

        testharnesscss_nodes = source_file.root.findall(
            ".//{http://www.w3.org/1999/xhtml}link[@href='/resources/testharness.css']"
        )
        if testharnesscss_nodes:
            errors.append(rules.PresentTestharnessCSS.error(path))

        for element in source_file.variant_nodes:
            if "content" not in element.attrib:
                errors.append(rules.VariantMissing.error(path))
            else:
                variant = element.attrib["content"]
                if variant != "" and variant[0] not in ("?", "#"):
                    errors.append(rules.MalformedVariant.error(path, (path, )))

        # Walk the document once, checking element order; stop as soon as
        # every required element has been seen.
        seen_elements = {
            "timeout": False,
            "testharness": False,
            "testharnessreport": False
        }
        required_elements = [
            key for key, value in {
                "testharness": True,
                "testharnessreport": len(testharnessreport_nodes) > 0,
                "timeout": len(source_file.timeout_nodes) > 0
            }.items() if value
        ]

        for elem in source_file.root.iter():
            if source_file.timeout_nodes and elem == source_file.timeout_nodes[
                    0]:
                seen_elements["timeout"] = True
                if seen_elements["testharness"]:
                    errors.append(rules.LateTimeout.error(path))

            elif elem == source_file.testharness_nodes[0]:
                seen_elements["testharness"] = True

            elif testharnessreport_nodes and elem == testharnessreport_nodes[0]:
                seen_elements["testharnessreport"] = True
                if not seen_elements["testharness"]:
                    errors.append(rules.EarlyTestharnessReport.error(path))

            if all(seen_elements[name] for name in required_elements):
                break

    # testdriver tests: exactly one testdriver.js and one
    # testdriver-vendor.js.
    if source_file.testdriver_nodes:
        if len(source_file.testdriver_nodes) > 1:
            errors.append(rules.MultipleTestdriver.error(path))

        testdriver_vendor_nodes = source_file.root.findall(
            ".//{http://www.w3.org/1999/xhtml}script[@src='/resources/testdriver-vendor.js']"
        )
        if not testdriver_vendor_nodes:
            errors.append(rules.MissingTestdriverVendor.error(path))
        else:
            if len(testdriver_vendor_nodes) > 1:
                errors.append(rules.MultipleTestdriverVendor.error(path))

    # Every harness script must be referenced by its canonical
    # /resources/<name>.js path.
    for element in source_file.root.findall(
            ".//{http://www.w3.org/1999/xhtml}script[@src]"):
        src = element.attrib["src"]

        def incorrect_path(script, src):
            # True when src names the script but not via /resources/.
            return (script == src or ("/%s" % script in src
                                      and src != "/resources/%s" % script))

        if incorrect_path("testharness.js", src):
            errors.append(rules.TestharnessPath.error(path))

        if incorrect_path("testharnessreport.js", src):
            errors.append(rules.TestharnessReportPath.error(path))

        if incorrect_path("testdriver.js", src):
            errors.append(rules.TestdriverPath.error(path))

        if incorrect_path("testdriver-vendor.js", src):
            errors.append(rules.TestdriverVendorPath.error(path))

    return errors
コード例 #35
0
ファイル: lint.py プロジェクト: shacharz/web-platform-tests
def check_parsed(repo_root, path, f):
    """Lint a parseable test file and return a list of error tuples.

    :param repo_root: Root directory of the repository checkout.
    :param path: Repo-relative path of the file being checked.
    :param f: Open file object whose contents are parsed as a SourceFile.
    :returns: List of 4-tuples ``(error_code, message, path, line_no)``;
        empty when the file passes (or is exempt from) all checks.
    """
    source_file = SourceFile(repo_root, path, "/", contents=f.read())

    errors = []

    # css/ has stricter layout rules: support files must live in a support
    # directory and testcases must link to a spec.
    if path.startswith("css/"):
        if (source_file.type == "support" and
            not source_file.name_is_non_test and
            not source_file.name_is_reference):
            return [("SUPPORT-WRONG-DIR", "Support file not in support directory", path, None)]

        if (source_file.type != "support" and
            not source_file.name_is_reference and
            not source_file.spec_links):
            return [("MISSING-LINK", "Testcase file must have a link to a spec", path, None)]

    # Non-test, manual and non-markup files are exempt from the remaining
    # checks.
    if source_file.name_is_non_test or source_file.name_is_manual:
        return []

    if source_file.markup_type is None:
        return []

    if source_file.root is None:
        return [("PARSE-FAILED", "Unable to parse file", path, None)]

    if source_file.type == "manual" and not source_file.name_is_manual:
        return [("CONTENT-MANUAL", "Manual test whose filename doesn't end in '-manual'", path, None)]

    if source_file.type == "visual" and not source_file.name_is_visual:
        return [("CONTENT-VISUAL", "Visual test whose filename doesn't end in '-visual'", path, None)]

    # Reftest references must be relative (about:blank excepted), must not
    # point at the test itself, and must exist on disk.
    for reftest_node in source_file.reftest_nodes:
        href = reftest_node.attrib.get("href", "").strip(space_chars)
        parts = urlsplit(href)
        if (parts.scheme or parts.netloc) and parts != urlsplit("about:blank"):
            errors.append(("ABSOLUTE-URL-REF",
                     "Reference test with a reference file specified via an absolute URL: '%s'" % href, path, None))
            continue

        ref_url = urljoin(source_file.url, href)
        ref_parts = urlsplit(ref_url)

        if source_file.url == ref_url:
            errors.append(("SAME-FILE-REF",
                           "Reference test which points at itself as a reference",
                           path,
                           None))
            continue

        assert ref_parts.path != ""

        reference_file = os.path.join(repo_root, ref_parts.path[1:])
        reference_rel = reftest_node.attrib.get("rel", "")

        if not os.path.isfile(reference_file):
            errors.append(("NON-EXISTENT-REF",
                     "Reference test with a non-existent '%s' relationship reference: '%s'" % (reference_rel, href), path, None))

    # At most one <meta name=timeout>, and its only valid value is "long".
    if len(source_file.timeout_nodes) > 1:
        errors.append(("MULTIPLE-TIMEOUT", "More than one meta name='timeout'", path, None))

    for timeout_node in source_file.timeout_nodes:
        timeout_value = timeout_node.attrib.get("content", "").lower()
        if timeout_value != "long":
            errors.append(("INVALID-TIMEOUT", "Invalid timeout value %s" % timeout_value, path, None))

    # testharness tests: exactly one testharness.js and one
    # testharnessreport.js, no explicit testharness.css link, well-formed
    # variants, and correct relative ordering of the key elements.
    if source_file.testharness_nodes:
        if len(source_file.testharness_nodes) > 1:
            errors.append(("MULTIPLE-TESTHARNESS",
                           "More than one <script src='/resources/testharness.js'>", path, None))

        testharnessreport_nodes = source_file.root.findall(".//{http://www.w3.org/1999/xhtml}script[@src='/resources/testharnessreport.js']")
        if not testharnessreport_nodes:
            errors.append(("MISSING-TESTHARNESSREPORT",
                           "Missing <script src='/resources/testharnessreport.js'>", path, None))
        else:
            if len(testharnessreport_nodes) > 1:
                errors.append(("MULTIPLE-TESTHARNESSREPORT",
                               "More than one <script src='/resources/testharnessreport.js'>", path, None))

        testharnesscss_nodes = source_file.root.findall(".//{http://www.w3.org/1999/xhtml}link[@href='/resources/testharness.css']")
        if testharnesscss_nodes:
            errors.append(("PRESENT-TESTHARNESSCSS",
                           "Explicit link to testharness.css present", path, None))

        for element in source_file.variant_nodes:
            if "content" not in element.attrib:
                errors.append(("VARIANT-MISSING",
                               "<meta name=variant> missing 'content' attribute", path, None))
            else:
                variant = element.attrib["content"]
                if variant != "" and variant[0] not in ("?", "#"):
                    # BUG FIX: this tuple was missing the ``path`` element,
                    # yielding a 3-tuple where every other error is a
                    # (code, message, path, line_no) 4-tuple.
                    errors.append(("MALFORMED-VARIANT",
                                   "%s <meta name=variant> 'content' attribute must be the empty string or start with '?' or '#'" % path,
                                   path, None))

        # Walk the document once, checking element order; stop as soon as
        # every required element has been seen.
        seen_elements = {"timeout": False,
                         "testharness": False,
                         "testharnessreport": False}
        required_elements = [key for key, value in {"testharness": True,
                                                    "testharnessreport": len(testharnessreport_nodes) > 0,
                                                    "timeout": len(source_file.timeout_nodes) > 0}.items()
                             if value]

        for elem in source_file.root.iter():
            if source_file.timeout_nodes and elem == source_file.timeout_nodes[0]:
                seen_elements["timeout"] = True
                if seen_elements["testharness"]:
                    errors.append(("LATE-TIMEOUT",
                                   "<meta name=timeout> seen after testharness.js script", path, None))

            elif elem == source_file.testharness_nodes[0]:
                seen_elements["testharness"] = True

            elif testharnessreport_nodes and elem == testharnessreport_nodes[0]:
                seen_elements["testharnessreport"] = True
                if not seen_elements["testharness"]:
                    errors.append(("EARLY-TESTHARNESSREPORT",
                                   "testharnessreport.js script seen before testharness.js script", path, None))

            if all(seen_elements[name] for name in required_elements):
                break

    # testdriver tests: exactly one testdriver.js and one
    # testdriver-vendor.js.
    if source_file.testdriver_nodes:
        if len(source_file.testdriver_nodes) > 1:
            errors.append(("MULTIPLE-TESTDRIVER",
                           "More than one <script src='/resources/testdriver.js'>", path, None))

        testdriver_vendor_nodes = source_file.root.findall(".//{http://www.w3.org/1999/xhtml}script[@src='/resources/testdriver-vendor.js']")
        if not testdriver_vendor_nodes:
            errors.append(("MISSING-TESTDRIVER-VENDOR",
                           "Missing <script src='/resources/testdriver-vendor.js'>", path, None))
        else:
            if len(testdriver_vendor_nodes) > 1:
                errors.append(("MULTIPLE-TESTDRIVER-VENDOR",
                               "More than one <script src='/resources/testdriver-vendor.js'>", path, None))

    # Every harness script must be referenced by its canonical
    # /resources/<name>.js path.
    for element in source_file.root.findall(".//{http://www.w3.org/1999/xhtml}script[@src]"):
        src = element.attrib["src"]
        for name in ["testharness", "testharnessreport", "testdriver", "testdriver-vendor"]:
            if "%s.js" % name == src or ("/%s.js" % name in src and src != "/resources/%s.js" % name):
                errors.append(("%s-PATH" % name.upper(), "%s.js script seen with incorrect path" % name, path, None))

    return errors
コード例 #36
0
 def _get_url_path(url):
     # Remove schema from url
     _, _, path, query, _ = urlsplit(url)
     url = urlunsplit(("", "", path, query, ""))
     return url
コード例 #37
0
ファイル: publish_to_pypi.py プロジェクト: Etiqa/bromine
    elif len(git_head_tags) > 1:
        raise Exception('Git HEAD has multiple tags: '
                        + ', '.join('"{}"'.format(t) for t in git_head_tags))
    args.tag = git_head_tags[0]
assert args.tag
print('\nPublishing version "{}" on PyPI\n'.format(args.tag))

# get internal artifacts repository url
with io.open(Path.home() / '.pip' / 'pip.conf', 'r', encoding='utf-8') as fin:
    repo_url = re.search(
        r'^index-url = (?P<url>.+)$',
        fin.read(),
        flags=re.MULTILINE
    ).group('url')
    repo_url = urlunsplit(
        urlsplit(repo_url)._replace(path='', query='', fragment='')
    )

# get artifact url corresponding to the version to publish
artifact_url = None
components_api = '/service/rest/beta/components?repository=bromine'
r = requests.get(urljoin(repo_url, components_api))
while not artifact_url:
    r = r.json()
    match = [i for i in r['items'] if i['version'] == args.tag]
    if match:
        artifact_data = match[0]['assets'][0]
        artifact_url = urljoin(repo_url, '/repository/bromine/' + artifact_data['path'])
        artifact_sha256 = artifact_data['checksum']['sha256']
    else:
        token = r['continuationToken']
コード例 #38
0
def add_command(config, tasks):
    """Task transform: attach the update-verify-config-creator command.

    For each task, builds the mozharness command line from the release
    config and the task's ``extra`` fields, resolves keyed-by arguments
    per build platform, and yields the task with its ``run`` section set
    to run the command via mach.
    """
    # Arguments whose values are keyed by platform/release level in the
    # task definition and must go through resolve_keyed_by below.
    keyed_by_args = [
        "channel",
        "archive-prefix",
        "previous-archive-prefix",
        "aus-server",
        "override-certs",
        "include-version",
        "mar-channel-id-override",
        "last-watershed",
    ]
    # Arguments passed through only when present in task["extra"].
    optional_args = [
        "updater-platform",
    ]

    release_config = get_release_config(config)

    for task in tasks:
        task["description"] = "generate update verify config for {}".format(
            task["attributes"]["build_platform"])

        # Fixed part of the command line, filled from task metadata and
        # release configuration.
        command = [
            "python",
            "testing/mozharness/scripts/release/update-verify-config-creator.py",
            "--product",
            task["extra"]["product"],
            "--stage-product",
            task["shipping-product"],
            "--app-name",
            task["extra"]["app-name"],
            "--branch-prefix",
            task["extra"]["branch-prefix"],
            "--platform",
            task["extra"]["platform"],
            "--to-version",
            release_config["version"],
            "--to-app-version",
            release_config["appVersion"],
            "--to-build-number",
            str(release_config["build_number"]),
            "--to-buildid",
            config.params["moz_build_date"],
            "--to-revision",
            get_branch_rev(config),
            "--output-file",
            "update-verify.cfg",
        ]

        # Repo path is the branch repo URL's path without the leading '/'.
        repo_path = urlparse.urlsplit(get_branch_repo(config)).path.lstrip("/")
        command.extend(["--repo-path", repo_path])

        # One --partial-version flag per partial, with any 'build...'
        # suffix removed.
        if release_config.get("partial_versions"):
            for partial in release_config["partial_versions"].split(","):
                command.extend(
                    ["--partial-version",
                     partial.split("build")[0]])

        for arg in optional_args:
            if task["extra"].get(arg):
                command.append("--{}".format(arg))
                command.append(task["extra"][arg])

        for arg in keyed_by_args:
            thing = "extra.{}".format(arg)
            resolve_keyed_by(task,
                             thing,
                             item_name=task["name"],
                             platform=task["attributes"]["build_platform"],
                             **{
                                 "release-type": config.params["release_type"],
                                 "release-level":
                                 config.params.release_level(),
                             })
            # ignore things that resolved to null
            if not task["extra"].get(arg):
                continue
            # Symbolic names are mapped to regex form for these two args.
            if arg == "include-version":
                task["extra"][arg] = INCLUDE_VERSION_REGEXES[task["extra"]
                                                             [arg]]
            if arg == "mar-channel-id-override":
                task["extra"][arg] = MAR_CHANNEL_ID_OVERRIDE_REGEXES[
                    task["extra"][arg]]

            command.append("--{}".format(arg))
            command.append(task["extra"][arg])

        task["run"].update({
            "using": "mach",
            "mach": " ".join(command),
        })

        yield task
コード例 #39
0
 def __init__(self):
     """Set up normalized caches for fetched/resolved/resolving documents.

     The first three maps canonicalize their URL keys so that different
     spellings of the same URL share one entry; ``index`` is unkeyed.
     """
     # PEP 8 (E731): use a def instead of assigning a lambda to a name.
     def normalize(url):
         # Round-trip through urlsplit to canonicalize the URL form.
         return urlparse.urlsplit(url).geturl()

     self.fetched = NormDict(normalize)
     self.resolved = NormDict(normalize)
     self.resolving = NormDict(normalize)
     self.index = NormDict()
コード例 #40
0
ファイル: __init__.py プロジェクト: bobuhiro11/gnocchi
def get_driver(conf):
    """Return the configured driver."""
    # The indexer URL's scheme selects which registered driver to load.
    scheme = parse.urlsplit(conf.indexer.url).scheme
    driver_cls = driver.DriverManager('gnocchi.indexer', scheme).driver
    return driver_cls(conf)
コード例 #41
0
def has_in_url_path(url, sub):
    """Test if the `sub` string is in the `url` path.

    :param url: URL to inspect.
    :param sub: Substring to look for in the URL's path component.
    :returns: True if `sub` occurs in the path, else False.
    """
    # Only the path component matters; the original unpacked (and ignored)
    # scheme, netloc, query and fragment as unused locals.
    return sub in urlparse.urlsplit(url).path
コード例 #42
0
 def redirect_tor(self, request):
     # Rebuild the requested URI against the tenant's first onion hostname
     # and redirect there over plain http.
     # NOTE: request.uri is handled as bytes, hence the byte-string scheme.
     _, _, path, query, frag = urlsplit(request.uri)
     redirect_url = urlunsplit(
         (b'http', State.tenant_cache[request.tid].onionnames[0], path,
          query, frag))
     self.redirect(request, redirect_url)
コード例 #43
0
 def redirect_https(self, request):
     # Re-issue the current request over https on the same hostname.
     # request.uri is bytes, hence the byte-string scheme.
     parts = urlsplit(request.uri)
     secure_url = urlunsplit(
         (b'https', request.hostname, parts.path, parts.query,
          parts.fragment))
     self.redirect(request, secure_url)
コード例 #44
0
ファイル: yarn.py プロジェクト: raotkind/integrations-core
 def _get_url_base(self, url):
     """
     Return the base of a URL
     """
     s = urlsplit(url)
     return urlunsplit([s.scheme, s.netloc, '', '', ''])
コード例 #45
0
 def parse_components(url):
     # Split the URL and return (url parts with query blanked,
     # parsed query dict).
     split_result = parse.urlsplit(url)
     query_dict = parse.parse_qs(split_result.query)
     return split_result._replace(query=''), query_dict
コード例 #46
0
 def get_volume_api_version_from_endpoint(self):
     # The version lives in the first path segment, e.g. '/v2/<tenant>':
     # strip the leading '/' and then the leading 'v'.
     split_url = urlparse.urlsplit(self.management_url)
     first_segment = split_url.path.lstrip('/').split('/')[0]
     return first_segment[1:]
コード例 #47
0
ファイル: http.py プロジェクト: sontek/sentry
    def to_python(cls, data):
        """Build an interface instance from raw event ``data``.

        Validates the payload, normalizes the HTTP method, splits the URL
        into components, merges query string / fragment / cookie sources,
        and heuristically decodes the request body before constructing the
        class.

        :param data: Mapping of raw interface fields (``url`` required).
        :raises InterfaceValidationError: if the payload or method is
            invalid.
        """
        # NOTE(review): ``errors`` returned by validation is unused here.
        is_valid, errors = validate_and_default_interface(data, cls.path)
        if not is_valid:
            raise InterfaceValidationError("Invalid interface data")

        kwargs = {}

        if data.get('method'):
            method = data['method'].upper()
            # Optimize for the common path here, where it's a GET/POST,
            # falling back to a regular expression test
            if method not in ('GET',
                              'POST') and not http_method_re.match(method):
                raise InterfaceValidationError("Invalid value for 'method'")
            kwargs['method'] = method
        else:
            kwargs['method'] = None

        scheme, netloc, path, query_bit, fragment_bit = urlsplit(data['url'])

        # Explicit 'query_string' wins over the query embedded in the URL.
        query_string = data.get('query_string') or query_bit
        if query_string:
            # if querystring was a dict, convert it to a string
            if isinstance(query_string, dict):
                query_string = urlencode([(to_bytes(k), to_bytes(v))
                                          for k, v in query_string.items()])
            else:
                if query_string[0] == '?':
                    # remove '?' prefix
                    query_string = query_string[1:]
            kwargs['query_string'] = trim(query_string, 4096)
        else:
            kwargs['query_string'] = ''

        fragment = data.get('fragment') or fragment_bit

        cookies = data.get('cookies')
        # if cookies were [also] included in headers we
        # strip them out
        headers = data.get('headers')
        if headers:
            headers, cookie_header = format_headers(headers)
            if not cookies and cookie_header:
                cookies = cookie_header
        else:
            headers = ()

        # We prefer the body to be a string, since we can then attempt to parse it
        # as JSON OR decode it as a URL encoded query string, without relying on
        # the correct content type header being passed.
        body = data.get('data')

        content_type = next((v for k, v in headers if k == 'Content-Type'),
                            None)

        # Remove content type parameters
        if content_type is not None:
            content_type = content_type.partition(';')[0].rstrip()

        # We process request data once during ingestion and again when
        # requesting the http interface over the API. Avoid decoding the
        # body again.
        inferred_content_type = data.get('inferred_content_type', content_type)

        if 'inferred_content_type' not in data and not isinstance(body, dict):
            body, inferred_content_type = heuristic_decode(body, content_type)

        if body:
            body = trim(body, settings.SENTRY_MAX_HTTP_BODY_SIZE)

        kwargs['inferred_content_type'] = inferred_content_type
        kwargs['cookies'] = trim_pairs(format_cookies(cookies))
        kwargs['env'] = trim_dict(data.get('env') or {})
        kwargs['headers'] = trim_pairs(headers)
        kwargs['data'] = fix_broken_encoding(body)
        # Query and fragment are stored separately, so strip them from url.
        kwargs['url'] = urlunsplit((scheme, netloc, path, '', ''))
        kwargs['fragment'] = trim(fragment, 1024)

        return cls(**kwargs)
コード例 #48
0
 def _endpoint_from_url(self, url):
     # Path layout is /api/<app>/<endpoint>/...; pick segments 2 and 3.
     path_segments = urlsplit(url).path.split("/")
     app_name, endpoint_name = path_segments[2:4]
     app_obj = pynetbox.core.app.App(self.api, app_name)
     return getattr(app_obj, endpoint_name)
コード例 #49
0
ファイル: client.py プロジェクト: PlutoVR/metachromium
 def url(self, url):
     # NOTE(review): urlsplit() never returns None for netloc (it is ''
     # when absent), so this branch is dead code as written; if it ever
     # fired it would recurse on itself with the same argument forever.
     # Presumably the intent was to resolve relative URLs against the
     # current session URL -- TODO confirm before changing behavior.
     if urlparse.urlsplit(url).netloc is None:
         return self.url(url)
     # Navigate the session to ``url`` via the WebDriver POST /url command.
     body = {"url": url}
     return self.send_session_command("POST", "url", body)
コード例 #50
0
ファイル: responseloader.py プロジェクト: mirrorweb/pywb
    def load_resource(self, cdx, params):
        """Fetch the resource described by ``cdx`` and wrap it as a WARC
        response record.

        :param cdx: CDX-like dict describing the capture (must carry
            ``load_url``; ``is_live``, ``memento_url``, ``timestamp`` etc.
            alter behavior).
        :param params: Request params; ``_input_req`` holds the original
            incoming request object.
        :returns: ``(warc_headers, http_headers_buff, upstream_res)`` on
            success, ``(None, headers, upstream_res)`` when upstream
            already returned a WARC record, or ``None`` when this loader
            does not apply or the memento is invalid.
        :raises LiveResourceException: when ``load_url`` cannot be
            prepared as a URL.
        """
        # A record with filename+offset is served by the WARC loader, not us.
        if cdx.get('filename') and cdx.get('offset') is not None:
            return None

        load_url = cdx.get('load_url')
        if not load_url:
            return None

        if params.get('content_type') == VideoLoader.CONTENT_TYPE:
            return None

        if self.forward_proxy_prefix and not cdx.get('is_live'):
            load_url = self.forward_proxy_prefix + load_url

        input_req = params['_input_req']

        req_headers = input_req.get_req_headers()

        dt = timestamp_to_datetime(cdx['timestamp'])

        if cdx.get('memento_url'):
            req_headers['Accept-Datetime'] = datetime_to_http_date(dt)

        method = input_req.get_req_method()
        data = input_req.get_req_body()

        # Use requests' PreparedRequest only to canonicalize the URL and
        # derive any Authorization header from credentials embedded in it.
        p = PreparedRequest()
        try:
            p.prepare_url(load_url, None)
        except Exception:
            raise LiveResourceException(load_url)
        p.prepare_headers(None)
        p.prepare_auth(None, load_url)

        auth = p.headers.get('Authorization')
        if auth:
            req_headers['Authorization'] = auth

        load_url = p.url

        # host is set to the actual host for live loading
        # ensure it is set to the load_url host
        if not cdx.get('is_live'):
            #req_headers.pop('Host', '')
            req_headers['Host'] = urlsplit(p.url).netloc

            referrer = cdx.get('set_referrer')
            if referrer:
                req_headers['Referer'] = referrer

        upstream_res = self._do_request_with_redir_check(
            method, load_url, data, req_headers, params, cdx)

        memento_dt = upstream_res.headers.get('Memento-Datetime')
        if memento_dt:
            dt = http_date_to_datetime(memento_dt)
            cdx['timestamp'] = datetime_to_timestamp(dt)
        elif cdx.get('memento_url'):
            # if 'memento_url' set and no Memento-Datetime header present
            # then its an error
            no_except_close(upstream_res)
            return None

        agg_type = upstream_res.headers.get('Warcserver-Type')
        if agg_type == 'warc':
            cdx['source'] = unquote(
                upstream_res.headers.get('Warcserver-Source-Coll'))
            return None, upstream_res.headers, upstream_res

        if upstream_res.version == 11:
            version = '1.1'
        else:
            version = '1.0'

        # Reconstruct the raw HTTP status line and headers as a buffer that
        # will form the payload's HTTP block inside the WARC record.
        status = 'HTTP/{version} {status} {reason}\r\n'
        status = status.format(version=version,
                               status=upstream_res.status,
                               reason=upstream_res.reason)

        http_headers_buff = status

        orig_resp = upstream_res._original_response

        try:  #pragma: no cover
            #PY 3
            resp_headers = orig_resp.headers._headers
            for n, v in resp_headers:
                nl = n.lower()
                if nl in self.SKIP_HEADERS:
                    continue

                if nl in self.UNREWRITE_HEADERS:
                    v = self.unrewrite_header(cdx, v)

                http_headers_buff += n + ': ' + v + '\r\n'

            http_headers_buff += '\r\n'

            try:
                # http headers could be encoded as utf-8 (though non-standard)
                # first try utf-8 encoding
                http_headers_buff = http_headers_buff.encode('utf-8')
            except:
                # then, fall back to latin-1
                http_headers_buff = http_headers_buff.encode('latin-1')

        except:  #pragma: no cover
            #PY 2
            resp_headers = orig_resp.msg.headers

            for line in resp_headers:
                n, v = line.split(':', 1)
                n = n.lower()
                v = v.strip()

                if n in self.SKIP_HEADERS:
                    continue

                new_v = v
                if n in self.UNREWRITE_HEADERS:
                    new_v = self.unrewrite_header(cdx, v)

                if new_v != v:
                    http_headers_buff += n + ': ' + new_v + '\r\n'
                else:
                    http_headers_buff += line

            # if python2, already byte headers, so leave as is
            http_headers_buff += '\r\n'

        # Best-effort peek at the remote peer IP via private urllib3/socket
        # attributes; None when unavailable.
        try:
            fp = upstream_res._fp.fp
            if hasattr(fp, 'raw'):  #pragma: no cover
                fp = fp.raw
            remote_ip = fp._sock.getpeername()[0]
        except:  #pragma: no cover
            remote_ip = None

        # Assemble the WARC record headers describing this response.
        warc_headers = {}

        warc_headers['WARC-Type'] = 'response'
        warc_headers['WARC-Record-ID'] = self._make_warc_id()
        warc_headers['WARC-Target-URI'] = cdx['url']
        warc_headers['WARC-Date'] = datetime_to_iso_date(dt)

        if not cdx.get('is_live'):
            now = datetime.datetime.utcnow()
            warc_headers['WARC-Source-URI'] = cdx.get('load_url')
            warc_headers['WARC-Creation-Date'] = datetime_to_iso_date(now)

        if remote_ip:
            warc_headers['WARC-IP-Address'] = remote_ip

        ct = upstream_res.headers.get('Content-Type')
        if ct:
            metadata = self.get_custom_metadata(ct, dt)
            if metadata:
                warc_headers['WARC-JSON-Metadata'] = json.dumps(metadata)

        warc_headers['Content-Type'] = 'application/http; msgtype=response'

        if method == 'HEAD':
            content_len = 0
        else:
            content_len = upstream_res.headers.get('Content-Length', -1)

        self._set_content_len(content_len, warc_headers,
                              len(http_headers_buff))

        warc_headers = StatusAndHeaders('WARC/1.0', warc_headers.items())
        return (warc_headers, http_headers_buff, upstream_res)
コード例 #51
0
 def __init__(self, url=''):
     """Store the five urlsplit components of *url* as a mutable list.

     An empty/falsy *url* yields an empty component list instead of the
     five empty strings urlsplit would produce.
     """
     self._elem = list(urllib_parse.urlsplit(url)) if url else []
コード例 #52
0
    def resolve_image(self, src):
        """Resolve an image ``src`` reference to content objects and a URL.

        Returns a 4-tuple ``(image, fullimage, src, description)`` where
        ``image`` may be a scale of the unscaled ``fullimage`` object,
        ``src`` is the absolute URL to emit and ``description`` the full
        image's Description text.  Whenever resolution fails — or *src*
        already carries a URL scheme — ``(None, None, src, '')`` is
        returned so the original reference is left untouched.
        """
        description = ''
        if urlsplit(src)[0]:
            # We have a scheme
            return None, None, src, description

        base = self.context
        subpath = src
        appendix = ''

        def traversal_stack(base, path):
            # Walk *path* starting at *base* (site root for absolute paths)
            # and return every object visited, base first.  Returns None as
            # soon as any step cannot be resolved.
            if path.startswith('/'):
                base = getSite()
                path = path[1:]
            obj = base
            stack = [obj]
            components = path.split('/')
            while components:
                child_id = unquote(components.pop(0))
                try:
                    if hasattr(aq_base(obj), 'scale'):
                        # Image-like object: remaining components select a
                        # named scale rather than a contained child.
                        if components:
                            child = obj.scale(child_id, components.pop())
                        else:
                            child = obj.scale(child_id)
                    else:
                        # Do not use restrictedTraverse here; the path to the
                        # image may lead over containers that lack the View
                        # permission for the current user!
                        # Also, if the image itself is not viewable, we rather
                        # show a broken image than hide it or raise
                        # unauthorized here (for the referring document).
                        child = obj.unrestrictedTraverse(str(child_id))
                except ConflictError:
                    raise
                except (AttributeError, KeyError, NotFound, ztkNotFound):
                    return
                obj = child
                stack.append(obj)
            return stack

        def traverse_path(base, path):
            # Like traversal_stack, but only the final object (or None).
            stack = traversal_stack(base, path)
            if stack is None:
                return
            return stack[-1]

        obj, subpath, appendix = self.resolve_link(src)
        if obj is not None:
            # resolved uid
            fullimage = obj
            image = traverse_path(fullimage, subpath)
        elif '/@@' in subpath:
            # split on view
            pos = subpath.find('/@@')
            fullimage = traverse_path(base, subpath[:pos])
            if fullimage is None:
                return None, None, src, description
            image = traverse_path(fullimage, subpath[pos + 1:])
        else:
            stack = traversal_stack(base, subpath)
            if stack is None:
                return None, None, src, description
            image = stack.pop()
            # if it's a scale, find the full image by traversing one less
            fullimage = image
            if not IContentish.providedBy(fullimage):
                stack.reverse()
                # NOTE(review): a 'tag' attribute is taken as the marker of
                # the unscaled image object — confirm against the content
                # types in use.
                for parent in stack:
                    if hasattr(aq_base(parent), 'tag'):
                        fullimage = parent
                        break

        if image is None:
            return None, None, src, description

        try:
            url = image.absolute_url()
        except AttributeError:
            return None, None, src, description
        src = url + appendix
        description = safe_unicode(aq_acquire(fullimage, 'Description')())
        return image, fullimage, src, description
コード例 #53
0
 def payment_domain_name(self):
     """Return the host name that serves payment pages.

     When the basket-page microfrontend is enabled this is the netloc of
     the configured microfrontend URL; otherwise the site's own domain.
     """
     if not self.enable_microfrontend_for_basket_page:
         return self.site.domain
     return urlsplit(self.payment_microfrontend_url).netloc
コード例 #54
0
ファイル: core.py プロジェクト: fangyuan830/python-sasctl
    def __init__(self,
                 hostname,
                 username=None,
                 password=None,
                 authinfo=None,
                 protocol=None,
                 port=None,
                 verify_ssl=None):
        """Open an authenticated HTTP session against a SAS server.

        `hostname` is either a host name/URL or an existing ``swat.CAS``
        connection; in the latter case host, protocol, port and credentials
        are reused (REST-connected CAS sessions only).  If `password` is
        None, credentials are looked up via SWAT authinfo files and then
        ``netrc``.  `verify_ssl` defaults from the SSLREQCERT environment
        variable (verification is on unless it is 'no'/'false').
        """
        super(Session, self).__init__()

        # Determine whether or not server SSL certificates should be verified.
        if verify_ssl is None:
            verify_ssl = os.environ.get('SSLREQCERT', 'yes')
            verify_ssl = str(verify_ssl).lower() not in ('no', 'false')

        self._id = uuid4().hex
        self.message_log = logger.getChild('session.%s' % self._id)

        # If certificate path has already been set for SWAT package, make
        # Requests module reuse it.
        for k in ['SSLCALISTLOC', 'CAS_CLIENT_SSL_CA_LIST']:
            if k in os.environ:
                os.environ['REQUESTS_CA_BUNDLE'] = os.environ[k]
                break

        # If certificate path hasn't been specified in either environment
        # variable, replace the default adapter with one that will use the
        # machine's default SSL _settings.
        if 'REQUESTS_CA_BUNDLE' not in os.environ:
            if verify_ssl:
                # Skip hostname verification if IP address specified instead
                # of DNS name.  Prevents error from urllib3.
                try:
                    from urllib3.util.ssl_ import is_ipaddress
                except ImportError:
                    # is_ipaddress not present in older versions of urllib3;
                    # fall back to a simple dotted-quad pattern (IPv4 only).
                    def is_ipaddress(hst):
                        return re.match(r"^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$",
                                        hst)

                verify_hostname = not is_ipaddress(hostname)
                adapter = SSLContextAdapter(assert_hostname=verify_hostname)

                self.mount('https://', adapter)

            else:
                # Every request will generate an InsecureRequestWarning
                from urllib3.exceptions import InsecureRequestWarning
                warnings.simplefilter('default', InsecureRequestWarning)

        self.filters = DEFAULT_FILTERS

        # Used for context manager
        self._old_session = None

        # Reuse an existing CAS connection if possible
        if swat and isinstance(hostname, swat.CAS):
            if isinstance(hostname._sw_connection,
                          swat.cas.rest.connection.REST_CASConnection):
                import base64

                # Use the httpAddress action to retrieve information about
                # REST endpoints
                httpAddress = hostname.get_action('builtins.httpAddress')
                address = httpAddress()
                domain = address.virtualHost
                # httpAddress action may return virtualHost = ''
                # if this happens, try the CAS host
                if not domain:
                    domain = hostname._sw_connection._current_hostname
                protocol = address.protocol
                port = address.port
                # Recover username/password from the connection's stored
                # HTTP Basic credentials.
                auth = hostname._sw_connection._auth.decode('utf-8').replace(
                    'Basic ', '')
                username, password = base64.b64decode(auth).decode(
                    'utf-8').split(':')
            else:
                raise ValueError("A 'swat.CAS' session can only be reused "
                                 "when it's connected via the REST APIs.")
        else:
            url = urlsplit(hostname)

            # Extract http/https from domain name if present and protocol not
            # explicitly given
            protocol = protocol or url.scheme

            domain = url.hostname or str(hostname)

        self._settings = {
            'protocol': protocol or 'https',
            'domain': domain,
            'port': port,
            'username': username,
            'password': password
        }

        if self._settings['password'] is None:
            # Try to get credentials from .authinfo or .netrc files.
            # If no file path was specified, the default locations will
            # be checked.
            if 'swat' in sys.modules:
                auth = swat.utils.authinfo.query_authinfo(domain,
                                                          user=username,
                                                          path=authinfo)
                self._settings['username'] = auth.get('user')
                self._settings['password'] = auth.get('password')

            # Not able to load credentials using SWAT.  Try Netrc.
            # TODO: IF a username was specified, verify that the credentials
            #       found are for that username.
            if self._settings['password'] is None:
                try:
                    parser = netrc.netrc(authinfo)
                    values = parser.authenticators(domain)
                    if values:
                        self._settings['username'], \
                        _, \
                        self._settings['password'] = values
                except (OSError, IOError):
                    pass  # netrc throws if $HOME is not set

        self.verify = verify_ssl
        # get_token() performs the actual authentication round-trip.
        self.auth = HTTPBearerAuth(self.get_token())

        # Register as the ambient session if none exists yet.
        if current_session() is None:
            current_session(self)
コード例 #55
0
    def __init__(self, **settings):
        """Authenticate with Swift and ensure the target container exists.

        Any keyword matching an existing class attribute overrides that
        attribute before validation.  NOTE(review): this constructor does
        network I/O (auth token request, container HEAD and possibly PUT).
        Raises ImproperlyConfigured when the container is missing and
        automatic creation is disabled.
        """
        # check if some of the settings provided as class attributes
        # should be overwritten
        for name, value in settings.items():
            if hasattr(self, name):
                setattr(self, name, value)

        validate_settings(self)

        # NOTE(review): presumably a one-entry cache of the most recently
        # looked-up object's headers, filled elsewhere — verify usage.
        self.last_headers_name = None
        self.last_headers_value = None

        # OpenStack identity scoping options forwarded to get_auth().
        self.os_options = {
            'tenant_id': self.tenant_id,
            'tenant_name': self.tenant_name,
            'user_domain_id': self.user_domain_id,
            'user_domain_name': self.user_domain_name,
            'project_domain_id': self.project_domain_id,
            'project_domain_name': self.project_domain_name,
            'region_name': self.region_name,
        }
        self.os_options.update(self.os_extra_options)

        # Get authentication token
        self.storage_url, self.token = swiftclient.get_auth(
            self.api_auth_url,
            self.api_username,
            self.api_key,
            auth_version=self.auth_version,
            os_options=self.os_options)
        self.http_conn = swiftclient.http_connection(self.storage_url)

        # Check container; on failure, create it if configured to do so.
        try:
            swiftclient.head_container(self.storage_url,
                                       self.token,
                                       self.container_name,
                                       http_conn=self.http_conn)
        except swiftclient.ClientException:
            headers = {}
            if self.auto_create_container:
                if self.auto_create_container_public:
                    # '.r:*' grants world-readable access to the container.
                    headers['X-Container-Read'] = '.r:*'
                # (sic: 'orgin' spelling matches the declared setting name)
                if self.auto_create_container_allow_orgin:
                    headers['X-Container-Meta-Access-Control-Allow-Origin'] = \
                        self.auto_create_container_allow_orgin
                swiftclient.put_container(self.storage_url,
                                          self.token,
                                          self.container_name,
                                          http_conn=self.http_conn,
                                          headers=headers)
            else:
                raise ImproperlyConfigured("Container %s does not exist." %
                                           self.container_name)

        if self.auto_base_url:
            # Derive a base URL based on the authentication information from
            # the server, optionally overriding the protocol, host/port and
            # potentially adding a path fragment before the auth information.
            self.base_url = self.storage_url + '/'
            if self.override_base_url is not None:
                # override the protocol and host, append any path fragments
                split_derived = urlparse.urlsplit(self.base_url)
                split_override = urlparse.urlsplit(self.override_base_url)
                split_result = [''] * 5
                split_result[0:2] = split_override[0:2]
                # Joined path: override's path prefix + derived path,
                # with accidental double slashes collapsed.
                split_result[2] = (split_override[2] +
                                   split_derived[2]).replace('//', '/')
                self.base_url = urlparse.urlunsplit(split_result)

            self.base_url = urlparse.urljoin(self.base_url,
                                             self.container_name)
            self.base_url += '/'
        else:
            self.base_url = self.override_base_url
コード例 #56
0
ファイル: Element.py プロジェクト: abay123/thug
    def setAttribute(self, name, value):
        """Set attribute *name* to *value* on the wrapped tag.

        Emulates browser-personality quirks for Firefox ('style' and
        'type' values are filtered by the emulated browser version) and,
        for 'src'/'archive' attributes, dispatches the referenced URL to
        scheme handlers or fetches it so MIME handlers can inspect the
        content.
        """
        if log.ThugOpts.features_logging:
            log.ThugLogging.Features.increase_setattribute_count()

        if not isinstance(name, six.string_types):
            name = str(name)

        if log.ThugOpts.Personality.isFirefox():
            if name in ('style', ):
                # Convert the hyphenated CSS property into camelCase,
                # e.g. 'background-color' -> 'backgroundColor'.
                svalue = value.split('-')

                _value = svalue[0]
                if len(svalue) > 1:
                    _value = '{}{}'.format(
                        _value, ''.join([s.capitalize() for s in svalue[1:]]))

                # Only record styles supported by the emulated Firefox
                # major version (FF_STYLES entries are (version, name)).
                for css in [
                        p for p in FF_STYLES
                        if log.ThugOpts.Personality.browserMajorVersion >= p[0]
                ]:
                    if css[1] in value:
                        self.tag[name] = _value
                return

            if name in ('type', ):
                # Only record input types supported by the emulated
                # Firefox major version.
                for _input in [
                        p for p in FF_INPUTS
                        if log.ThugOpts.Personality.browserMajorVersion > p[0]
                ]:
                    if _input[1] in value:
                        self.tag[name] = value
                return

        self.tag[name] = value

        if name.lower() in ('src', 'archive'):
            # Resource reference: try a scheme-specific handler first
            # (e.g. handle_javascript, handle_data).
            s = urlparse.urlsplit(value)

            handler = getattr(log.SchemeHandler, 'handle_%s' % (s.scheme, ),
                              None)
            if handler:
                handler(self.doc.window, value)
                return

            # Otherwise fetch the URL; any failure is silently ignored so
            # attribute assignment itself never raises.
            try:
                response = self.doc.window._navigator.fetch(
                    value, redirect_type="element workaround")
            except Exception:
                return

            if response is None:
                return

            if response.status_code == 404:
                return

            # Hand the fetched content to a registered MIME-type handler,
            # if any.
            ctype = response.headers.get('content-type', None)
            if ctype is None:
                return

            handler = log.MIMEHandler.get_handler(ctype)
            if handler:
                handler(self.doc.window.url, response.content)
コード例 #57
0
ファイル: test_blob.py プロジェクト: Parthi10/gcloud-python-1
    def test_upload_from_file_resumable(self):
        """Uploads above the resumable threshold go up in ranged chunks.

        Verifies three requests: the initiating POST (upload metadata and
        query parameters) and two chunked PUTs with the expected
        Content-Range headers and body slices.
        """
        from six.moves.http_client import OK
        from six.moves.urllib.parse import parse_qsl
        from six.moves.urllib.parse import urlsplit
        from gcloud._testing import _Monkey
        from gcloud._testing import _NamedTemporaryFile
        from gcloud.streaming import http_wrapper
        from gcloud.streaming import transfer

        BLOB_NAME = 'blob-name'
        UPLOAD_URL = 'http://example.com/upload/name/key'
        DATA = b'ABCDEF'
        loc_response = {'status': OK, 'location': UPLOAD_URL}
        chunk1_response = {'status': http_wrapper.RESUME_INCOMPLETE,
                           'range': 'bytes 0-4'}
        chunk2_response = {'status': OK}
        # Need valid JSON on last response, since resumable.
        connection = _Connection(
            (loc_response, b''),
            (chunk1_response, b''),
            (chunk2_response, b'{}'),
        )
        client = _Client(connection)
        bucket = _Bucket(client)
        blob = self._makeOne(BLOB_NAME, bucket=bucket)
        blob._CHUNK_SIZE_MULTIPLE = 1
        blob.chunk_size = 5

        # Set the threshold low enough that we force a resumable upload.
        with _Monkey(transfer, RESUMABLE_UPLOAD_THRESHOLD=5):
            with _NamedTemporaryFile() as temp:
                with open(temp.name, 'wb') as file_obj:
                    file_obj.write(DATA)
                with open(temp.name, 'rb') as file_obj:
                    blob.upload_from_file(file_obj, rewind=True)

        rq = connection.http._requested
        self.assertEqual(len(rq), 3)

        # Requested[0]: the initiating POST.
        headers = dict(
            [(x.title(), str(y)) for x, y in rq[0].pop('headers').items()])
        self.assertEqual(headers['X-Upload-Content-Length'], '6')
        self.assertEqual(headers['X-Upload-Content-Type'],
                         'application/octet-stream')

        uri = rq[0].pop('uri')
        scheme, netloc, path, qs, _ = urlsplit(uri)
        self.assertEqual(scheme, 'http')
        self.assertEqual(netloc, 'example.com')
        self.assertEqual(path, '/b/name/o')
        self.assertEqual(dict(parse_qsl(qs)),
                         {'uploadType': 'resumable', 'name': BLOB_NAME})
        self.assertEqual(rq[0], {
            'method': 'POST',
            'body': '',
            'connection_type': None,
            'redirections': 5,
        })

        # Requested[1]: first chunk (bytes 0-4).
        headers = dict(
            [(x.title(), str(y)) for x, y in rq[1].pop('headers').items()])
        self.assertEqual(headers['Content-Range'], 'bytes 0-4/6')
        self.assertEqual(rq[1], {
            'method': 'PUT',
            'uri': UPLOAD_URL,
            'body': DATA[:5],
            'connection_type': None,
            'redirections': 5,
        })

        # Requested[2]: final chunk (byte 5).
        headers = dict(
            [(x.title(), str(y)) for x, y in rq[2].pop('headers').items()])
        self.assertEqual(headers['Content-Range'], 'bytes 5-5/6')
        self.assertEqual(rq[2], {
            'method': 'PUT',
            'uri': UPLOAD_URL,
            'body': DATA[5:],
            'connection_type': None,
            'redirections': 5,
        })
コード例 #58
0
def get_path(url):
    """Return *url* reduced to its path, query and fragment.

    Scheme and netloc are stripped; an empty path is normalized to '/'.
    """
    parts = urlsplit(url)
    path = parts.path if parts.path else '/'
    return urlunsplit(('', '', path, parts.query, parts.fragment))
コード例 #59
0
ファイル: requirements.py プロジェクト: jkp/pipenv
    def get_link_from_line(cls, line):
        """Parse link information from given requirement line.

        Return a 6-tuple:

        - `vcs_type` indicates the VCS to use (e.g. "git"), or None.
        - `prefer` is either "file", "path" or "uri", indicating how the
            information should be used in later stages.
        - `relpath` is the relative path to use when recording the dependency,
            instead of the absolute path/URI used to perform installation.
            This can be None (to prefer the absolute path or URI).
        - `path` is the absolute file path to the package. This will always use
            forward slashes. Can be None if the line is a remote URI.
        - `uri` is the absolute URI to the package. Can be None if the line is
            not a URI.
        - `link` is an instance of :class:`pip._internal.index.Link`,
            representing a URI parse result based on the value of `uri`.

        This function is provided to deal with edge cases concerning URIs
        without a valid netloc. Those URIs are problematic to a straight
        ``urlsplit`` call because they cannot be reliably reconstructed with
        ``urlunsplit`` due to a bug in the standard library:

        >>> from urllib.parse import urlsplit, urlunsplit
        >>> urlunsplit(urlsplit('git+file:///this/breaks'))
        'git+file:/this/breaks'
        >>> urlunsplit(urlsplit('file:///this/works'))
        'file:///this/works'

        See `https://bugs.python.org/issue23505#msg277350`.
        """
        # Git allows `[email protected]...` lines that are not really URIs.
        # Add "ssh://" so we can parse correctly, and restore afterwards.
        fixed_line = add_ssh_scheme_to_git_uri(line)
        added_ssh_scheme = fixed_line != line

        # We can assume a lot of things if this is a local filesystem path.
        if "://" not in fixed_line:
            p = Path(fixed_line).absolute()
            path = p.as_posix()
            uri = p.as_uri()
            link = Link(uri)
            try:
                relpath = get_converted_relative_path(path)
            except ValueError:
                relpath = None
            return LinkInfo(None, "path", relpath, path, uri, link)

        # This is an URI. We'll need to perform some elaborated parsing.

        parsed_url = urllib_parse.urlsplit(fixed_line)
        # Keep the pre-rewrite parse around: it is needed below to restore
        # the original "user@host:path" form.  Binding it unconditionally
        # (not only inside the branch) fixes a latent UnboundLocalError
        # when a scheme was added but the netloc contains no ':'.
        original_url = parsed_url
        if added_ssh_scheme and ':' in parsed_url.netloc:
            # Move everything after the last ':' in the netloc back into
            # the path, undoing urlsplit's "host:port" interpretation of
            # SCP-style "host:path" addresses.
            original_netloc, original_path_start = parsed_url.netloc.rsplit(':', 1)
            uri_path = '/{0}{1}'.format(original_path_start, parsed_url.path)
            parsed_url = original_url._replace(netloc=original_netloc, path=uri_path)

        # Split the VCS part out if needed.
        original_scheme = parsed_url.scheme
        if "+" in original_scheme:
            vcs_type, scheme = original_scheme.split("+", 1)
            parsed_url = parsed_url._replace(scheme=scheme)
            prefer = "uri"
        else:
            vcs_type = None
            prefer = "file"

        if parsed_url.scheme == "file" and parsed_url.path:
            # This is a "file://" URI. Use url_to_path and path_to_url to
            # ensure the path is absolute. Also we need to build relpath.
            path = Path(url_to_path(urllib_parse.urlunsplit(parsed_url))).as_posix()
            try:
                relpath = get_converted_relative_path(path)
            except ValueError:
                relpath = None
            uri = path_to_url(path)
        else:
            # This is a remote URI. Simply use it.
            path = None
            relpath = None
            # Cut the fragment, but otherwise this is fixed_line.
            uri = urllib_parse.urlunsplit(
                parsed_url._replace(scheme=original_scheme, fragment="")
            )

        if added_ssh_scheme:
            # Restore the SCP-like form that the temporary "ssh://" scheme
            # replaced, so the recorded URI matches what the user wrote.
            original_uri = urllib_parse.urlunsplit(original_url._replace(scheme=original_scheme, fragment=""))
            uri = strip_ssh_from_git_uri(original_uri)

        # Re-attach VCS prefix to build a Link.
        link = Link(
            urllib_parse.urlunsplit(parsed_url._replace(scheme=original_scheme))
        )

        return LinkInfo(vcs_type, prefer, relpath, path, uri, link)
コード例 #60
0
def has_in_url_path(url, subs):
    """Test if any of `subs` strings is present in the `url` path.

    Only the path component is examined: matches in the scheme, netloc,
    query string or fragment do not count.  An empty `subs` yields False.
    """
    # Generator (not a list) inside any() short-circuits on first match;
    # also drops the four unused components the old tuple-unpack created.
    path = urlparse.urlsplit(url).path
    return any(sub in path for sub in subs)