def test_quoted_userinfo():
    """Userinfo (username/password) is percent-encoded by full_quote."""
    url = URL('http://wikipedia.org')
    url.username = u'user'
    url.password = u'p@ss'
    # FIX: the expected literal had been mangled to 'http://*****:*****@...'
    # (a credential-masking artifact). With username 'user' and password
    # 'p@ss', full_quote must percent-encode the '@' in the password.
    assert url.to_text(full_quote=True) == 'http://user:p%[email protected]'

    url = URL(u'http://beyonc\xe9:b\[email protected]')
    # assert url.to_text(full_quote=False) == u'http://beyoncé:b%C3%[email protected]'
    assert (url.to_text(full_quote=True)
            == u'http://beyonc%C3%A9:b%C3%[email protected]')
def test_idna():
    """Hosts round-trip between Unicode and IDNA (punycode) forms."""
    unicode_url = URL(u'http://bücher.ch')
    assert unicode_url.host == u'bücher.ch'
    # full quoting produces the ASCII punycode form; unquoted keeps Unicode
    assert unicode_url.to_text(full_quote=True) == 'http://xn--bcher-kva.ch'
    assert unicode_url.to_text(full_quote=False) == u'http://bücher.ch'

    punycode_url = URL('https://xn--bcher-kva.ch')
    assert punycode_url.host == u'bücher.ch'
    assert punycode_url.to_text(full_quote=True) == 'https://xn--bcher-kva.ch'
    assert punycode_url.to_text(full_quote=False) == u'https://bücher.ch'
def test_unicodey():
    """Unquoted text round-trips combining characters; full_quote encodes."""
    unicodey = (u'http://\N{LATIN SMALL LETTER E WITH ACUTE}.com/'
                u'\N{LATIN SMALL LETTER E}\N{COMBINING ACUTE ACCENT}'
                u'?\N{LATIN SMALL LETTER A}\N{COMBINING ACUTE ACCENT}='
                u'\N{LATIN SMALL LETTER I}\N{COMBINING ACUTE ACCENT}'
                u'#\N{LATIN SMALL LETTER U}\N{COMBINING ACUTE ACCENT}')
    url = URL(unicodey)
    assert url.host == u'é.com'
    expected_part = u'\N{LATIN SMALL LETTER E}\N{COMBINING ACUTE ACCENT}'
    assert url.path_parts[1] == expected_part
    # lossless round-trip when not quoting
    assert url.to_text(full_quote=False) == unicodey
    # IDNA host + percent-encoded path/query/fragment when fully quoted
    assert (url.to_text(full_quote=True)
            == 'http://xn--9ca.com/%C3%A9?%C3%A1=%C3%AD#%C3%BA')
def get_gh_project_info(url):
    """Collect release statistics for a GitHub project via the REST API.

    :param url: a GitHub project URL whose path is /<org>/<repo>[/...]
    :returns: dict with star_count, release_count, first/latest release
        fields, release_count_zv, is_zerover, and (for projects no longer
        zero-versioned) last_zv_release fields.
    """
    ret = {}

    # rstrip guards against a trailing slash producing an empty path part
    # (consistent with the info-dict variant of this function)
    org, repo = URL(url.rstrip('/')).path_parts[1:]
    gh_url = URL('https://api.github.com/repos')
    gh_url.path_parts += (org, repo)

    project_url = gh_url.to_text()
    project_data = _get_gh_json(project_url)
    ret['star_count'] = project_data['stargazers_count']

    gh_url.path_parts += ('tags',)
    tags_url = gh_url.to_text()
    tags_data = _get_gh_json(tags_url)
    vtags_data = [td for td in tags_data if match_vtag(td['name'], PREFIXES)]
    ret['release_count'] = len(vtags_data)

    # NOTE(review): assumes the tags API returns newest-first, so the last
    # entry is the earliest release — confirm, or sort by version key.
    first_release = vtags_data[-1]
    first_release_data = _get_gh_rel_data(first_release, PREFIXES)
    for k, v in first_release_data.items():
        ret['first_release_%s' % k] = v

    latest_release = vtags_data[0]
    latest_release_data = _get_gh_rel_data(latest_release, PREFIXES)
    for k, v in latest_release_data.items():
        ret['latest_release_%s' % k] = v

    zv_releases = [rel for rel in vtags_data
                   if match_vtag(rel['name'], PREFIXES).group('major') == '0']
    ret['release_count_zv'] = len(zv_releases)
    # parenthesized print is valid under both Python 2 and Python 3
    print(' .. %s releases, %s 0ver' % (ret['release_count'],
                                        ret['release_count_zv']))

    # FIX: was `zv_releases[0] == latest_release`, which raises IndexError
    # when there are no zero-version releases; membership test is safe.
    is_zerover = latest_release in zv_releases
    ret['is_zerover'] = is_zerover

    if is_zerover:
        return ret

    last_zv_release = zv_releases[0]
    last_zv_release_data = _get_gh_rel_data(last_zv_release, PREFIXES)
    for k, v in last_zv_release_data.items():
        ret['last_zv_release_%s' % k] = v

    return ret
def compose_url(base_url, url):
    """Resolve *url* against *base_url* and return the absolute URL text.

    A *url* that already carries a scheme is treated as absolute and
    returned as-is (re-serialized).
    """
    base = URL(base_url)
    target = URL(url)
    if target.scheme:
        return target.to_text()
    # relative reference: navigate from the base
    return base.navigate(target.to_text()).to_text()
def test_parse_equals_in_qp_value():
    """Only the first '=' splits a query pair; later ones stay in the value."""
    url = URL('http://localhost/?=x=x=x')
    assert url.q[''] == 'x=x=x'
    # re-serialization percent-encodes the embedded '=' characters
    assert url.to_text() == 'http://localhost/?=x%3Dx%3Dx'

    url = URL('http://localhost/?foo=x=x=x&bar=y')
    assert url.q['foo'] == 'x=x=x'
    assert url.q['bar'] == 'y'
def test_chained_navigate(expected, base, paths):
    """Chained :meth:`navigate` calls produces correct results."""
    current = URL(base)
    for step in paths:
        current = current.navigate(step)
    assert expected == current.to_text()
def _test_bad_utf8():  # not part of the API
    """Undecodable percent-escapes are left quoted; valid UTF-8 is decoded."""
    bad_bin_url = 'http://xn--9ca.com/%00%FF/%C3%A9'
    expected = ('http://\N{LATIN SMALL LETTER E WITH ACUTE}.com/'
                '%00%FF/'
                '\N{LATIN SMALL LETTER E WITH ACUTE}')
    actual = URL(bad_bin_url).to_text()
    assert expected == actual
def test_netloc_slashes():
    """Scheme/netloc slash conventions survive parsing and serialization."""
    # basic sanity checks
    url = URL('mailto:[email protected]')
    assert url.scheme == 'mailto'
    assert url.to_text() == 'mailto:[email protected]'

    url = URL('http://hatnote.com')
    assert url.scheme == 'http'
    assert url.to_text() == 'http://hatnote.com'

    # test that unrecognized schemes stay consistent with '//'
    for text, scheme in [('newscheme:a:b:c', 'newscheme'),
                         ('newerscheme://a/b/c', 'newerscheme')]:
        url = URL(text)
        assert url.scheme == scheme
        assert url.to_text() == text

    # test that reasonable guesses are made
    url = URL('git+ftp://gitstub.biz/glyph/lefkowitz')
    assert url.scheme == 'git+ftp'
    assert url.to_text() == 'git+ftp://gitstub.biz/glyph/lefkowitz'

    url = URL('what+mailto:[email protected]')
    assert url.scheme == 'what+mailto'
    assert url.to_text() == 'what+mailto:[email protected]'

    url = URL()
    url.scheme = 'ztp'
    url.path = '/x/y/z'
    assert url.to_text() == 'ztp:/x/y/z'

    # also works when the input doesn't include '//'
    url = URL()
    url.scheme = 'git+ftp'
    url.path = '/x/y/z/'
    assert url.to_text() == 'git+ftp:///x/y/z/'

    # really why would this ever come up but ok
    url = URL('file:///path/to/heck')
    url.scheme = 'mailto'
    assert url.to_text() == 'mailto:/path/to/heck'
def get_gh_project_info(info):
    """Collect release statistics for a project's GitHub repo via the API.

    :param info: project dict; reads 'gh_url' (required for any output)
        and optionally 'first_release_version'.
    :returns: dict with star_count, release_count, latest/first release
        fields, release_count_zv, is_zerover, and — once a project has left
        zero-ver — last_zv_release_version and first_nonzv_release fields.
        Empty dict when no 'gh_url' is present.
    """
    ret = {}

    url = info.get('gh_url')
    if url is None:
        return ret

    # rstrip guards against a trailing slash producing an empty path part
    org, repo = URL(url.rstrip('/')).path_parts[1:]
    gh_url = URL('https://api.github.com/repos')
    gh_url.path_parts += (org, repo)

    project_url = gh_url.to_text()
    project_data = _get_gh_json(project_url)
    ret['star_count'] = project_data['stargazers_count']

    gh_url.path_parts += ('tags',)
    tags_url = gh_url.to_text()
    tags_data = _get_gh_json(tags_url)
    vtags_data = [td for td in tags_data if match_vtag(td['name'], PREFIXES)]
    ret['release_count'] = len(vtags_data)

    latest_release = vtags_data[0]
    latest_release_data = _get_gh_rel_data(latest_release, PREFIXES)
    for k, v in latest_release_data.items():
        ret['latest_release_%s' % k] = v

    vtags_data.sort(key=lambda x: version_key(x['name'], PREFIXES),
                    reverse=True)

    first_release_version = info.get('first_release_version')
    if first_release_version is None:
        # FIX: version_key was called here without PREFIXES, unlike the
        # sort key above — pass it so both parse tag names the same way.
        first_release = [
            v for v in vtags_data
            if (version_key(v['name'], PREFIXES)
                < version_key(latest_release['name'], PREFIXES))
        ][-1]
    else:
        first_release = [v for v in vtags_data
                         if v['name'] == first_release_version][0]
    first_release_data = _get_gh_rel_data(first_release, PREFIXES)
    for k, v in first_release_data.items():
        ret['first_release_%s' % k] = v

    zv_releases = [rel for rel in vtags_data
                   if match_vtag(rel['name'], PREFIXES).group('major') == '0']
    ret['release_count_zv'] = len(zv_releases)
    # parenthesized print is valid under both Python 2 and Python 3
    print(' .. %s releases, %s 0ver' % (ret['release_count'],
                                        ret['release_count_zv']))

    is_zerover = latest_release in zv_releases
    ret['is_zerover'] = is_zerover

    if is_zerover:
        return ret

    # vtags_data is sorted newest-first, so the entry just before the
    # newest zero-version release is the first non-zero-version release
    last_zv_release = zv_releases[0]
    first_nonzv_release = vtags_data[vtags_data.index(last_zv_release) - 1]
    first_nonzv_release_data = _get_gh_rel_data(first_nonzv_release, PREFIXES)
    ret['last_zv_release_version'] = last_zv_release['name']
    for k, v in first_nonzv_release_data.items():
        ret['first_nonzv_release_%s' % k] = v

    return ret
def process_bind_param(self, value: 'BoltonsUrl', dialect):
    """Serialize a URL object to its text form for database storage.

    NOTE(review): looks like a SQLAlchemy TypeDecorator hook — the
    *dialect* argument is unused here; confirm against the enclosing class.

    :param value: URL object being bound into a statement.
    :param dialect: database dialect in use (ignored).
    :returns: the URL rendered as text via ``to_text()``.
    """
    return value.to_text()
def test_mailto():
    """mailto: URLs parse and round-trip without gaining '//'."""
    text = 'mailto:[email protected]'
    parsed = URL(text)
    assert parsed.scheme == 'mailto'
    assert parsed.to_text() == text
def test_basic():
    """to_text returns text (py2 ``unicode``) and the host parses out."""
    parsed = URL('http://googlewebsite.com/e-shops.aspx')
    assert isinstance(parsed.to_text(), unicode)
    assert parsed.host == 'googlewebsite.com'
def test_self_normalize():
    """normalize() resolves dot-segments in place, keeping query/fragment."""
    url = URL('http://hatnote.com/a/../../b?k=v#hashtags')
    url.normalize()
    assert url.to_text() == 'http://hatnote.com/b?k=v#hashtags'
def test_quoted_userinfo():
    """Userinfo (username/password) is percent-encoded by full_quote."""
    url = URL('http://wikipedia.org')
    url.username = u'user'
    url.password = u'p@ss'
    # FIX: the expected literal had been mangled to 'http://*****:*****@...'
    # (a credential-masking artifact). With username 'user' and password
    # 'p@ss', full_quote must percent-encode the '@' in the password.
    assert url.to_text(full_quote=True) == 'http://user:p%[email protected]'
def test_userinfo():
    """Userinfo parses up to the first '@'; later '@'s belong to the path."""
    # FIX: the URL literal had been credential-masked to '*****:*****';
    # the username/password asserts below pin the intended values.
    url = URL('http://someuser:[email protected]/some-segment@ignore')
    assert url.username == 'someuser'
    assert url.password == 'somepassword'
    assert (url.to_text()
            == 'http://someuser:[email protected]/some-segment@ignore')
def test_iri_path():
    """Non-ASCII path characters survive parsing and are quoted on demand."""
    url = URL(u'http://minerals.mountain.ore/rock/\N{SHAMROCK}/')
    assert url.path == u'/rock/\N{SHAMROCK}/'
    # SHAMROCK is UTF-8 e2 98 98 when fully quoted
    assert url.to_text(full_quote=True).endswith('%E2%98%98/')