예제 #1
0
 def test_mixed_types_rejected(self):
     # Several functions that process either strings or ASCII encoded bytes
     # accept multiple arguments. Check they reject mixed type input
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlparse("www.python.org", b"http")
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlparse(b"www.python.org", "http")
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlsplit("www.python.org", b"http")
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlsplit(b"www.python.org", "http")
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlunparse(
             (b"http", "www.python.org", "", "", "", ""))
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlunparse(
             ("http", b"www.python.org", "", "", "", ""))
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlunsplit((b"http", "www.python.org", "", "", ""))
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlunsplit(("http", b"www.python.org", "", "", ""))
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urljoin("http://python.org", b"http://python.org")
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urljoin(b"http://python.org", "http://python.org")
예제 #2
0
 def checkJoin(self, base, relurl, expected):
     str_components = (base, relurl, expected)
     self.assertEqual(urllib_parse.urljoin(base, relurl), expected)
     bytes_components = baseb, relurlb, expectedb = [
         x.encode('ascii') for x in str_components
     ]
     self.assertEqual(urllib_parse.urljoin(baseb, relurlb), expectedb)
예제 #3
0
def parse(html):
    """Parse an article-style HTML page.

    Returns a (title, page_urls, url) tuple:
      * title     -- stripped text of the first <h1>
      * page_urls -- set of absolute URLs built from every <a> whose href
                     matches '^/.+?/$' (relative section-style paths),
                     resolved against the module-level ``base_url``
      * url       -- canonical URL from the <meta property="og:url"> tag

    Raises AttributeError / TypeError if the expected <h1> or og:url meta
    tag is absent (``find`` returns None), matching the original behavior.
    """
    soup = BeautifulSoup(html, 'lxml')
    urls = soup.find_all('a', {"href": re.compile('^/.+?/$')})
    title = soup.find('h1').get_text().strip()

    # Set comprehension instead of set([...]) -- same result, no throwaway list.
    page_urls = {urljoin(base_url, url['href']) for url in urls}
    url = soup.find('meta', {'property': "og:url"})['content']

    return title, page_urls, url
예제 #4
0
 def get_attachments(self, obj):
     """Return a ', '-joined string of "<file_type> - <absolute url>" for
     every attachment of *obj*: each TPM activity's attachments followed by
     its report attachments, then obj's own report attachments."""
     def _iter_attachments():
         # Same traversal order as chaining the per-activity querysets
         # followed by the object's own report attachments.
         for activity in obj.tpm_activities.all():
             yield from activity.attachments.all()
             yield from activity.report_attachments.all()
         yield from obj.report_attachments.all()

     return ', '.join(
         '{} - {}'.format(att.file_type, urljoin(site_url(), att.url))
         for att in _iter_attachments())
예제 #5
0
 def test_mixed_types_rejected(self):
     """URL helpers that take several str-or-bytes arguments must raise
     TypeError ("Cannot mix str...") when the argument types are mixed."""
     # Several functions that process either strings or ASCII encoded bytes
     # accept multiple arguments. Check they reject mixed type input
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlparse("www.python.org", b"http")
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlparse(b"www.python.org", "http")
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlsplit("www.python.org", b"http")
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlsplit(b"www.python.org", "http")
     # The un* functions take a single components tuple; mixing types
     # inside the tuple must be rejected as well.
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlunparse(( b"http", "www.python.org","","","",""))
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlunparse(("http", b"www.python.org","","","",""))
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlunsplit((b"http", "www.python.org","","",""))
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlunsplit(("http", b"www.python.org","","",""))
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urljoin("http://python.org", b"http://python.org")
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urljoin(b"http://python.org", "http://python.org")
예제 #6
0
File: helper.py  Project: yemilawal/phigaro
def download_file(filename, base_url, out_dir):
    """Download *filename* from *base_url* into *out_dir* via wget.

    If the destination file already exists, the user is asked whether to
    overwrite it; answering no aborts the download.
    """
    url = urljoin(base_url, filename)
    out = join(out_dir, filename)
    # Ask before clobbering an existing file; bail out on a "no".
    if exists(out) and not _confirm_overwrite(out):
        return

    print('Downloading {url} to {out}'.format(
        url=url,
        out=out,
    ))
    sh.wget('-O', out, url, _tty_out=True)


def _confirm_overwrite(out):
    """Prompt until the user answers yes or no; return True to overwrite."""
    while True:
        answer = input(
            'File {} already exists, overwrite it (Y/N)?'.format(out)).upper()
        if answer in {'Y', 'YES'}:
            return True
        if answer in {'N', 'NO'}:
            return False
예제 #7
0
def endpoint_ava(endp, baseurl):
    """Build a one-entry AVA dict: '<etype>_endpoint' -> absolute endpoint URL,
    with endp.url resolved against *baseurl*."""
    return {'{}_endpoint'.format(endp.etype): urljoin(baseurl, endp.url)}
예제 #8
0
 def checkJoin(self, base, relurl, expected):
     str_components = (base, relurl, expected)
     self.assertEqual(urllib_parse.urljoin(base, relurl), expected)
     bytes_components = baseb, relurlb, expectedb = [
                         x.encode('ascii') for x in str_components]
     self.assertEqual(urllib_parse.urljoin(baseb, relurlb), expectedb)
예제 #9
0
def endpoint_ava(endp, baseurl):
    """Return {'<etype>_endpoint': absolute_url} for one endpoint description.

    *endp* must expose ``.etype`` (endpoint type name) and ``.url``;
    ``endp.url`` is resolved against *baseurl* with urljoin.
    """
    key = '{}_endpoint'.format(endp.etype)
    val = urljoin(baseurl, endp.url)
    return {key: val}
예제 #10
0
File: utils.py  Project: nixworks/etools
def get_token_auth_link(user):
    """Build the email-auth login URL for *user*, carrying a freshly created
    callback token as a query parameter."""
    token = create_callback_token_for_user(user, 'email')
    # Resolve the login view against the site base URL, then append the token.
    login_url = urljoin(site_url(), reverse('email_auth:login'))
    return update_url_with_kwargs(login_url, token=token)