def traverse_tree(self, elem, page=None, __tag_page_href=moin_page.page_href, __tag_link=_tag_xlink_href, __tag_include=_tag_xinclude_href):
    """
    Traverses the tree and handles each element appropriately.

    Recursively walks the DOM, dispatching each element's link
    (xlink:href) or transclusion (xinclude:href) attribute to the
    matching handler based on the target IRI's scheme.

    :param elem: current DOM element, visited depth-first
    :param page: Iri of the page this element belongs to; replaced
                 whenever an element carries its own page-href attribute
    :returns: elem (modified in place)

    The ``__tag_*`` defaults are bound once at definition time as a
    attribute-lookup micro-optimization; callers should not pass them.
    """
    # An element may establish a new page context for itself and its subtree.
    new_page_href = elem.get(__tag_page_href)
    if new_page_href:
        page = Iri(new_page_href)
    xlink_href = elem.get(__tag_link)
    xinclude_href = elem.get(__tag_include)
    if xlink_href:
        # Ordinary link: dispatch on the scheme of the target IRI.
        xlink_href = Iri(xlink_href)
        if xlink_href.scheme == 'wiki.local':
            self.handle_wikilocal_links(elem, xlink_href, page)
        elif xlink_href.scheme == 'wiki':
            self.handle_wiki_links(elem, xlink_href)
        elif xlink_href.scheme:
            self.handle_external_links(elem, xlink_href)
    elif xinclude_href:
        # Transclusion: only wiki-internal schemes are handled here.
        xinclude_href = Iri(xinclude_href)
        if xinclude_href.scheme == 'wiki.local':
            self.handle_wikilocal_transclusions(elem, xinclude_href, page)
        elif xinclude_href.scheme == 'wiki':
            self.handle_wiki_transclusions(elem, xinclude_href)
    elif xlink_href == '':
        # reST link to page fragment
        # NOTE(review): assumes elem.text is a non-None string here — confirm
        elem.set(self._tag_xlink_href, '#' + elem.text.replace(' ', '_'))
    for child in elem.iter_elements():
        self.traverse_tree(child, page)
    return elem
def inline_object_repl(self, stack, object, object_url=None, object_item=None, object_text=None, object_args=None):
    """
    Handles objects transcluded within the page.

    :param stack: the converter's element stack; result is appended to its top
    :param object: full matched markup (unused below; regex group name)
    :param object_url: target URL when the object is external
    :param object_item: wiki item name when the object is a local item
    :param object_text: alternative text for the object
    :param object_args: raw argument string, e.g. '&w=75 class="top"'
    """
    if object_args:
        args = parse_arguments(object_args).keyword  # XXX needs different parsing
    else:
        args = {}
    # Split arguments: '&key' entries become URL query parameters,
    # whitelisted names become html attributes; everything else is dropped.
    query_keys = {}
    attrib = {}
    whitelist = ['width', 'height', 'class']
    for attr, value in args.items():
        if attr.startswith('&'):
            query_keys[attr[1:]] = value
        elif attr in whitelist:
            attrib[html(attr)] = value
    if object_text:
        attrib[html.alt] = object_text
    if object_item is not None:
        # img tag
        query = url_encode(query_keys, charset=CHARSET, encode_keys=True)
        # TODO: moin 1.9 needed this for an attached file; move functionality to scripts/migration/moin/import19.py
        att = 'attachment:'
        if object_item.startswith(att):
            object_item = '/' + object_item[len(att):]  # now we have a subitem
        target = Iri(scheme='wiki.local', path=object_item, query=query, fragment=None)
        attrib[xinclude.href] = target
        element = xinclude.include(attrib=attrib)
        stack.top_append(element)
    else:
        # object tag
        target = Iri(object_url)
        attrib[xlink.href] = target
        element = moin_page.object(attrib)
        stack.top_append(element)
def visit_reference(self, node):
    """
    Convert a reST reference node into a moin link, macro, or transclusion.

    refuri values shaped like ``<<MacroName(args)>>`` are moin macros:
    TableOfContents and Include get dedicated DOM nodes, any other macro
    becomes a generic inline-part. Other refuris become plain links after
    a scheme whitelist check; an empty refuri links to a local anchor.
    """
    refuri = node.get('refuri', '')
    if refuri.startswith('<<') and refuri.endswith('>>'):  # moin macro
        macro = refuri[2:-2]
        macro_name = macro.split('(')[0]
        # The argument list is optional (e.g. <<TableOfContents>> or
        # <<DateTime>>): guard the index access so parenless macros do
        # not raise IndexError (previously only the generic branch did).
        try:
            arg_string = macro.split('(')[1][:-1]
        except IndexError:
            arg_string = ''
        if macro_name == "TableOfContents":
            arguments = arg_string.split(',')
            node = moin_page.table_of_content()
            self.open_moin_page_node(node)
            if arguments and arguments[0]:
                node.set(moin_page.outline_level, arguments[0])
            return
        if macro_name == "Include":
            # include macros are expanded by include.py similar to transclusions
            # rst include handles only wiki pages and does not support additional arguments like moinwiki
            arguments = arg_string.split(',')
            link = Iri(scheme='wiki.local', path=arguments)
            node = xinclude.include(
                attrib={
                    xinclude.href: link,
                    moin_page.alt: refuri,
                    moin_page.content_type: 'x-moin/macro;name=' + macro_name,
                })
            self.open_moin_page_node(node)
            return
        # generic macro: emit an inline-part carrying the macro name,
        # with the raw argument string as a nested arguments node
        self.open_moin_page_node(
            moin_page.inline_part(
                attrib={
                    moin_page.content_type: "x-moin/macro;name={0}".format(macro_name)
                }))
        if arg_string:
            self.open_moin_page_node(moin_page.arguments())
            self.open_moin_page_node(arg_string)
            self.close_moin_page_node()
            self.close_moin_page_node()
        return
    if not allowed_uri_scheme(refuri):
        self.visit_error(node)
        return
    if refuri == '':
        # build a link to a heading or an explicitly defined anchor
        refuri = Iri(scheme='wiki.local', fragment=node.attributes['name'].replace(' ', '_'))
    self.open_moin_page_node(moin_page.a(attrib={xlink.href: refuri}))
def visit_image(self, node):
    """
    Processes images and other transcluded objects.

    :param node: docutils image node; its 'uri' decides local transclusion
                 (no scheme -> xinclude) vs external object (xlink)
    """
    # copy the supported presentation attributes verbatim
    whitelist = [
        'width',
        'height',
        'alt',
    ]
    attrib = {}
    for key in whitelist:
        if node.get(key):
            attrib[html(key)] = node.get(key)
    # there is no 'scale' attribute, hence absent from whitelist, handled separately
    if node.get('scale'):
        # scale is a percentage applied to any width/height already copied
        scaling_factor = int(node.get('scale')) / 100.0
        for key in ('width', 'height'):
            if html(key) in attrib:
                attrib[html(key)] = int(
                    int(attrib[html(key)]) * scaling_factor)
    # "align" parameter is invalid in HTML5. Convert it to a class defined in userstyles.css.
    userstyles = {
        'left': 'left',
        'center': 'center',
        'right': 'right',
        'top': 'top',  # rst parser creates error messages for top, bottom, and middle
        'bottom': 'bottom',
        'middle': 'middle',
    }
    alignment = userstyles.get(node.get('align'))
    if alignment:
        attrib[html.class_] = alignment
    url = Iri(node['uri'])
    if url.scheme is None:
        # img: schemeless URI means a wiki-local item, transclude it
        target = Iri(scheme='wiki.local', path=node['uri'], fragment=None)
        attrib[xinclude.href] = target
        new_node = xinclude.include(attrib=attrib)
    else:
        # obj: external resource, reference it with xlink
        new_node = moin_page.object(attrib)
        new_node.set(xlink.href, url)
    self.open_moin_page_node(new_node)
def test_Iri_quote_1():
    """Each Iri component keeps its raw value and exposes quoted variants."""
    u = Iri(scheme=u'wiki', authority=u'authority_ä%?#', path=u'/path_ä%?#',
            query=u'query_ä%?#', fragment=u'fragment_ä%?#')
    assert u.scheme == u'wiki'
    # (attribute name, raw, fullquoted, quoted, urlquoted)
    cases = [
        ('authority', u'authority_ä%?#', u'authority_ä%25%3F%23',
         u'authority_ä%25?#', u'authority_%C3%A4%25%3F%23'),
        ('path', u'/path_ä%?#', u'/path_ä%25%3F%23',
         u'/path_ä%25?#', u'/path_%C3%A4%25%3F%23'),
        ('query', u'query_ä%?#', u'query_ä%25?%23',
         u'query_ä%25?#', u'query_%C3%A4%25?%23'),
        ('fragment', u'fragment_ä%?#', u'fragment_ä%25?%23',
         u'fragment_ä%25?#', u'fragment_%C3%A4%25?%23'),
    ]
    for name, raw, fullquoted, quoted, urlquoted in cases:
        component = getattr(u, name)
        assert component == raw
        assert component.fullquoted == fullquoted
        assert component.quoted == quoted
        assert component.urlquoted == urlquoted
    # full serialization uses the fullquoted forms
    assert unicode(u) == u'wiki://authority_ä%25%3F%23/path_ä%25%3F%23?query_ä%25?%23#fragment_ä%25?%23'
def __call__(self, rev, contenttype=None, arguments=None):
    """
    Wrap the revision in a moin <object> element pointing at the raw item data.

    :param rev: revision to render
    :param contenttype: unused; part of the converter call interface
    :param arguments: optional transclusion arguments; ``arguments.keyword``
                      may carry an xinclude.href Iri whose query string
                      (e.g. "w=75" from ``{{jpeg||&w=75 class="top"}}``) is
                      merged into the download URL, plus extra html attributes
    :returns: moin_page.page DOM tree containing the object element
    """
    item_name = rev.item.name
    query_keys = {'do': 'get', 'rev': rev.revid}
    attrib = {}
    if arguments:
        query = arguments.keyword.get(xinclude.href)
        if query and query.query:
            # query.query value is similar to "w=75" given a transclusion "{{jpeg||&w=75 class="top"}}"
            query_keys.update(url_decode(query.query))
        # fix: copy instead of aliasing, so the update() below does not
        # mutate the caller's arguments.keyword dict
        attrib = dict(arguments.keyword)
    query = url_encode(query_keys, charset=CHARSET, encode_keys=True)
    attrib.update({
        moin_page.type_: str(self.input_type),
        xlink.href: Iri(scheme='wiki', authority='', path='/' + rev.item.fqname.fullname, query=query),
    })
    obj = moin_page.object_(attrib=attrib, children=[
        item_name,
    ])
    body = moin_page.body(children=(obj, ))
    return moin_page.page(children=(body, ))
def test_Iri_init_1():
    """Building an Iri from keywords leaves unspecified parts as None."""
    iri = Iri(scheme='wiki', path='/StartSeite', query='action=raw')
    assert iri.authority is None
    assert iri.fragment is None
    assert iri.scheme == 'wiki'
    assert iri.path == '/StartSeite'
    assert iri.query == 'action=raw'
def inline_object_repl(self, stack, object, object_page=None, object_url=None, object_text=None):
    """Handles objects included in the page."""
    attrib = {}
    if object_text:
        attrib[html.alt] = object_text
    if object_page is not None:
        # moin 1.9 stored attachments under an 'attachment:' prefix;
        # rewrite that as a subitem path
        prefix = 'attachment:'
        if object_page.startswith(prefix):
            object_page = '/' + object_page[len(prefix):]
        attrib[xinclude.href] = Iri(scheme='wiki.local', path=object_page)
        stack.top_append(xinclude.include(attrib=attrib))
    else:
        # external target: emit an <object> with a fallback text child
        attrib[xlink.href] = object_url
        stack.top_append(moin_page.object(
            attrib=attrib,
            children=(
                'Your Browser does not support HTML5 audio/video element.', )))
def inline_link_repl(self, stack, link, link_url=None, link_item=None, link_text=None, link_interwiki_site=None, link_interwiki_item=None):
    """
    Handle all kinds of links.

    :param stack: converter element stack; the <a> element is pushed,
                  filled with its text, then popped
    :param link: full matched markup (regex group; unused directly)
    :param link_url: external URL target
    :param link_item: wiki item target (may carry an #anchor)
    :param link_text: optional display text, parsed as inline markup
    :param link_interwiki_site: interwiki wiki name, if any
    :param link_interwiki_item: item name on the interwiki site
    """
    if link_interwiki_site:
        if is_known_wiki(link_interwiki_site):
            # real interwiki link: build wiki:// IRI and finish here
            link = Iri(scheme='wiki', authority=link_interwiki_site, path='/' + link_interwiki_item)
            element = moin_page.a(attrib={xlink.href: link})
            stack.push(element)
            if link_text:
                self.parse_inline(link_text, stack, self.inlinedesc_re)
            else:
                stack.top_append(link_interwiki_item)
            stack.pop()
            return
        else:
            # assume local language uses ":" inside of words, set link_item and continue
            link_item = '{0}:{1}'.format(link_interwiki_site, link_interwiki_item)
    if link_item is not None:
        att = 'attachment:'  # moin 1.9 needed this for an attached file
        if link_item.startswith(att):
            link_item = '/' + link_item[len(att):]  # now we have a subitem
        # we have Anchor macro, so we support anchor links despite lack of docs in Creole spec
        if '#' in link_item:
            path, fragment = link_item.rsplit('#', 1)
        else:
            path, fragment = link_item, None
        target = Iri(scheme='wiki.local', path=path, fragment=fragment)
        text = link_item
    else:
        target = Iri(link_url)
        text = link_url
    element = moin_page.a(attrib={xlink.href: target})
    stack.push(element)
    if link_text:
        self.parse_inline(link_text, stack, self.inlinedesc_re)
    else:
        # no explicit text: fall back to the target itself
        stack.top_append(text)
    stack.pop()
def test_Iri_init_2():
    """Keyword arguments override the corresponding parts of a parsed Iri."""
    base = 'wiki://MoinMoin/StartSeite?action=raw#body'
    iri = Iri(base, scheme='newwiki', path='/newStartSeite', query='action=false')
    # overridden parts
    assert iri.scheme == 'newwiki'
    assert iri.path == '/newStartSeite'
    assert iri.query == 'action=false'
    # parts without an override keep the parsed values
    assert iri.authority == 'MoinMoin'
    assert iri.fragment == 'body'
def test_Iri_quote_2():
    """Parsing an already-quoted Iri and serializing it must round-trip."""
    quoted = u'wiki://authority_ä%25%3F%23/path_ä%25%3F%23?query_ä%25?%23#fragment_ä%25?%23'
    assert unicode(Iri(quoted)) == quoted
def __call__(self, rev, contenttype=None, arguments=None):
    """Render a page offering to create the (nonexistent) item."""
    fullname = rev.item.fqname.fullname
    href = Iri(scheme='wiki', authority='', path='/' + fullname, query='do=modify')
    label = _("%(item_name)s does not exist. Create it?", item_name=fullname)
    anchor = moin_page.a(attrib={xlink.href: href}, children=[label])
    body = moin_page.body(children=(anchor, ))
    return moin_page.page(children=(body, ))
def test_Iri_3():
    """wiki.local Iri without authority: the path is taken verbatim."""
    text = 'wiki.local:StartSeite?action=raw#body'
    iri = Iri(text)
    assert iri.authority is None
    assert iri.scheme == 'wiki.local'
    assert iri.path == 'StartSeite'
    assert iri.query == 'action=raw'
    assert iri.fragment == 'body'
    # serialization round-trips
    assert unicode(iri) == text
def test_Iri_2():
    """wiki-scheme Iris: named authority vs. empty authority."""
    for text, authority in [
        ('wiki://MoinMoin/StartSeite?action=raw#body', 'MoinMoin'),
        ('wiki:///StartSeite?action=raw#body', ''),
    ]:
        iri = Iri(text)
        assert iri.scheme == 'wiki'
        assert iri.authority == authority
        assert iri.path == '/StartSeite'
        assert iri.query == 'action=raw'
        assert iri.fragment == 'body'
        assert unicode(iri) == text
def __call__(self, rev, contenttype=None, arguments=None):
    """Build a page containing a download link for this revision."""
    name = rev.item.name or rev.meta['name'][0]
    href = Iri(scheme='wiki', authority='', path='/' + name,
               query='do=get&rev={0}'.format(rev.revid))
    anchor = moin_page.a(attrib={xlink.href: href},
                         children=[u"Download {0}.".format(name)])
    body = moin_page.body(children=(anchor, ))
    return moin_page.page(children=(body, ))
def __call__(self, rev, contenttype=None, arguments=None):
    """Embed the revision as an HTML5 media object with fallback text."""
    name = rev.item.name
    href = Iri(scheme='wiki', authority='', path='/' + name,
               query='do=get&rev={0}'.format(rev.revid))
    attrib = {
        moin_page.type_: unicode(self.input_type),
        xlink.href: href,
    }
    obj = moin_page.object_(
        attrib=attrib,
        children=[u'Your Browser does not support HTML5 audio/video element.', ])
    body = moin_page.body(children=(obj, ))
    return moin_page.page(children=(body, ))
def visit_img(self, element):
    """
    <img src="URI" /> --> <object xlink:href="URI" /> for external URIs,
    or an xinclude transclusion for wiki-local images.
    """
    src = element.attrib.get('src')
    attrib = {}
    alt = element.attrib.get('alt')
    if alt:
        attrib[html.alt] = alt
    title = element.attrib.get('title')
    if title:
        attrib[html.title_] = title
    url = Iri(src)
    if url.scheme is None:
        # img tag: schemeless src is a wiki-local item, transclude it
        attrib[xinclude.href] = Iri(scheme='wiki.local', path=src, fragment=None)
        return xinclude.include(attrib=attrib)
    # object tag: external resource
    attrib[xlink.href] = url
    return moin_page.object(attrib)
def process_name(self, member_name):
    """Return a link that downloads a single member of this item."""
    href = Iri(scheme='wiki', authority='', path='/' + self.item_name,
               query='do=get&member={0}'.format(member_name))
    return moin_page.a(attrib={xlink.href: href}, children=[member_name, ])
def inline_freelink_repl(self, stack, freelink, freelink_bang=None, freelink_interwiki_page=None, freelink_interwiki_ref=None, freelink_page=None, freelink_email=None):
    """
    Handle free links: CamelCase page names, email addresses, and
    InterWiki:Page references.

    :param stack: converter element stack; result appended to its top
    :param freelink: full matched text, used verbatim when not a link
    :param freelink_bang: set when the link is escaped with '!' -> plain text
    """
    if freelink_bang:
        # escaped with '!': keep the raw text, no link
        stack.top_append(freelink)
        return
    attrib = {}
    if freelink_page:
        # local page link, optionally with a #fragment
        page = freelink_page.encode('utf-8')
        if '#' in page:
            path, fragment = page.rsplit('#', 1)
        else:
            path, fragment = page, None
        link = Iri(scheme='wiki.local', path=path, fragment=fragment)
        text = freelink_page
    elif freelink_email:
        link = 'mailto:' + freelink_email
        text = freelink_email
    else:
        # interwiki reference; unknown wikis degrade to plain text
        if not is_known_wiki(freelink_interwiki_ref):
            stack.top_append(freelink)
            return
        link = Iri(scheme='wiki', authority=freelink_interwiki_ref, path='/' + freelink_interwiki_page)
        text = freelink_interwiki_page
    attrib[xlink.href] = link
    element = moin_page.a(attrib, children=[text])
    stack.top_append(element)
def __call__(self, rev, contenttype=None, arguments=None):
    """Embed the revision as an HTML5 media object, carrying an alt text."""
    name = rev.item.name
    href = Iri(scheme='wiki', authority='', path='/' + name,
               query='do=get&rev={0}'.format(rev.revid))
    attrib = {
        moin_page.type_: str(self.input_type),
        xlink.href: href,
    }
    # prefer an explicitly passed alt text, fall back to the item summary
    if arguments and html.alt in arguments:
        attrib[html.alt] = arguments[html.alt]
    elif rev.meta.get(SUMMARY):
        attrib[html.alt] = rev.meta[SUMMARY]
    obj = moin_page.object_(
        attrib=attrib,
        children=['Your Browser does not support HTML5 audio/video element.', ])
    body = moin_page.body(children=(obj, ))
    return moin_page.page(children=(body, ))
def build_dom_calendar_table(rows, head=None, caption=None, cls=None):
    """
    Build a DOM table with data from <rows>.

    :param rows: iterable of rows, each a sequence of
                 (content, href, css_class) cell tuples; an empty href
                 produces a plain cell, otherwise the content is wrapped
                 in a link to the calendar
    :param head: optional sequence of (content, css_class) header tuples
    :param caption: optional caption text
    :param cls: optional CSS class for the <table> element
    :returns: moin_page.table DOM element
    """
    table = moin_page.table()
    if cls is not None:
        table.attrib[moin_page('class')] = cls
    if caption is not None:
        table_caption = moin_page.caption()
        table_caption.append(caption)
        table.append(table_caption)
    if head is not None:
        table_head = moin_page.table_header()
        table_row = moin_page.table_row()
        # fix: the index from enumerate() was never used; iterate and
        # unpack the (content, class) tuples directly
        for cell, cell_class in head:
            table_cell = moin_page.table_cell(children=[cell])
            table_cell.attrib[moin_page('class')] = cell_class
            table_row.append(table_cell)
        table_head.append(table_row)
        table.append(table_head)
    table_body = moin_page.table_body()
    for row in rows:
        table_row = moin_page.table_row()
        for cell, cell_addr, cell_class in row:
            if not cell_addr:
                # empty cell
                table_cell = moin_page.table_cell(children=[cell])
            else:
                # cell with link to calendar
                table_a = moin_page.a(attrib={xlink.href: Iri(cell_addr)}, children=[cell])
                table_cell = moin_page.table_cell(children=[table_a])
            table_cell.attrib[moin_page('class')] = cell_class
            table_row.append(table_cell)
        table_body.append(table_row)
    table.append(table_body)
    return table
def internal_representation(self, attributes=None, preview=None):
    """
    Return the internal representation of a document using a DOM Tree.

    The DOM is cached keyed on the revision's content hash plus the
    conversion attributes; previews bypass the cache entirely.

    :param attributes: arguments passed through to the input converter
    :param preview: preview data to convert instead of the stored revision
    :raises TypeError: if no converter is registered for self.contenttype
    """
    doc = cid = None
    if preview is None:
        # only stored revisions (with a content hash) are cacheable
        hash_name = HASH_ALGORITHM
        hash_hexdigest = self.rev.meta.get(hash_name)
        if hash_hexdigest:
            cid = cache_key(usage="internal_representation",
                            hash_name=hash_name,
                            hash_hexdigest=hash_hexdigest,
                            attrs=repr(attributes))
            doc = app.cache.get(cid)
    if doc is None:
        # We will see if we can perform the conversion:
        # FROM_mimetype --> DOM
        # if so we perform the transformation, otherwise we don't
        from moin.converters import default_registry as reg
        input_conv = reg.get(Type(self.contenttype), type_moin_document)
        if not input_conv:
            raise TypeError(
                "We cannot handle the conversion from {0} to the DOM tree".
                format(self.contenttype))
        smiley_conv = reg.get(type_moin_document, type_moin_document,
                              icon='smiley')
        # We can process the conversion
        name = self.rev.fqname.fullname if self.rev else self.name
        links = Iri(scheme='wiki', authority='', path='/' + name)
        doc = input_conv(preview or self.rev, self.contenttype, arguments=attributes)
        # XXX is the following assuming that the top element of the doc tree
        # is a moin_page.page element? if yes, this is the wrong place to do that
        # as not every doc will have that element (e.g. for images, we just get
        # moin_page.object, for a tar item, we get a moin_page.table):
        doc.set(moin_page.page_href, str(links))
        # smiley replacement applies only to wiki-ish source markups
        if self.contenttype.startswith((
                'text/x.moin.wiki',
                'text/x-mediawiki',
                'text/x.moin.creole', )):
            doc = smiley_conv(doc)
        if cid:
            app.cache.set(cid, doc)
    return doc
def handle_wiki_links(self, elem, input):
    """
    Rewrite a wiki:// link into a concrete URL on elem.

    :param elem: DOM element carrying the link
    :param input: Iri of the wiki link; its authority host (if any and
                  known) selects the target wiki, otherwise 'Self'
    """
    wiki_name = 'Self'
    if input.authority and input.authority.host:
        wn = str(input.authority.host)
        if is_known_wiki(wn):
            # interwiki link: tag the element so CSS can mark it
            if html.class_ in elem.attrib:
                elem.set(moin_page.class_, 'moin-interwiki ' + elem.attrib[html.class_])
            else:
                elem.set(moin_page.class_, 'moin-interwiki')
            wiki_name = wn
    # drop the leading '/' of the Iri path to get the item name
    item_name = str(input.path[1:])
    endpoint, rev, query = self._get_do_rev(input.query)
    url = url_for_item(item_name, wiki_name=wiki_name, rev=rev, endpoint=endpoint)
    link = Iri(url, query=query, fragment=input.fragment)
    elem.set(self._tag_xlink_href, link)
def absolute_path(self, path, current_page_path):
    """
    Converts a relative iri path into an absolute one

    :param path: the relative path to be converted
    :type path: Iri.path
    :param current_page_path: the path of the page where the link is
    :type current_page_path: Iri.path
    :returns: the absolute equivalent of the relative path
    :rtype: Iri.path
    """
    rel = path.quoted
    # index [1:] skips element 0, which is always / for the current page
    base = current_page_path[1:].quoted
    return Iri(AbsItemName(base, rel)).path
def handle_wikilocal_links(self, elem, input, page):
    """
    Rewrite a wiki.local: link into a concrete URL on elem.

    :param elem: DOM element carrying the link
    :param input: Iri of the local link; empty path means "this page"
    :param page: Iri of the containing page, used to resolve relative paths
    """
    if input.path:
        # this can be a relative path, make it absolute:
        path = input.path
        if page:
            path = self.absolute_path(path, page.path)
        item_name = str(path)
        if not flaskg.storage.has_item(item_name):
            # XXX these index accesses slow down the link converter quite a bit
            elem.set(moin_page.class_, 'moin-nonexistent')
    else:
        # no path: link to the current page itself (or '' without context)
        item_name = str(page.path[1:]) if page else ''
    endpoint, rev, query = self._get_do_rev(input.query)
    url = url_for_item(item_name, rev=rev, endpoint=endpoint)
    if not page:
        # no page context: make the URL relative by dropping the leading /
        url = url[1:]
    link = Iri(url, query=query, fragment=input.fragment)
    elem.set(self._tag_xlink_href, link)
def absolute_path(self, path, current_page_path):
    """
    Converts a relative iri path into an absolute one

    :param path: the relative path to be converted
    :type path: Iri.path
    :param current_page_path: the path of the page where the link is
    :type current_page_path: Iri.path
    :returns: the absolute equivalent of the relative path
    :rtype: Iri.path
    """
    quoted_path = path.quoted
    # starts from 1 because 0 is always / for the current page
    quoted_current_page_path = current_page_path[1:].quoted
    abs_path = AbsItemName(quoted_current_page_path, quoted_path)
    if quoted_path.startswith('/'):
        # avoid Iri issue where item name containing a colon is mistaken for scheme:path
        # NOTE(review): for paths not starting with '/', the AbsItemName
        # result is returned unconverted — presumably intentional; confirm
        abs_path = Iri(abs_path).path
    return abs_path
def macro(self, content, arguments, page_url, alternative):
    """
    Return a span of links to randomly chosen readable items.

    :param arguments: optional; arguments[0] is the number of items to
                      pick (default 1)
    :returns: moin_page.span with comma-separated item links, or None
              when no readable item could be found
    """
    item_count = int(arguments[0]) if arguments else 1
    all_item_names = self.get_item_names()
    # Draw random candidates until enough readable items are collected
    # or the candidate pool is exhausted.
    random_item_names = []
    while len(random_item_names) < item_count and all_item_names:
        # Take one random item from the list
        item_name = random.choice(all_item_names)
        all_item_names.remove(item_name)
        try:
            # Item.create raises AccessDenied for unreadable items;
            # the created object itself is not needed (was an unused var).
            Item.create(item_name)
            random_item_names.append(item_name)
        except AccessDenied:
            pass
    if not random_item_names:
        return
    random_item_names.sort()
    result = moin_page.span()
    for name in random_item_names:
        link = unicode(Iri(scheme=u'wiki', authority=u'', path=u'/' + name))
        result.append(
            moin_page.a(attrib={xlink.href: link}, children=[name]))
        result.append(", ")
    del result[-1]  # kill last comma
    return result
def __call__(self, rev, contenttype=None, arguments=None):
    """
    Build a page with a download link for the revision, or an error
    admonition when the item has been deleted (no name left in meta).
    """
    try:
        item_name = rev.item.name or rev.meta['name'][0]
    except IndexError:
        # item is deleted
        message = _(
            'This deleted item must be restored before it can be viewed or downloaded, ItemID = {itemid}'
        ).format(itemid=rev.item.itemid)
        admonition = moin_page.div(
            attrib={moin_page.class_: 'error'},
            children=[moin_page.p(children=[message])])
        body = moin_page.body(children=(admonition, ))
        return moin_page.page(children=(body, ))
    attrib = {
        xlink.href: Iri(scheme='wiki', authority='', path='/' + item_name,
                        query='do=get&rev={0}'.format(rev.revid)),
    }
    a = moin_page.a(attrib=attrib, children=["Download {0}.".format(item_name)])
    body = moin_page.body(children=(a, ))
    return moin_page.page(children=(body, ))
def run(self, data_dir=None):
    """
    Migrate a moin 1.9 data directory into the moin 2.0 storage backend.

    Phases: convert user profiles, convert pages/attachments, remap
    revision editor userids, convert the last revision of each item from
    1.9 wiki markup to 2.0 markup, then rebuild the whole index.

    :param data_dir: path to the moin 1.9 data directory
    """
    flaskg.add_lineno_attr = False
    flaskg.item_name2id = {}
    userid_old2new = {}
    indexer = app.storage
    backend = indexer.backend  # backend without indexing
    print("\nConverting Users...\n")
    for rev in UserBackend(os.path.join(data_dir, 'user')):  # assumes user/ below data_dir
        global user_names
        user_names.append(rev.meta['name'][0])
        userid_old2new[rev.uid] = rev.meta['itemid']  # map old userid to new userid
        backend.store(rev.meta, rev.data)
    print("\nConverting Pages/Attachments...\n")
    for rev in PageBackend(data_dir, deleted_mode=DELETED_MODE_KILL, default_markup='wiki'):
        # personal pages (and their subitems) move into the 'users' namespace
        for user_name in user_names:
            if rev.meta['name'][0] == user_name or rev.meta['name'][0].startswith(user_name + '/'):
                rev.meta['namespace'] = 'users'
                break
        backend.store(rev.meta, rev.data)
        # item_name to itemid xref required for migrating user subscriptions
        flaskg.item_name2id[rev.meta['name'][0]] = rev.meta['itemid']
    print("\nConverting Revision Editors...\n")
    for mountpoint, revid in backend:
        meta, data = backend.retrieve(mountpoint, revid)
        if USERID in meta:
            try:
                meta[USERID] = userid_old2new[meta[USERID]]
            except KeyError:
                # user profile lost, but userid referred by revision
                print(("Missing userid {0!r}, editor of {1} revision {2}".format(meta[USERID], meta[NAME][0], revid)).encode('utf-8'))
                del meta[USERID]
            backend.store(meta, data)
        elif meta.get(CONTENTTYPE) == CONTENTTYPE_USER:
            meta.pop(UID_OLD, None)  # not needed any more
            backend.store(meta, data)
    print("\nConverting last revision of Moin 1.9 items to Moin 2.0")
    self.conv_in = conv_in()
    self.conv_out = conv_out()
    reg = default_registry
    refs_conv = reg.get(type_moin_document, type_moin_document, items='refs')
    for item_name, (revno, namespace) in sorted(last_moin19_rev.items()):
        print(' Processing item "{0}", namespace "{1}", revision "{2}"'.format(item_name, namespace, revno))
        if namespace == '':
            namespace = 'default'
        meta, data = backend.retrieve(namespace, revno)
        data_in = ''.join(data.readlines())
        # 1.9 markup -> DOM -> 2.0 markup
        dom = self.conv_in(data_in, 'text/x.moin.wiki;format=1.9;charset=utf-8')
        out = self.conv_out(dom)
        out = out.encode(CHARSET)
        iri = Iri(scheme='wiki', authority='', path='/' + item_name)
        dom.set(moin_page.page_href, str(iri))
        # harvest link/transclusion metadata from the DOM
        refs_conv(dom)
        meta[ITEMLINKS] = refs_conv.get_links()
        meta[ITEMTRANSCLUSIONS] = refs_conv.get_transclusions()
        meta[EXTERNALLINKS] = refs_conv.get_external_links()
        size, hash_name, hash_digest = hash_hexdigest(out)
        out = BytesIO(out)
        meta[hash_name] = hash_digest
        meta[SIZE] = size
        # store the converted markup as a brand new revision
        meta[PARENTID] = meta[REVID]
        meta[REVID] = make_uuid()
        meta[REV_NUMBER] = meta[REV_NUMBER] + 1
        meta[MTIME] = int(time.time())
        meta[COMMENT] = 'Convert moin 1.9 markup to 2.0'
        meta[CONTENTTYPE] = 'text/x.moin.wiki;charset=utf-8'
        del meta['dataid']
        out.seek(0)
        backend.store(meta, out)
    print("\nRebuilding the index...")
    indexer.close()
    indexer.destroy()
    indexer.create()
    indexer.rebuild()
    indexer.open()
    print("Finished conversion!")
def test_Iri_parser():
    """Parsing http IRIs: every combination of present/absent components."""
    # (iri text, authority, path, query, fragment); scheme is always 'http',
    # None means the component must be absent after parsing.
    cases = [
        ('http://moinmo.in/StartSeite?action=raw#body', 'moinmo.in', '/StartSeite', 'action=raw', 'body'),
        ('http://moinmo.in/StartSeite?action=raw', 'moinmo.in', '/StartSeite', 'action=raw', None),
        ('http://moinmo.in/StartSeite', 'moinmo.in', '/StartSeite', None, None),
        ('http://moinmo.in', 'moinmo.in', None, None, None),
        ('http:///StartSeite?action=raw#body', '', '/StartSeite', 'action=raw', 'body'),
        ('http:///StartSeite?action=raw', '', '/StartSeite', 'action=raw', None),
        ('http:///StartSeite', '', '/StartSeite', None, None),
        ('http:///', '', '/', None, None),
        ('http://', '', None, None, None),
        ('http:', None, None, None, None),
        ('http://moinmo.in/StartSeite#body', 'moinmo.in', '/StartSeite', None, 'body'),
        ('http://moinmo.in#body', 'moinmo.in', None, None, 'body'),
        ('http:#body', None, None, None, 'body'),
        ('http://moinmo.in?action=raw#body', 'moinmo.in', None, 'action=raw', 'body'),
        ('http:?action=raw#body', None, None, 'action=raw', 'body'),
        ('http:/StartSeite?action=raw#body', None, '/StartSeite', 'action=raw', 'body'),
    ]
    for text, authority, path, query, fragment in cases:
        u = Iri(text)
        assert u.scheme == 'http'
        for actual, expected in [(u.authority, authority), (u.path, path),
                                 (u.query, query), (u.fragment, fragment)]:
            if expected is None:
                assert actual is None
            else:
                assert actual == expected
        # parsing and re-serializing must round-trip
        assert unicode(u) == text