Example #1
from six import unichr

from html5lib import HTMLParser
from html5lib.constants import tokenTypes


def test_maintain_duplicate_attribute_order():
    # This lives in the parser tests because attribute normalization is
    # implemented in the parser, not the tokenizer.
    p = HTMLParser()
    attrs = [(unichr(x), i) for i, x in enumerate(range(ord('a'), ord('z')))]
    token = {'name': 'html',
             'selfClosing': False,
             'selfClosingAcknowledged': False,
             'type': tokenTypes["StartTag"],
             'data': attrs + [('a', len(attrs))]}  # duplicate of the first attribute
    out = p.normalizeToken(token)
    attr_order = list(out["data"].keys())
    # The trailing duplicate 'a' must not move the original 'a' or add a key.
    assert attr_order == [x for x, i in attrs]
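
The duplicate-handling behavior this test pins down can be sketched independently of html5lib: build an ordered mapping from the raw (name, value) pairs, then re-apply the pairs in reverse so the first occurrence of a duplicated name wins both its position and its value. This is a minimal illustration of the behavior under test, not html5lib's actual code:

from collections import OrderedDict

def normalize_start_tag_data(raw_attrs):
    # An ordered mapping keeps first-insertion positions; re-applying the
    # pairs in reverse makes the FIRST value of a duplicated name win.
    data = OrderedDict(raw_attrs)
    if len(raw_attrs) > len(data):  # at least one name was duplicated
        data.update(raw_attrs[::-1])
    return data

print(list(normalize_start_tag_data([('a', 1), ('b', 2), ('a', 99)]).items()))
# [('a', 1), ('b', 2)] -- 'a' keeps its position and its first value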
Example #2
from six import unichr

from html5lib import HTMLParser
from html5lib.constants import tokenTypes


def test_maintain_attribute_order():
    # This lives in the parser tests because attribute normalization is
    # implemented in the parser, not the tokenizer.
    p = HTMLParser()
    # Generate many attributes to maximize the chance that a hash-based
    # container would reorder them.
    attrs = [(unichr(x), i) for i, x in enumerate(range(ord('a'), ord('z')))]
    token = {'name': 'html',
             'selfClosing': False,
             'selfClosingAcknowledged': False,
             'type': tokenTypes["StartTag"],
             'data': attrs}
    out = p.normalizeToken(token)
    attr_order = list(out["data"].keys())
    # Normalization must preserve the source order of the attributes.
    assert attr_order == [x for x, i in attrs]
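
A quick interactive check of the same property, assuming an html5lib version where HTMLParser exposes normalizeToken (which the tests above already rely on):

from six import unichr

from html5lib import HTMLParser
from html5lib.constants import tokenTypes

p = HTMLParser()
pairs = [(unichr(c), None) for c in range(ord('a'), ord('f'))]
out = p.normalizeToken({'name': 'div',
                        'selfClosing': False,
                        'selfClosingAcknowledged': False,
                        'type': tokenTypes["StartTag"],
                        'data': pairs})
print(list(out['data'].keys()))  # ['a', 'b', 'c', 'd', 'e']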