Example #1
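 # tokenize(), _unicode and u() are assumed to come from the module under
 # test; _unicode/u look like Python 2/3 compat helpers (str aliases on 3)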
 def test_tokenizer(self):
     tokens = [
         _unicode(item) for item in tokenize(
             u(r'E\ é > f [a~="y\"x"]:nth(/* fu /]* */-3.7)'))  # NB: the character between "f" and "[" is U+00A0 (no-break space)
     ]
     assert tokens == [
         u("<IDENT 'E é' at 0>"),
         "<S ' ' at 4>",
         "<DELIM '>' at 5>",
         "<S ' ' at 6>",
         # the no-break space is not whitespace in CSS
         u("<IDENT 'f ' at 7>"),  # f\xa0
         "<DELIM '[' at 9>",
         "<IDENT 'a' at 10>",
         "<DELIM '~' at 11>",
         "<DELIM '=' at 12>",
         "<STRING 'y\"x' at 13>",
         "<DELIM ']' at 19>",
         "<DELIM ':' at 20>",
         "<IDENT 'nth' at 21>",
         "<DELIM '(' at 24>",
         "<NUMBER '-3.7' at 37>",
         "<DELIM ')' at 41>",
         "<EOF at 42>",
     ]
Example #2
 def test_tokenizer(self):
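     # repr() adds u'...' prefixes on Python 2; strip them so a single
     # expected list works on both Python 2 and 3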
     tokens = [repr(item).replace("u'", "'")
               for item in tokenize('E > f[a~="y\\"x"]')]
     assert tokens == [
         "Symbol('E', 0)",
         "Token(' ', 1)",
         "Token('>', 2)",
         "Token(' ', 3)",
         "Symbol('f', 4)",
         "Token('[', 5)",
         "Symbol('a', 6)",
         "Token('~=', 7)",
         "String('y\"x', 9)",
         "Token(']', 15)"]