Example #1
 def test_mixed(self):
     '''
     Cannot mix tokens and non-tokens at the same level.
     '''
     bad = Token(Any()) & Any()
     try:
         bad.get_parse()
         assert False, 'expected failure'
     except LexerError as err:
         assert str(err) == 'The grammar contains a mix of Tokens and ' \
                            'non-Token matchers at the top level. If ' \
                            'Tokens are used then non-token matchers ' \
                            'that consume input must only appear "inside" ' \
                            'Tokens.  The non-Token matchers include: ' \
                            'Any(None).', str(err)
     else:
         assert False, 'wrong exception'
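
Once a grammar contains any Token, every matcher that consumes input must itself be a Token (or sit inside one). A minimal sketch of the working equivalent, assuming Token can be imported from the top-level lepl package:

from lepl import Token

# All input-consuming matchers are Tokens, so the lexer sees a pure
# token-level grammar and configuration succeeds.
letters = Token('[a-z]+')
digits = Token('[0-9]+')
pair = letters & digits
parser = pair.get_parse()
print(parser('abc 123'))   # expect ['abc', '123'] (whitespace is discarded by default)
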
Example #2
 def test_string_arg(self):
     '''
     Skip anything (not just spaces).
     '''
     words = Token('[a-z]+')[:]
     words.config.lexer(discard='.')
     parser = words.get_parse()
     results = parser('abc defXghi')
     assert results == ['abc', 'def', 'ghi'], results
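
The discard argument is a regular expression describing what the lexer silently drops between tokens. A short sketch, assuming Token is imported from lepl, that skips punctuation as well as spaces:

from lepl import Token

words = Token('[a-z]+')[:]
words.config.lexer(discard='[ ,;]+')   # drop spaces, commas and semicolons between tokens
parser = words.get_parse()
print(parser('abc, def; ghi'))         # expect ['abc', 'def', 'ghi']
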
Example #3
 def test_bad_error_msg(self):
     '''
     An ugly error message (can't we improve this?)
     '''
     #basicConfig(level=DEBUG)
     words = Token('[a-z]+')[:]
     words.config.lexer()
     parser = words.get_parse()
     try:
         parser('abc defXghi')
         assert False, 'expected error'
     except RuntimeLexerError as err:
         assert str(err) == "No lexer for 'Xghi' at line 1 " \
             "character 7 of str: 'abc defXghi'.", str(err)
Example #4
 def test_bad_space(self):
     '''
     An unexpected character fails to match.
     '''
     token = Token('a')
     token.config.clear().lexer(discard='b')
     parser = token.get_parse()
     assert parser('a') == ['a'], parser('a')
     assert parser('b') is None, parser('b')
     try:
         parser('c')
         assert False, 'expected failure'
     except RuntimeLexerError as err:
         assert str(err) == "No lexer for 'c' at line 1 " \
             "character 0 of str: 'c'.", str(err)