def parse(actions, spdOutput, two_stageParsing):
    """Parse *spdOutput* with the SPD grammar and return the visitor's errors.

    When *two_stageParsing* is true, an optimistic SLL parse with a bail-out
    error strategy is attempted first; only on failure does the parser rewind
    and re-parse with the slower, fully general LL prediction mode.
    """
    tokens = CommonTokenStream(SPDGrammarLexer(InputStream(spdOutput)))
    parser = SPDGrammarParser(tokens)
    visitor = SPDGrammarVisitorImplementation(actions)
    if not two_stageParsing:
        # Single-stage: plain parse with the default error handling.
        visitor.visit(parser.json())
        return visitor._errors
    # Stage 1: fast SLL prediction; bail out on the first syntax error.
    parser._interp.predictionMode = PredictionMode.SLL
    parser.removeErrorListeners()
    parser._errHandler = BailErrorStrategy()
    try:
        visitor.visit(parser.json())
    except RuntimeError as exception:
        # NOTE(review): only RecognitionException triggers the LL retry;
        # any other RuntimeError is silently swallowed here — confirm intended.
        if isinstance(exception, RecognitionException):
            # Stage 2: rewind the token stream and re-parse with full LL
            # prediction and recovering error handling so errors are reported.
            tokens.seek(0)
            parser.addErrorListener(ConsoleErrorListener.INSTANCE)
            parser._errHandler = DefaultErrorStrategy()
            parser._interp.predictionMode = PredictionMode.LL
            visitor.visit(parser.json())
    return visitor._errors
def compileTreePattern(self, pattern: str, patternRuleIndex: int):
    """Compile *pattern* into a ParseTreePattern rooted at *patternRuleIndex*.

    The pattern text is tokenized and parsed by a ParserInterpreter built
    from the wrapped parser's ATN with bypass alternatives, so the pattern
    may mix literal input with rule/token tags.

    Raises:
        CannotInvokeStartRule: if the start rule cannot be invoked.
        StartRuleDoesNotConsumeFullPattern: if tokens remain after parsing.
    """
    tokenList = self.tokenize(pattern)
    tokenSrc = ListTokenSource(tokenList)
    tokens = CommonTokenStream(tokenSrc)
    # Imported locally (here and below) to avoid a circular import at module load.
    from antlr4.ParserInterpreter import ParserInterpreter
    parserInterp = ParserInterpreter(self.parser.grammarFileName, self.parser.tokenNames, self.parser.ruleNames, self.parser.getATNWithBypassAlts(), tokens)
    tree = None
    try:
        # Bail on the first error so malformed patterns fail fast.
        parserInterp.setErrorHandler(BailErrorStrategy())
        tree = parserInterp.parse(patternRuleIndex)
    except ParseCancellationException as e:
        # Unwrap the bail strategy's wrapper and surface the real cause.
        raise e.cause
    except RecognitionException as e:
        raise e
    except Exception as e:
        raise CannotInvokeStartRule(e)
    # Make sure tree pattern compilation checks for a complete parse
    if tokens.LA(1) != Token.EOF:
        raise StartRuleDoesNotConsumeFullPattern()
    from antlr4.tree.ParseTreePattern import ParseTreePattern
    return ParseTreePattern(self, pattern, patternRuleIndex, tree)
def print_lex(text: str):
    """Lex *text* and print each token as '<text>: <symbolic name>'."""
    token_stream = CommonTokenStream(DemystifyLexer(InputStream(text)))
    token_stream.fill()
    for tok in token_stream.tokens:
        print('%s: %s' % (tok.text, DemystifyLexer.symbolicNames[tok.type]))
def testInsertAfterLastIndex(self):
    """Inserting past the last token index appends at the end."""
    char_stream = InputStream('abc')
    token_stream = CommonTokenStream(lexer=TestLexer(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    rewriter.insertAfter(10, 'x')
    self.assertEqual(rewriter.getDefaultText(), 'abcx')
def testInsertBeforeIndexZero(self):
    """Inserting before index 0 prefixes the rendered text.

    Fixed: ``assertEquals`` is a deprecated alias removed in Python 3.12;
    use ``assertEqual``.
    """
    stream = CommonTokenStream(lexer=TestLexer(InputStream('abc')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)
    rewriter.insertBeforeIndex(0, '0')
    self.assertEqual(rewriter.getDefaultText(), '0abc')
def testInsertBeforeIndexZero(self):
    """Inserting before the first token prefixes the output."""
    source = InputStream('abc')
    lexer = TestLexer(source)
    token_stream = CommonTokenStream(lexer=lexer)
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    rewriter.insertBeforeIndex(0, '0')
    self.assertEqual(rewriter.getDefaultText(), '0abc')
def testInsertAfterLastIndex(self):
    """Inserting past the last token index appends at the end.

    Fixed: ``assertEquals`` is a deprecated alias removed in Python 3.12;
    use ``assertEqual``.
    """
    stream = CommonTokenStream(lexer=TestLexer(InputStream('abc')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)
    rewriter.insertAfter(10, 'x')
    self.assertEqual(rewriter.getDefaultText(), 'abcx')
def testReplaceAll(self):
    """Replacing the whole token range collapses the text to the replacement.

    Fixed: ``assertEquals`` is a deprecated alias removed in Python 3.12;
    use ``assertEqual``.
    """
    stream = CommonTokenStream(lexer=TestLexer(InputStream('abcccba')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)
    rewriter.replaceRange(0, 6, 'x')
    self.assertEqual('x', rewriter.getDefaultText())
def testReplaceAll(self):
    """Replacing every token yields just the replacement text."""
    char_stream = InputStream('abcccba')
    token_stream = CommonTokenStream(lexer=TestLexer(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    rewriter.replaceRange(0, 6, 'x')
    self.assertEqual('x', rewriter.getDefaultText())
def testReplaceSubsetThenFetch(self):
    """Replacing an interior range splices the replacement into place."""
    char_stream = InputStream('abcccba')
    token_stream = CommonTokenStream(lexer=TestLexer(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    rewriter.replaceRange(2, 4, 'xyz')
    self.assertEqual('abxyzba', rewriter.getDefaultText())
def testReplaceMiddleIndex(self):
    """Replacing a single middle token swaps just that token.

    Fixed: ``assertEquals`` is a deprecated alias removed in Python 3.12;
    use ``assertEqual``.
    """
    stream = CommonTokenStream(lexer=TestLexer(InputStream('abc')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)
    rewriter.replaceIndex(1, 'x')
    self.assertEqual(rewriter.getDefaultText(), 'axc')
def testReplaceMiddleIndex(self):
    """A single-token replacement leaves the neighbours intact."""
    char_stream = InputStream('abc')
    token_stream = CommonTokenStream(lexer=TestLexer(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    rewriter.replaceIndex(1, 'x')
    self.assertEqual(rewriter.getDefaultText(), 'axc')
def testReplaceSubsetThenFetch(self):
    """Replacing an interior range splices the replacement into place.

    Fixed: ``assertEquals`` is a deprecated alias removed in Python 3.12;
    use ``assertEqual``.
    """
    stream = CommonTokenStream(lexer=TestLexer(InputStream('abcccba')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)
    rewriter.replaceRange(2, 4, 'xyz')
    self.assertEqual('abxyzba', rewriter.getDefaultText())
def testCombineInsertOnLeftWithDelete(self):
    """An insert at the left edge of a deleted range survives the delete."""
    char_stream = InputStream('abc')
    token_stream = CommonTokenStream(lexer=TestLexer(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    rewriter.delete('default', 0, 2)
    rewriter.insertBeforeIndex(0, 'z')
    self.assertEqual('z', rewriter.getDefaultText())
def testCombineInsertOnLeftWithReplace(self):
    """An insert at the left edge of a replaced range precedes the replacement."""
    char_stream = InputStream('abc')
    token_stream = CommonTokenStream(lexer=TestLexer(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    rewriter.replaceRange(0, 2, 'foo')
    rewriter.insertBeforeIndex(0, 'z')
    self.assertEqual('zfoo', rewriter.getDefaultText())
def testCombineInserts(self):
    """Two inserts at the same index combine, later insert first.

    Fixed: ``assertEquals`` is a deprecated alias removed in Python 3.12;
    use ``assertEqual``.
    """
    stream = CommonTokenStream(lexer=TestLexer(InputStream('abc')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)
    rewriter.insertBeforeIndex(0, 'x')
    rewriter.insertBeforeIndex(0, 'y')
    self.assertEqual('yxabc', rewriter.getDefaultText())
def testCombineInserts(self):
    """Stacked inserts at one index render with the later insert outermost."""
    char_stream = InputStream('abc')
    token_stream = CommonTokenStream(lexer=TestLexer(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    rewriter.insertBeforeIndex(0, 'x')
    rewriter.insertBeforeIndex(0, 'y')
    self.assertEqual('yxabc', rewriter.getDefaultText())
def testReplaceSingleMiddleThenOverlappingSuperset(self):
    """A superset replace subsumes an earlier single-token replace."""
    char_stream = InputStream('abcba')
    token_stream = CommonTokenStream(lexer=TestLexer(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    rewriter.replaceIndex(2, 'xyz')
    rewriter.replaceRange(0, 3, 'foo')
    self.assertEqual('fooa', rewriter.getDefaultText())
def testDropIdenticalReplace(self):
    """Two identical replaces collapse into one."""
    char_stream = InputStream('abcc')
    token_stream = CommonTokenStream(lexer=TestLexer(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    rewriter.replaceRange(1, 2, 'foo')
    rewriter.replaceRange(1, 2, 'foo')
    self.assertEqual('afooc', rewriter.getDefaultText())
def testInsertBeforeTokenThenDeleteThatToken(self):
    """An insert before a token survives a later replace covering that token."""
    char_stream = InputStream('abc')
    token_stream = CommonTokenStream(lexer=TestLexer(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    rewriter.insertBeforeIndex(1, 'foo')
    rewriter.replaceRange(1, 2, 'foo')
    self.assertEqual('afoofoo', rewriter.getDefaultText())
def testCombineInsertOnLeftWithDelete(self):
    """An insert at the left edge of a deleted range survives the delete.

    Fixed: ``assertEquals`` is a deprecated alias removed in Python 3.12;
    use ``assertEqual``.
    """
    stream = CommonTokenStream(lexer=TestLexer(InputStream('abc')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)
    rewriter.delete('default', 0, 2)
    rewriter.insertBeforeIndex(0, 'z')
    self.assertEqual('z', rewriter.getDefaultText())
def testInsertBeforeTokenThenDeleteThatToken(self):
    """An insert before a token survives a later replace covering that token.

    Fixed: ``assertEquals`` is a deprecated alias removed in Python 3.12;
    use ``assertEqual``.
    """
    stream = CommonTokenStream(lexer=TestLexer(InputStream('abc')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)
    rewriter.insertBeforeIndex(1, 'foo')
    rewriter.replaceRange(1, 2, 'foo')
    self.assertEqual('afoofoo', rewriter.getDefaultText())
def testReplaceThenInsertAfterLastIndex(self):
    """Insert-after on the last token lands after its replacement."""
    char_stream = InputStream('abc')
    token_stream = CommonTokenStream(lexer=TestLexer(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    rewriter.replaceIndex(2, 'x')
    rewriter.insertAfter(2, 'y')
    self.assertEqual('abxy', rewriter.getDefaultText())
def testInsertThenReplaceSameIndex(self):
    """Insert-before survives a replace at the same index."""
    char_stream = InputStream('abc')
    token_stream = CommonTokenStream(lexer=TestLexer(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    rewriter.insertBeforeIndex(0, '0')
    rewriter.replaceIndex(0, 'x')
    self.assertEqual('0xbc', rewriter.getDefaultText())
def testDropIdenticalReplace(self):
    """Two identical replaces collapse into one.

    Fixed: ``assertEquals`` is a deprecated alias removed in Python 3.12;
    use ``assertEqual``.
    """
    stream = CommonTokenStream(lexer=TestLexer(InputStream('abcc')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)
    rewriter.replaceRange(1, 2, 'foo')
    rewriter.replaceRange(1, 2, 'foo')
    self.assertEqual('afooc', rewriter.getDefaultText())
def testOverlappingReplace4(self):
    """A wider later replace supersedes an earlier overlapping one."""
    char_stream = InputStream('abcc')
    token_stream = CommonTokenStream(lexer=TestLexer(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    rewriter.replaceRange(1, 2, 'foo')
    rewriter.replaceRange(1, 3, 'bar')
    self.assertEqual('abar', rewriter.getDefaultText())
def testLeaveAloneDisjointInsert2(self):
    """An insert disjoint from a replace range is left untouched.

    Fixed: ``assertEquals`` is a deprecated alias removed in Python 3.12;
    use ``assertEqual``.
    """
    stream = CommonTokenStream(lexer=TestLexer(InputStream('abcc')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)
    rewriter.replaceRange(2, 3, 'foo')
    rewriter.insertBeforeIndex(1, 'x')
    self.assertEqual('axbfoo', rewriter.getDefaultText())
def testDropPrevCoveredInsert(self):
    """An insert at a replace's left edge is kept ahead of the replacement."""
    char_stream = InputStream('abc')
    token_stream = CommonTokenStream(lexer=TestLexer(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    rewriter.insertBeforeIndex(1, 'foo')
    rewriter.replaceRange(1, 2, 'foo')
    self.assertEqual('afoofoo', rewriter.getDefaultText())
def testDropPrevCoveredInsert(self):
    """An insert at a replace's left edge is kept ahead of the replacement.

    Fixed: ``assertEquals`` is a deprecated alias removed in Python 3.12;
    use ``assertEqual``.
    """
    stream = CommonTokenStream(lexer=TestLexer(InputStream('abc')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)
    rewriter.insertBeforeIndex(1, 'foo')
    rewriter.replaceRange(1, 2, 'foo')
    self.assertEqual('afoofoo', rewriter.getDefaultText())
def testOverlappingReplace4(self):
    """A wider later replace supersedes an earlier overlapping one.

    Fixed: ``assertEquals`` is a deprecated alias removed in Python 3.12;
    use ``assertEqual``.
    """
    stream = CommonTokenStream(lexer=TestLexer(InputStream('abcc')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)
    rewriter.replaceRange(1, 2, 'foo')
    rewriter.replaceRange(1, 3, 'bar')
    self.assertEqual('abar', rewriter.getDefaultText())
def main(fileName):
    """Tokenize *fileName*, print the on-channel token count and each token,
    then parse it from the ``file_input`` start rule.

    Fixed: removed the dead trailing ``pass`` and the unused ``rootContext``
    binding; behavior is otherwise unchanged.
    """
    fs = FileStream(fileName)
    lexer = RuleLexerPy(fs)
    stream = CommonTokenStream(lexer)
    nst: int = stream.getNumberOfOnChannelTokens()
    print("number of tokens:", nst, "\n")
    for el in stream.tokens:
        print(el)
    parser = RuleParserPy(stream)
    parser.file_input()
def testReplaceRangeThenInsertAfterRightEdge(self):
    """Insert-after on a replace's right edge lands after the replacement."""
    char_stream = InputStream('abcccba')
    token_stream = CommonTokenStream(lexer=TestLexer(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    rewriter.replaceRange(2, 4, 'x')
    rewriter.insertAfter(4, 'y')
    self.assertEqual('abxyba', rewriter.getDefaultText())
def test2InsertBeforeAfterMiddleIndex(self):
    """Insert-before and insert-after bracket the same middle token."""
    char_stream = InputStream('abc')
    token_stream = CommonTokenStream(lexer=TestLexer(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    rewriter.insertBeforeIndex(1, 'x')
    rewriter.insertAfter(1, 'x')
    self.assertEqual(rewriter.getDefaultText(), 'axbxc')
def testReplaceSingleMiddleThenOverlappingSuperset(self):
    """A superset replace subsumes an earlier single-token replace.

    Fixed: ``assertEquals`` is a deprecated alias removed in Python 3.12;
    use ``assertEqual``.
    """
    stream = CommonTokenStream(lexer=TestLexer(InputStream('abcba')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)
    rewriter.replaceIndex(2, 'xyz')
    rewriter.replaceRange(0, 3, 'foo')
    self.assertEqual('fooa', rewriter.getDefaultText())
def test2InsertBeforeAfterMiddleIndex(self):
    """Insert-before and insert-after bracket the same middle token.

    Fixed: ``assertEquals`` is a deprecated alias removed in Python 3.12;
    use ``assertEqual``.
    """
    stream = CommonTokenStream(lexer=TestLexer(InputStream('abc')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)
    rewriter.insertBeforeIndex(1, 'x')
    rewriter.insertAfter(1, 'x')
    self.assertEqual(rewriter.getDefaultText(), 'axbxc')
def testLeaveAloneDisjointInsert2(self):
    """An insert disjoint from a replace range is left untouched."""
    char_stream = InputStream('abcc')
    token_stream = CommonTokenStream(lexer=TestLexer(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    rewriter.replaceRange(2, 3, 'foo')
    rewriter.insertBeforeIndex(1, 'x')
    self.assertEqual('axbfoo', rewriter.getDefaultText())
def testReplaceThenInsertAfterLastIndex(self):
    """Insert-after on the last token lands after its replacement.

    Fixed: ``assertEquals`` is a deprecated alias removed in Python 3.12;
    use ``assertEqual``.
    """
    stream = CommonTokenStream(lexer=TestLexer(InputStream('abc')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)
    rewriter.replaceIndex(2, 'x')
    rewriter.insertAfter(2, 'y')
    self.assertEqual('abxy', rewriter.getDefaultText())
def testReplaceRangeThenInsertAfterRightEdge(self):
    """Insert-after on a replace's right edge lands after the replacement.

    Fixed: ``assertEquals`` is a deprecated alias removed in Python 3.12;
    use ``assertEqual``.
    """
    stream = CommonTokenStream(lexer=TestLexer(InputStream('abcccba')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)
    rewriter.replaceRange(2, 4, 'x')
    rewriter.insertAfter(4, 'y')
    self.assertEqual('abxyba', rewriter.getDefaultText())
def testInsertThenReplaceSameIndex(self):
    """Insert-before survives a replace at the same index.

    Fixed: ``assertEquals`` is a deprecated alias removed in Python 3.12;
    use ``assertEqual``.
    """
    stream = CommonTokenStream(lexer=TestLexer(InputStream('abc')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)
    rewriter.insertBeforeIndex(0, '0')
    rewriter.replaceIndex(0, 'x')
    self.assertEqual('0xbc', rewriter.getDefaultText())
def testCombineInsertOnLeftWithReplace(self):
    """An insert at the left edge of a replaced range precedes the replacement.

    Fixed: ``assertEquals`` is a deprecated alias removed in Python 3.12;
    use ``assertEqual``.
    """
    stream = CommonTokenStream(lexer=TestLexer(InputStream('abc')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)
    rewriter.replaceRange(0, 2, 'foo')
    rewriter.insertBeforeIndex(0, 'z')
    self.assertEqual('zfoo', rewriter.getDefaultText())
def tokens(self, text):
    """Return the list of *tokens* produced by the wrapped *lexer*.

    The lexer runs through an ``antlr4.CommonTokenStream`` whose ``fill``
    method is invoked so every token is materialized before returning.

    Args:
        text (str): the text to be processed by the *lexer*.
    """
    token_stream = CommonTokenStream(self.Lexer(InputStream(text)))
    token_stream.fill()
    return token_stream.tokens
def testDisjointInserts(self):
    """Inserts at distinct indices all appear, in token order.

    Fixed: ``assertEquals`` is a deprecated alias removed in Python 3.12;
    use ``assertEqual``.
    """
    stream = CommonTokenStream(lexer=TestLexer(InputStream('abc')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)
    rewriter.insertBeforeIndex(1, 'x')
    rewriter.insertBeforeIndex(2, 'y')
    rewriter.insertBeforeIndex(0, 'z')
    self.assertEqual('zaxbyc', rewriter.getDefaultText())
def test2ReplaceMiddleIndex1InsertBefore(self):
    """The second replace at one index wins; an earlier insert is kept."""
    char_stream = InputStream('abc')
    token_stream = CommonTokenStream(lexer=TestLexer(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    rewriter.insertBeforeIndex(0, "_")
    rewriter.replaceIndex(1, 'x')
    rewriter.replaceIndex(1, 'y')
    self.assertEqual('_ayc', rewriter.getDefaultText())
def testToStringStartStop(self):
    """getText honors start/stop bounds after a range replace."""
    char_stream = InputStream('x = 3 * 0;')
    token_stream = CommonTokenStream(lexer=TestLexer2(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    rewriter.replaceRange(4, 8, '0')
    self.assertEqual(rewriter.getDefaultText(), 'x = 0;')
    self.assertEqual(rewriter.getText('default', 0, 9), 'x = 0;')
    self.assertEqual(rewriter.getText('default', 4, 8), '0')
def test2ReplaceMiddleIndex1InsertBefore(self):
    """The second replace at one index wins; an earlier insert is kept.

    Fixed: ``assertEquals`` is a deprecated alias removed in Python 3.12;
    use ``assertEqual``.
    """
    stream = CommonTokenStream(lexer=TestLexer(InputStream('abc')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)
    rewriter.insertBeforeIndex(0, "_")
    rewriter.replaceIndex(1, 'x')
    rewriter.replaceIndex(1, 'y')
    self.assertEqual('_ayc', rewriter.getDefaultText())
def testToStringStartStop(self):
    """getText honors start/stop bounds after a range replace.

    Fixed: ``assertEquals`` is a deprecated alias removed in Python 3.12;
    use ``assertEqual``.
    """
    stream = CommonTokenStream(lexer=TestLexer2(InputStream('x = 3 * 0;')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)
    rewriter.replaceRange(4, 8, '0')
    self.assertEqual(rewriter.getDefaultText(), 'x = 0;')
    self.assertEqual(rewriter.getText('default', 0, 9), 'x = 0;')
    self.assertEqual(rewriter.getText('default', 4, 8), '0')
def testDisjointInserts(self):
    """Inserts at distinct indices all appear, in token order."""
    char_stream = InputStream('abc')
    token_stream = CommonTokenStream(lexer=TestLexer(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    rewriter.insertBeforeIndex(1, 'x')
    rewriter.insertBeforeIndex(2, 'y')
    rewriter.insertBeforeIndex(0, 'z')
    self.assertEqual('zaxbyc', rewriter.getDefaultText())
def testReplaceThenDeleteMiddleIndex(self):
    """An insert inside a replaced range raises ValueError on render.

    Fixed: ``assertEquals`` -> ``assertEqual`` (alias removed in Python
    3.12) and ``ctx.exception.message`` -> ``str(ctx.exception)`` —
    Python 3 exceptions have no ``message`` attribute, so the original
    assertion itself raised AttributeError.
    """
    stream = CommonTokenStream(lexer=TestLexer(InputStream('abc')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)
    rewriter.replaceRange(0, 2, 'x')
    rewriter.insertBeforeIndex(1, '0')
    with self.assertRaises(ValueError) as ctx:
        rewriter.getDefaultText()
    self.assertEqual(
        'insert op <InsertBeforeOp@[@1,1:1=\'b\',<2>,1:1]:"0"> within boundaries of previous <ReplaceOp@[@0,0:0=\'a\',<1>,1:0]..[@2,2:2=\'c\',<3>,1:2]:"x">',
        str(ctx.exception)
    )
def testReplaceThenDeleteMiddleIndex(self):
    """Rendering fails when an insert lands inside a replaced range."""
    char_stream = InputStream('abc')
    token_stream = CommonTokenStream(lexer=TestLexer(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    rewriter.replaceRange(0, 2, 'x')
    rewriter.insertBeforeIndex(1, '0')
    with self.assertRaises(ValueError) as ctx:
        rewriter.getDefaultText()
    expected = 'insert op <InsertBeforeOp@[@1,1:1=\'b\',<2>,1:1]:"0"> within boundaries of previous <ReplaceOp@[@0,0:0=\'a\',<1>,1:0]..[@2,2:2=\'c\',<3>,1:2]:"x">'
    self.assertEqual(expected, str(ctx.exception))
def testPreservesOrderOfContiguousInserts(self):
    """Test for fix for: https://github.com/antlr/antlr4/issues/550

    Fixed: ``assertEquals`` is a deprecated alias removed in Python 3.12;
    use ``assertEqual``.
    """
    stream = CommonTokenStream(lexer=TestLexer(InputStream('aa')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)
    rewriter.insertBeforeIndex(0, '<b>')
    rewriter.insertAfter(0, '</b>')
    rewriter.insertBeforeIndex(1, '<b>')
    rewriter.insertAfter(1, '</b>')
    self.assertEqual('<b>a</b><b>a</b>', rewriter.getDefaultText())
def testPreservesOrderOfContiguousInserts(self):
    """Test for fix for: https://github.com/antlr/antlr4/issues/550"""
    char_stream = InputStream('aa')
    token_stream = CommonTokenStream(lexer=TestLexer(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    # Wrap each of the two 'a' tokens in <b>...</b> tags.
    for idx in (0, 1):
        rewriter.insertBeforeIndex(idx, '<b>')
        rewriter.insertAfter(idx, '</b>')
    self.assertEqual('<b>a</b><b>a</b>', rewriter.getDefaultText())
def testOverlappingReplace2(self):
    """A narrower replace inside a prior wider replace raises ValueError.

    Fixed: ``assertEquals`` -> ``assertEqual`` (alias removed in Python
    3.12) and ``ctx.exception.message`` -> ``str(ctx.exception)`` —
    Python 3 exceptions have no ``message`` attribute, so the original
    assertion itself raised AttributeError.
    """
    stream = CommonTokenStream(lexer=TestLexer(InputStream('abc')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)
    rewriter.replaceRange(0, 3, 'bar')
    rewriter.replaceRange(1, 2, 'foo')
    with self.assertRaises(ValueError) as ctx:
        rewriter.getDefaultText()
    self.assertEqual(
        """replace op boundaries of <ReplaceOp@[@1,1:1='b',<2>,1:1]..[@2,2:2='c',<3>,1:2]:"foo"> overlap with previous <ReplaceOp@[@0,0:0='a',<1>,1:0]..[@3,3:2='<EOF>',<-1>,1:3]:"bar">""",
        str(ctx.exception)
    )
def testReplaceThenReplaceLowerIndexedSuperset(self):
    """Overlapping replaces with a lower-indexed superset raise ValueError.

    Fixed: ``assertEquals`` -> ``assertEqual`` (alias removed in Python
    3.12) and ``ctx.exception.message`` -> ``str(ctx.exception)`` —
    Python 3 exceptions have no ``message`` attribute, so the original
    assertion itself raised AttributeError.
    """
    stream = CommonTokenStream(lexer=TestLexer(InputStream('abcccba')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)
    rewriter.replaceRange(2, 4, 'xyz')
    rewriter.replaceRange(1, 3, 'foo')
    with self.assertRaises(ValueError) as ctx:
        rewriter.getDefaultText()
    msg = str(ctx.exception)
    self.assertEqual(
        """replace op boundaries of <ReplaceOp@[@1,1:1='b',<2>,1:1]..[@3,3:3='c',<3>,1:3]:"foo"> overlap with previous <ReplaceOp@[@2,2:2='c',<3>,1:2]..[@4,4:4='c',<3>,1:4]:"xyz">""",
        msg)
def testReplaceRangeThenInsertAtRightEdge(self):
    """Insert-before at a replace's right edge raises ValueError on render."""
    char_stream = InputStream('abcccba')
    token_stream = CommonTokenStream(lexer=TestLexer(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    rewriter.replaceRange(2, 4, 'x')
    rewriter.insertBeforeIndex(4, 'y')
    with self.assertRaises(ValueError) as ctx:
        rewriter.getDefaultText()
    expected = "insert op <InsertBeforeOp@[@4,4:4='c',<3>,1:4]:\"y\"> within boundaries of previous <ReplaceOp@[@2,2:2='c',<3>,1:2]..[@4,4:4='c',<3>,1:4]:\"x\">"
    self.assertEqual(expected, str(ctx.exception))
def testReplaceThenReplaceLowerIndexedSuperset(self):
    """Overlapping replaces with a lower-indexed superset raise ValueError."""
    char_stream = InputStream('abcccba')
    token_stream = CommonTokenStream(lexer=TestLexer(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    rewriter.replaceRange(2, 4, 'xyz')
    rewriter.replaceRange(1, 3, 'foo')
    with self.assertRaises(ValueError) as ctx:
        rewriter.getDefaultText()
    expected = """replace op boundaries of <ReplaceOp@[@1,1:1='b',<2>,1:1]..[@3,3:3='c',<3>,1:3]:"foo"> overlap with previous <ReplaceOp@[@2,2:2='c',<3>,1:2]..[@4,4:4='c',<3>,1:4]:"xyz">"""
    self.assertEqual(expected, str(ctx.exception))
def testOverlappingReplace2(self):
    """A narrower replace inside a prior wider replace raises ValueError."""
    char_stream = InputStream('abc')
    token_stream = CommonTokenStream(lexer=TestLexer(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    rewriter.replaceRange(0, 3, 'bar')
    rewriter.replaceRange(1, 2, 'foo')
    with self.assertRaises(ValueError) as ctx:
        rewriter.getDefaultText()
    expected = """replace op boundaries of <ReplaceOp@[@1,1:1='b',<2>,1:1]..[@2,2:2='c',<3>,1:2]:"foo"> overlap with previous <ReplaceOp@[@0,0:0='a',<1>,1:0]..[@3,3:2='<EOF>',<-1>,1:3]:"bar">"""
    self.assertEqual(expected, str(ctx.exception))
def testToStringStartStop2(self):
    """getText over sub-ranges stays consistent across a replace and an
    insert-after on the EOF token."""
    char_stream = InputStream('x = 3 * 0 + 2 * 0;')
    token_stream = CommonTokenStream(lexer=TestLexer2(char_stream))
    token_stream.fill()
    rewriter = TokenStreamRewriter(tokens=token_stream)
    self.assertEqual('x = 3 * 0 + 2 * 0;', rewriter.getDefaultText())
    # replace 3 * 0 with 0
    rewriter.replaceRange(4, 8, '0')
    self.assertEqual('x = 0 + 2 * 0;', rewriter.getDefaultText())
    self.assertEqual('x = 0 + 2 * 0;', rewriter.getText('default', 0, 17))
    self.assertEqual('0', rewriter.getText('default', 4, 8))
    self.assertEqual('x = 0', rewriter.getText('default', 0, 8))
    self.assertEqual('2 * 0', rewriter.getText('default', 12, 16))
    rewriter.insertAfter(17, "// comment")
    self.assertEqual('2 * 0;// comment', rewriter.getText('default', 12, 18))
    self.assertEqual('x = 0', rewriter.getText('default', 0, 8))