    def testReplaceThenDeleteMiddleIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(0, 2, 'x')
        rewriter.insertBeforeIndex(1, '0')

        with self.assertRaises(ValueError) as ctx:
            rewriter.getDefaultText()
        self.assertEqual(
            'insert op <InsertBeforeOp@[@1,1:1=\'b\',<2>,1:1]:"0"> within boundaries of previous <ReplaceOp@[@0,0:0=\'a\',<1>,1:0]..[@2,2:2=\'c\',<3>,1:2]:"x">',
            str(ctx.exception)
        )
Example n. 2
    def testReplaceThenDeleteMiddleIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(0, 2, 'x')
        rewriter.insertBeforeIndex(1, '0')

        with self.assertRaises(ValueError) as ctx:
            rewriter.getDefaultText()
        self.assertEqual(
            'insert op <InsertBeforeOp@[@1,1:1=\'b\',<2>,1:1]:"0"> within boundaries of previous <ReplaceOp@[@0,0:0=\'a\',<1>,1:0]..[@2,2:2=\'c\',<3>,1:2]:"x">',
            str(ctx.exception)
        )
    def testReplaceThenReplaceLowerIndexedSuperset(self):
        input = InputStream('abcccba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(2, 4, 'xyz')
        rewriter.replaceRange(1, 3, 'foo')

        with self.assertRaises(ValueError) as ctx:
            rewriter.getDefaultText()
        msg = str(ctx.exception)
        self.assertEqual(
            """replace op boundaries of <ReplaceOp@[@1,1:1='b',<2>,1:1]..[@3,3:3='c',<3>,1:3]:"foo"> overlap with previous <ReplaceOp@[@2,2:2='c',<3>,1:2]..[@4,4:4='c',<3>,1:4]:"xyz">""",
            msg)
    def testOverlappingReplace2(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(0, 3, 'bar')
        rewriter.replaceRange(1, 2, 'foo')

        with self.assertRaises(ValueError) as ctx:
            rewriter.getDefaultText()

        self.assertEqual(
            """replace op boundaries of <ReplaceOp@[@1,1:1='b',<2>,1:1]..[@2,2:2='c',<3>,1:2]:"foo"> overlap with previous <ReplaceOp@[@0,0:0='a',<1>,1:0]..[@3,3:2='<EOF>',<-1>,1:3]:"bar">""",
            str(ctx.exception))
    def testReplaceRangeThenInsertAtRightEdge(self):
        input = InputStream('abcccba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(2, 4, 'x')
        rewriter.insertBeforeIndex(4, 'y')

        with self.assertRaises(ValueError) as ctx:
            rewriter.getDefaultText()
        msg = str(ctx.exception)
        self.assertEqual(
            "insert op <InsertBeforeOp@[@4,4:4='c',<3>,1:4]:\"y\"> within boundaries of previous <ReplaceOp@[@2,2:2='c',<3>,1:2]..[@4,4:4='c',<3>,1:4]:\"x\">",
            msg)
Example n. 6
    def testReplaceRangeThenInsertAtRightEdge(self):
        input = InputStream('abcccba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(2, 4, 'x')
        rewriter.insertBeforeIndex(4, 'y')

        with self.assertRaises(ValueError) as ctx:
            rewriter.getDefaultText()
        msg = str(ctx.exception)
        self.assertEqual(
            "insert op <InsertBeforeOp@[@4,4:4='c',<3>,1:4]:\"y\"> within boundaries of previous <ReplaceOp@[@2,2:2='c',<3>,1:2]..[@4,4:4='c',<3>,1:4]:\"x\">",
            msg
        )
Example n. 7
    def testOverlappingReplace2(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(0, 3, 'bar')
        rewriter.replaceRange(1, 2, 'foo')

        with self.assertRaises(ValueError) as ctx:
            rewriter.getDefaultText()

        self.assertEqual(
            """replace op boundaries of <ReplaceOp@[@1,1:1='b',<2>,1:1]..[@2,2:2='c',<3>,1:2]:"foo"> overlap with previous <ReplaceOp@[@0,0:0='a',<1>,1:0]..[@3,3:2='<EOF>',<-1>,1:3]:"bar">""",
            str(ctx.exception)
        )
Example n. 8
    def testReplaceThenReplaceLowerIndexedSuperset(self):
        input = InputStream('abcccba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(2, 4, 'xyz')
        rewriter.replaceRange(1, 3, 'foo')

        with self.assertRaises(ValueError) as ctx:
            rewriter.getDefaultText()
        msg = str(ctx.exception)
        self.assertEqual(
            """replace op boundaries of <ReplaceOp@[@1,1:1='b',<2>,1:1]..[@3,3:3='c',<3>,1:3]:"foo"> overlap with previous <ReplaceOp@[@2,2:2='c',<3>,1:2]..[@4,4:4='c',<3>,1:4]:"xyz">""",
            msg
        )
Example n. 9
    def testInsertBeforeIndexZero(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)
        rewriter.insertBeforeIndex(0, '0')

        self.assertEqual(rewriter.getDefaultText(), '0abc')
    def testInsertBeforeIndexZero(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)
        rewriter.insertBeforeIndex(0, '0')

        self.assertEqual(rewriter.getDefaultText(), '0abc')
Example n. 11
    def testInsertAfterLastIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)
        rewriter.insertAfter(10, 'x')

        self.assertEqual(rewriter.getDefaultText(), 'abcx')
    def testInsertAfterLastIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)
        rewriter.insertAfter(10, 'x')

        self.assertEqual(rewriter.getDefaultText(), 'abcx')
Example n. 13
    def testReplaceSubsetThenFetch(self):
        input = InputStream('abcccba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(2, 4, 'xyz')

        self.assertEqual('abxyzba', rewriter.getDefaultText())
    def testReplaceSubsetThenFetch(self):
        input = InputStream('abcccba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(2, 4, 'xyz')

        self.assertEqual('abxyzba', rewriter.getDefaultText())
Example n. 15
    def testReplaceAll(self):
        input = InputStream('abcccba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(0, 6, 'x')

        self.assertEqual('x', rewriter.getDefaultText())
Example n. 16
    def testReplaceMiddleIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceIndex(1, 'x')

        self.assertEqual(rewriter.getDefaultText(), 'axc')
Example n. 17
def save(rewriter: TokenStreamRewriter,
         file_name: str,
         filename_mapping=lambda x: x + ".rewritten.java"):
    new_filename = filename_mapping(file_name).replace("\\", "/")
    # Create the target directory if it does not exist yet (os.path.dirname
    # avoids the bad slice that rfind('/') produces when there is no slash).
    path = os.path.dirname(new_filename)
    if path and not os.path.exists(path):
        os.makedirs(path)
    with open(new_filename, mode='w', newline='') as file:
        print("Writing rewritten file:", new_filename)
        file.write(rewriter.getDefaultText())
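A minimal usage sketch for save(), reusing the rewriter setup from the tests above; the file name "Example.java" is a placeholder chosen for illustration:

lexer = TestLexer(InputStream('abc'))
stream = CommonTokenStream(lexer=lexer)
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceIndex(1, 'x')  # 'abc' -> 'axc'
# Writes the rewritten text to "Example.java.rewritten.java".
save(rewriter, "Example.java")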
    def testReplaceAll(self):
        input = InputStream('abcccba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(0, 6, 'x')

        self.assertEqual('x', rewriter.getDefaultText())
    def testReplaceMiddleIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceIndex(1, 'x')

        self.assertEqual(rewriter.getDefaultText(), 'axc')
Example n. 20
    def testInsertThenReplaceSameIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(0, '0')
        rewriter.replaceIndex(0, 'x')

        self.assertEqual('0xbc', rewriter.getDefaultText())
    def testLeaveAloneDisjointInsert2(self):
        input = InputStream('abcc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(2, 3, 'foo')
        rewriter.insertBeforeIndex(1, 'x')

        self.assertEqual('axbfoo', rewriter.getDefaultText())
    def testDropPrevCoveredInsert(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(1, 'foo')
        rewriter.replaceRange(1, 2, 'foo')

        self.assertEqual('afoofoo', rewriter.getDefaultText())
Example n. 23
    def testLeaveAloneDisjointInsert2(self):
        input = InputStream('abcc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(2, 3, 'foo')
        rewriter.insertBeforeIndex(1, 'x')

        self.assertEqual('axbfoo', rewriter.getDefaultText())
Example n. 24
    def testDropIdenticalReplace(self):
        input = InputStream('abcc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(1, 2, 'foo')
        rewriter.replaceRange(1, 2, 'foo')

        self.assertEqual('afooc', rewriter.getDefaultText())
    def testDropIdenticalReplace(self):
        input = InputStream('abcc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(1, 2, 'foo')
        rewriter.replaceRange(1, 2, 'foo')

        self.assertEqual('afooc', rewriter.getDefaultText())
Example n. 26
    def testReplaceRangeThenInsertAfterRightEdge(self):
        input = InputStream('abcccba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(2, 4, 'x')
        rewriter.insertAfter(4, 'y')

        self.assertEqual('abxyba', rewriter.getDefaultText())
Example n. 27
    def test2InsertBeforeAfterMiddleIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(1, 'x')
        rewriter.insertAfter(1, 'x')

        self.assertEqual(rewriter.getDefaultText(), 'axbxc')
Example n. 28
    def testCombineInserts(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(0, 'x')
        rewriter.insertBeforeIndex(0, 'y')

        self.assertEqual('yxabc', rewriter.getDefaultText())
Example n. 29
    def testReplaceSingleMiddleThenOverlappingSuperset(self):
        input = InputStream('abcba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceIndex(2, 'xyz')
        rewriter.replaceRange(0, 3, 'foo')

        self.assertEqual('fooa', rewriter.getDefaultText())
    def testReplaceThenInsertAfterLastIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceIndex(2, 'x')
        rewriter.insertAfter(2, 'y')

        self.assertEqual('abxy', rewriter.getDefaultText())
    def testInsertBeforeTokenThenDeleteThatToken(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(1, 'foo')
        rewriter.replaceRange(1, 2, 'foo')

        self.assertEqual('afoofoo', rewriter.getDefaultText())
Example n. 32
    def testReplaceThenInsertAfterLastIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceIndex(2, 'x')
        rewriter.insertAfter(2, 'y')

        self.assertEqual('abxy', rewriter.getDefaultText())
Example n. 33
    def testCombineInsertOnLeftWithDelete(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.delete('default', 0, 2)
        rewriter.insertBeforeIndex(0, 'z')

        self.assertEqual('z', rewriter.getDefaultText())
Example n. 34
    def testCombineInsertOnLeftWithReplace(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(0, 2, 'foo')
        rewriter.insertBeforeIndex(0, 'z')

        self.assertEqual('zfoo', rewriter.getDefaultText())
Example n. 35
    def testDropPrevCoveredInsert(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(1, 'foo')
        rewriter.replaceRange(1, 2, 'foo')

        self.assertEqual('afoofoo', rewriter.getDefaultText())
    def testInsertThenReplaceSameIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(0, '0')
        rewriter.replaceIndex(0, 'x')

        self.assertEqual('0xbc', rewriter.getDefaultText())
Example n. 37
    def testOverlappingReplace4(self):
        input = InputStream('abcc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(1, 2, 'foo')
        rewriter.replaceRange(1, 3, 'bar')

        self.assertEqual('abar', rewriter.getDefaultText())
    def testOverlappingReplace4(self):
        input = InputStream('abcc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(1, 2, 'foo')
        rewriter.replaceRange(1, 3, 'bar')

        self.assertEqual('abar', rewriter.getDefaultText())
    def testCombineInsertOnLeftWithReplace(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(0, 2, 'foo')
        rewriter.insertBeforeIndex(0, 'z')

        self.assertEqual('zfoo', rewriter.getDefaultText())
Example n. 40
    def testInsertBeforeTokenThenDeleteThatToken(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(1, 'foo')
        rewriter.replaceRange(1, 2, 'foo')

        self.assertEqual('afoofoo', rewriter.getDefaultText())
    def testCombineInsertOnLeftWithDelete(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.delete('default', 0, 2)
        rewriter.insertBeforeIndex(0, 'z')

        self.assertEqual('z', rewriter.getDefaultText())
    def testCombineInserts(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(0, 'x')
        rewriter.insertBeforeIndex(0, 'y')

        self.assertEqual('yxabc', rewriter.getDefaultText())
    def test2InsertBeforeAfterMiddleIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(1, 'x')
        rewriter.insertAfter(1, 'x')

        self.assertEqual(rewriter.getDefaultText(), 'axbxc')
    def testReplaceRangeThenInsertAfterRightEdge(self):
        input = InputStream('abcccba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(2, 4, 'x')
        rewriter.insertAfter(4, 'y')

        self.assertEqual('abxyba', rewriter.getDefaultText())
    def testReplaceSingleMiddleThenOverlappingSuperset(self):
        input = InputStream('abcba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceIndex(2, 'xyz')
        rewriter.replaceRange(0, 3, 'foo')

        self.assertEqual('fooa', rewriter.getDefaultText())
Example n. 46
    def testToStringStartStop2(self):
        input = InputStream('x = 3 * 0 + 2 * 0;')
        lexer = TestLexer2(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        self.assertEqual('x = 3 * 0 + 2 * 0;', rewriter.getDefaultText())

        # replace 3 * 0 with 0
        rewriter.replaceRange(4, 8, '0')
        self.assertEqual('x = 0 + 2 * 0;', rewriter.getDefaultText())
        self.assertEqual('x = 0 + 2 * 0;', rewriter.getText('default', Interval(0, 17)))
        self.assertEqual('0', rewriter.getText('default', Interval(4, 8)))
        self.assertEqual('x = 0', rewriter.getText('default', Interval(0, 8)))
        self.assertEqual('2 * 0', rewriter.getText('default', Interval(12, 16)))

        rewriter.insertAfter(17, "// comment")
        self.assertEqual('2 * 0;// comment', rewriter.getText('default', Interval(12, 18)))

        self.assertEqual('x = 0', rewriter.getText('default', Interval(0, 8)))
Example n. 47
    def testDisjointInserts(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(1, 'x')
        rewriter.insertBeforeIndex(2, 'y')
        rewriter.insertBeforeIndex(0, 'z')

        self.assertEqual('zaxbyc', rewriter.getDefaultText())
Example n. 48
    def test2ReplaceMiddleIndex1InsertBefore(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(0, "_")
        rewriter.replaceIndex(1, 'x')
        rewriter.replaceIndex(1, 'y')

        self.assertEqual('_ayc', rewriter.getDefaultText())
Example n. 49
    def testToStringStartStop(self):
        input = InputStream('x = 3 * 0;')
        lexer = TestLexer2(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(4, 8, '0')

        self.assertEqual(rewriter.getDefaultText(), 'x = 0;')
        self.assertEqual(rewriter.getText('default', 0, 9), 'x = 0;')
        self.assertEqual(rewriter.getText('default', 4, 8), '0')
Example n. 50
    def testToStringStartStop2(self):
        input = InputStream('x = 3 * 0 + 2 * 0;')
        lexer = TestLexer2(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        self.assertEqual('x = 3 * 0 + 2 * 0;', rewriter.getDefaultText())

        # replace 3 * 0 with 0
        rewriter.replaceRange(4, 8, '0')
        self.assertEqual('x = 0 + 2 * 0;', rewriter.getDefaultText())
        self.assertEqual('x = 0 + 2 * 0;', rewriter.getText('default', 0, 17))
        self.assertEqual('0', rewriter.getText('default', 4, 8))
        self.assertEqual('x = 0', rewriter.getText('default', 0, 8))
        self.assertEqual('2 * 0', rewriter.getText('default', 12, 16))

        rewriter.insertAfter(17, "// comment")
        self.assertEqual('2 * 0;// comment', rewriter.getText('default', 12, 18))

        self.assertEqual('x = 0', rewriter.getText('default', 0, 8))
    def test2ReplaceMiddleIndex1InsertBefore(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(0, "_")
        rewriter.replaceIndex(1, 'x')
        rewriter.replaceIndex(1, 'y')

        self.assertEqual('_ayc', rewriter.getDefaultText())
    def testToStringStartStop(self):
        input = InputStream('x = 3 * 0;')
        lexer = TestLexer2(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(4, 8, '0')

        self.assertEqual(rewriter.getDefaultText(), 'x = 0;')
        self.assertEqual(rewriter.getText('default', 0, 9), 'x = 0;')
        self.assertEqual(rewriter.getText('default', 4, 8), '0')
    def testDisjointInserts(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(1, 'x')
        rewriter.insertBeforeIndex(2, 'y')
        rewriter.insertBeforeIndex(0, 'z')

        self.assertEqual('zaxbyc', rewriter.getDefaultText())
    def testPreservesOrderOfContiguousInserts(self):
        """
        Test for fix for: https://github.com/antlr/antlr4/issues/550
        """
        input = InputStream('aa')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(0, '<b>')
        rewriter.insertAfter(0, '</b>')
        rewriter.insertBeforeIndex(1, '<b>')
        rewriter.insertAfter(1, '</b>')

        self.assertEqual('<b>a</b><b>a</b>', rewriter.getDefaultText())
Example n. 55
    def testPreservesOrderOfContiguousInserts(self):
        """
        Test for fix for: https://github.com/antlr/antlr4/issues/550
        """
        input = InputStream('aa')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(0, '<b>')
        rewriter.insertAfter(0, '</b>')
        rewriter.insertBeforeIndex(1, '<b>')
        rewriter.insertAfter(1, '</b>')

        self.assertEqual('<b>a</b><b>a</b>', rewriter.getDefaultText())
Example n. 56
class MoveClassRefactoringListener(JavaParserLabeledListener):
    """
    To implement the move class refactoring
    a stream of tokens is sent to the listener, to build an object token_stream_rewriter
    and we move all class methods and fields from the source package to the target package
    """

    def __init__(self, common_token_stream: CommonTokenStream = None, class_identifier: str = None,
                 source_package: str = None, target_package: str = None, filename: str = None, dirname: str = None):
        """
        :param common_token_stream: used to edit the content of the parsers
        """
        self.enter_class = False
        self.token_stream = common_token_stream
        self.class_found = False
        self.class_fields = []
        self.class_methods = []

        # Copy all tokens of the source code into an editable buffer, token_stream_rewriter.
        if common_token_stream is not None:
            self.token_stream_rewriter = TokenStreamRewriter(common_token_stream)
        else:
            raise TypeError('common_token_stream is None')

        if class_identifier is not None:
            self.class_identifier = class_identifier
        else:
            raise ValueError("class_identifier is None")

        if filename is not None:
            self.filename = filename
        else:
            raise ValueError("filename is None")

        if dirname is not None:
            self.dirname = dirname
        else:
            raise ValueError("dirname is None")

        if source_package is not None:
            self.source_package = source_package
        else:
            raise ValueError("source_package is None")

        if target_package is not None:
            self.target_package = target_package
        else:
            raise ValueError("target_package is None")

        self.TAB = "\t"
        self.NEW_LINE = "\n"
        self.code = f"package {self.target_package};{self.NEW_LINE}{self.NEW_LINE}"

    # Exit a parse tree produced by JavaParserLabeled#importDeclaration.
    def exitImportDeclaration(self, ctx: JavaParserLabeled.ImportDeclarationContext):
        text_to_replace = "import " + ctx.qualifiedName().getText() + ';'
        if ctx.STATIC() is not None:
            text_to_replace = text_to_replace.replace("import", "import static")

        self.code += text_to_replace + self.NEW_LINE

    # Enter a parse tree produced by JavaParserLabeled#packageDeclaration.
    def enterPackageDeclaration(self, ctx: JavaParserLabeled.PackageDeclarationContext):
        package_name = ctx.getText()[7:-1]
        print(package_name)
        if package_name != self.source_package:
            raise ValueError(f"The package {package_name} in the file isn't equal to the source package!")

    # Exit a parse tree produced by JavaParserLabeled#classBodyDeclaration2.
    def exitClassBodyDeclaration2(self, ctx: JavaParserLabeled.ClassBodyDeclaration2Context):
        self.enter_class = False
        try:
            if ctx.memberDeclaration().classDeclaration().IDENTIFIER().getText() != self.class_identifier:
                return
        except Exception:
            return

        self.class_found = True

        start_index = ctx.start.tokenIndex
        stop_index = ctx.stop.tokenIndex

        # get the class body from the token_stream_rewriter
        class_body = self.token_stream_rewriter.getText(
            program_name=self.token_stream_rewriter.DEFAULT_PROGRAM_NAME,
            start=start_index,
            stop=stop_index
        )

        self.code += f"import {self.source_package}.*;"
        self.code += self.NEW_LINE * 2
        self.code += f"// Class \"{self.class_identifier}\" moved here " \
                     f"from package {self.source_package} by CodART" + self.NEW_LINE + \
                     f"{class_body}"

        # delete class declaration from source class
        self.token_stream_rewriter.delete(
            program_name=self.token_stream_rewriter.DEFAULT_PROGRAM_NAME,
            from_idx=start_index,
            to_idx=stop_index
        )

        with open(self.filename, 'w') as old_file:
            old_file.write(self.token_stream_rewriter.getDefaultText().replace("\r", ""))

        print("----------------------------")
        print("Class attributes: ", str(self.class_fields))
        print("Class methods: ", str(self.class_methods))
        print("----------------------------")

    # Exit a parse tree produced by JavaParserLabeled#typeDeclaration.
    def exitTypeDeclaration(self, ctx: JavaParserLabeled.TypeDeclarationContext):
        if ctx.classDeclaration() is not None:
            self.enter_class = False
            if ctx.classDeclaration().IDENTIFIER().getText() != self.class_identifier:
                return

            self.enter_class = True
            self.class_found = True

            start_index = ctx.start.tokenIndex
            stop_index = ctx.stop.tokenIndex

            # get the class body from the token_stream_rewriter
            class_body = self.token_stream_rewriter.getText(
                program_name=self.token_stream_rewriter.DEFAULT_PROGRAM_NAME,
                start=start_index,
                stop=stop_index
            )

            self.code += f"import {self.source_package}.*;"
            self.code += self.NEW_LINE * 2
            self.code += f"// Class \"{self.class_identifier}\" moved here " \
                         f"from package {self.source_package} by CodART" + self.NEW_LINE + \
                         f"{class_body}"

            # delete class declaration from source class
            self.token_stream_rewriter.delete(
                program_name=self.token_stream_rewriter.DEFAULT_PROGRAM_NAME,
                from_idx=start_index,
                to_idx=stop_index
            )

            print("----------------------------")
            print("Class attributes: ", str(self.class_fields))
            print("Class methods: ", str(self.class_methods))
            print("----------------------------")

    # Enter a parse tree produced by JavaParserLabeled#fieldDeclaration.
    def enterFieldDeclaration(self, ctx: JavaParserLabeled.FieldDeclarationContext):
        if not self.enter_class:
            return

        list_of_fields = ctx.variableDeclarators().getText().split(",")

        for field in list_of_fields:
            self.class_fields.append(field)

    # Enter a parse tree produced by JavaParserLabeled#methodDeclaration.
    def enterMethodDeclaration(self, ctx: JavaParserLabeled.MethodDeclarationContext):
        if not self.enter_class:
            return
        method_name = ctx.IDENTIFIER().getText()
        self.class_methods.append(method_name)

    # Exit a parse tree produced by JavaParserLabeled#compilationUnit.
    def exitCompilationUnit(self, ctx: JavaParserLabeled.CompilationUnitContext):
        if not self.class_found:
            raise ValueError(f"Class \"{self.class_identifier}\" NOT FOUND!")

        file_address = (self.dirname + '/' + self.target_package.replace('.', '/')
                        + '/' + self.class_identifier + '.java')

        with open(file_address, 'w') as new_file:
            new_file.write(self.code.replace("\r", ""))
        print(f"The class \"{self.class_identifier}\" was moved to the target package successfully!")
Example n. 57
class RemoveFieldRefactoringListener(JavaParserLabeledListener):
    def __init__(self, common_token_stream: CommonTokenStream = None, class_identifier: str = None,
                 fieldname: str = None, filename: str = None):
        """
        :param common_token_stream:
        """
        self.enter_class = False
        self.enter_field = False
        self.is_found_field = False
        self.is_found = False
        self.token_stream = common_token_stream
        self.class_identifier = class_identifier
        self.class_number = 0

        # Copy all tokens of the source code into an editable buffer, token_stream_rewriter.
        if common_token_stream is not None:
            self.token_stream_rewriter = TSR(common_token_stream)
        else:
            raise TypeError('common_token_stream is None')

        self.class_fields = []
        self.class_methods = []

        if class_identifier is not None:
            self.class_identifier = class_identifier
        else:
            raise ValueError("class_identifier is None")

        if filename is not None:
            self.filename = filename
        else:
            raise ValueError("filename is None")

        if fieldname is not None:
            self.fieldname = fieldname
        else:
            raise ValueError("fieldname is None")

    def enterClassDeclaration(self, ctx: JavaParserLabeled.ClassDeclarationContext):
        self.class_number += 1
        if ctx.IDENTIFIER().getText() != self.class_identifier:
            return
        self.enter_class = True

    # Enter a parse tree produced by Java9_v2Parser#normalClassDeclaration.
    # def enterNormalClassDeclaration(self, ctx: Java9_v2Parser.NormalClassDeclarationContext):
    #
    #     self.class_number += 1
    #     if ctx.identifier().getText() != self.class_identifier:
    #         return
    #     self.enter_class = True

    def exitClassDeclaration(self, ctx: JavaParserLabeled.ClassDeclarationContext):
        self.enter_class = False
        if ctx.IDENTIFIER().getText() != self.class_identifier:
            return

        with open(self.filename, 'w') as old_file:
            old_file.write(self.token_stream_rewriter.getDefaultText().replace("\r", ""))

    # Exit a parse tree produced by Java9_v2Parser#normalClassDeclaration.
    # def exitNormalClassDeclaration(self, ctx: Java9_v2Parser.NormalClassDeclarationContext):
    #     self.enter_class = False
    #     if ctx.identifier().getText() != self.class_identifier:
    #         return
    #
    #     old_file = open(self.filename, 'w')
    #     old_file.write(self.token_stream_rewriter.getDefaultText().replace("\r", ""))
    #
    #     # print("----------------------------")
    #     # print("Class attributes: ", str(self.class_fields))
    #     # print("Class methods: ", str(self.class_methods))
    #     # print("----------------------------")

    # Enter a parse tree produced by JavaParserLabeled#fieldDeclaration.

    def enterFieldDeclaration(self, ctx: JavaParserLabeled.FieldDeclarationContext):
        if not self.enter_class:
            return
        self.enter_field = True
        self.is_found_field = False

    # def enterFieldDeclaration(self, ctx:Java9_v2Parser.FieldDeclarationContext):
    #     if not self.enter_class:
    #         return
    #     self.enter_field = True
    #     self.is_found_field = False

    # Exit a parse tree produced by JavaParserLabeled#classBodyDeclaration2.

    def exitClassBodyDeclaration2(self, ctx: JavaParserLabeled.ClassBodyDeclaration2Context):
        if not self.enter_class:
            return

        start = ctx.start.tokenIndex
        stop = ctx.stop.tokenIndex

        if self.is_found_field:
            self.token_stream_rewriter.delete(program_name=self.token_stream_rewriter.DEFAULT_PROGRAM_NAME,
                                              from_idx=start,
                                              to_idx=stop)
            print(f'Field: "{self.fieldname}" SUCCESSFULLY REMOVED...')

    # def exitFieldDeclaration(self, ctx:JavaParserLabeled.FieldDeclarationContext):
    #     if not self.enter_class:
    #         return
    #
    #     # print("Enter 'exitFieldDeclaration' Methode")
    #     start = ctx.start.tokenIndex
    #     stop = ctx.stop.tokenIndex
    #
    #     if (self.is_found_field):
    #         self.token_stream_rewriter.delete(program_name=self.token_stream_rewriter.DEFAULT_PROGRAM_NAME,
    #                                           from_idx=start,
    #                                           to_idx=stop)
    #         print(f'Field: "{self.fieldname}" SUCCESSFULLY REMOVED...')

    # def exitFieldDeclaration(self, ctx: Java9_v2Parser.FieldDeclarationContext):
    #     if not self.enter_class:
    #         return
    #
    #     # print("Enter 'exitFieldDeclaration' Methode")
    #     start = ctx.start.tokenIndex
    #     stop = ctx.stop.tokenIndex
    #
    #     if (self.is_found_field):
    #         self.token_stream_rewriter.delete(program_name=self.token_stream_rewriter.DEFAULT_PROGRAM_NAME,
    #                                           from_idx=start,
    #                                           to_idx=stop)
    #         print(f'Field: "{self.fieldname}" SUCCESSFULLY REMOVED...')

    # Exit a parse tree produced by JavaParserLabeled#variableDeclarators.

    def exitVariableDeclarators(self, ctx: JavaParserLabeled.VariableDeclaratorsContext):
        if not (self.enter_class and self.enter_field):
            return
        fields = ctx.getText().split(',')
        start = ctx.start.tokenIndex
        stop = ctx.stop.tokenIndex
        for index, field in enumerate(fields):
            if self.fieldname == field.split('=')[0]:
                self.is_found = True
                self.is_found_field = True
                print(f'Found "{self.fieldname}" at: {start} - {stop}')
                # A single declarator means the whole declaration goes away;
                # exitClassBodyDeclaration2 deletes it via is_found_field.
                if len(fields) == 1:
                    return
                del fields[index]
                print('New:', ', '.join(fields))
                self.token_stream_rewriter.replace(program_name=self.token_stream_rewriter.DEFAULT_PROGRAM_NAME,
                                                   from_idx=start,
                                                   to_idx=stop,
                                                   text=', '.join(fields))
                self.is_found_field = False
                print(f'Field: "{self.fieldname}" SUCCESSFULLY REMOVED...')
                break

    # def exitVariableDeclaratorList(self, ctx:Java9_v2Parser.VariableDeclaratorListContext):
    #     if not (self.enter_class and self.enter_field):
    #         return
    #     # print("Enter 'exitVariableDeclaratorList' Methode")
    #     fields = ctx.getText().split(',')
    #     start = ctx.start.tokenIndex
    #     stop = ctx.stop.tokenIndex
    #     for index, field in enumerate(fields):
    #         if (self.fieldname == str(field).split('=')[0]):
    #             self.is_found = True
    #             self.is_found_field = True
    #             print(f'Find "{self.fieldname}", At: {start} - {stop}')
    #             if (len(fields) == 1):
    #                 return
    #             del fields[index]
    #             print('New: ' ,', '.join(str(field) for field in fields))
    #             self.token_stream_rewriter.replace(program_name=self.token_stream_rewriter.DEFAULT_PROGRAM_NAME,
    #                                                from_idx=start,
    #                                                to_idx=stop,
    #                                                text=', '.join(str(field) for field in fields))
    #             self.is_found_field = False
    #             print(f'Field: "{self.fieldname}" SUCCESSFULLY REMOVED...')
    #             break

    def exitCompilationUnit(self, ctx: JavaParserLabeled.CompilationUnitContext):
        if not self.is_found:
            print(f'Field "{self.fieldname}" NOT FOUND...')
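The same walking pattern drives this listener; a sketch under the same assumptions (generated JavaLexer/JavaParserLabeled names, illustrative paths and field name):

from antlr4 import FileStream, CommonTokenStream, ParseTreeWalker

stream = FileStream("src/com/example/A.java", encoding="utf8")
tokens = CommonTokenStream(JavaLexer(stream))  # assumed generated lexer
parser = JavaParserLabeled(tokens)             # assumed generated parser
tree = parser.compilationUnit()

# Removes the field "counter" from class "A" and rewrites the file in place.
listener = RemoveFieldRefactoringListener(
    common_token_stream=tokens,
    class_identifier="A",
    fieldname="counter",
    filename="src/com/example/A.java",
)
ParseTreeWalker.DEFAULT.walk(listener, tree)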