Example #1
    def test_include1(self):
        # resolve the #include directives in the example config file and compare
        # the regenerated source against the fully expanded expected text
        dir_path = os.path.dirname(os.path.realpath(__file__))
        file_path = os.path.join(
            dir_path, "examples/include/01_include_test_config.cpp")
        with open(file_path, 'r', encoding='utf-8', newline=None) as fp:
            input_data = fp.read()
        tokens = Lexer(input_data, file_path).tokenize()
        preprocessor = PreProcessor(tokens, file_path)
        preprocessor.preprocess()
        tokens = preprocessor.tokens
        output = generator.from_tokens(tokens)

        expected_output = """1_include_test_file1_line1
1_include_test_file1_line2
1_include_test_file1_line3

1_include_test_file2_line1
1_include_test_file2_line2
1_include_test_file2_line3
class Foo {};
1_include_test_file3_line1
1_include_test_file3_line2
1_include_test_file3_line3"""

        self.assertEqual(expected_output, output)
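
These snippets are test-method bodies lifted out of a larger unittest module, so the surrounding imports and class declaration are not shown. The sketch below is one way that scaffold could look; apart from `from armaclassparser import lexer`, which appears in the snippets themselves, the module paths for Lexer, PreProcessor, and generator are assumptions inferred from the package name, not confirmed by the examples.

import os
import unittest

from armaclassparser import lexer                       # shown in the snippets below
from armaclassparser import generator                   # assumed module name
from armaclassparser.lexer import Lexer                 # assumed location of Lexer
from armaclassparser.preprocessor import PreProcessor   # assumed location of PreProcessor


class TestPreProcessor(unittest.TestCase):
    # the test_* methods shown in these examples are defined here
    ...
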
Example #2
    def test_remove_multi_line_comment3(self):
        # the input is nothing but a block comment, so removing comments
        # should leave no tokens at all
        input_data = "/* Hello World*/"
        from armaclassparser import lexer
        tokens = Lexer(input_data, lexer.STRING_INPUT_FILE).tokenize()
        preprocessor = PreProcessor(tokens, None)
        preprocessor.tokens = tokens
        preprocessor._remove_comments()
        self.assertEqual(0, len(preprocessor.tokens))
Example #3
    def test_remove_single_line_comment6(self):
        input_data = """
// hello"""
        from armaclassparser import lexer
        tokens = Lexer(input_data, lexer.STRING_INPUT_FILE).tokenize()
        preprocessor = PreProcessor(tokens, None)
        preprocessor.tokens = tokens
        preprocessor._remove_comments()
        # only the leading newline token should survive comment removal
        self.assertEqual(1, len(preprocessor.tokens))
Example #4
    def test_escaped_newlines(self):
        # a backslash-escaped newline should be stripped from the output entirely
        input_data = """\\
"""
        from armaclassparser import lexer
        tokens = Lexer(input_data, lexer.STRING_INPUT_FILE).tokenize()
        preprocessor = PreProcessor(tokens, lexer.STRING_INPUT_FILE)
        preprocessor._remove_escaped_newlines()
        output = generator.from_tokens(preprocessor.tokens)
        expected_output = ""
        self.assertEqual(expected_output, output)
Example #5
    def test_remove_multi_line_comment2(self):
        input_data = '''/* Hello World */
class Foo {};'''
        from armaclassparser import lexer
        tokens = Lexer(input_data, lexer.STRING_INPUT_FILE).tokenize()
        len_before = len(tokens)
        preprocessor = PreProcessor(tokens, None)
        preprocessor.tokens = tokens
        preprocessor._remove_comments()
        # only the tokens making up the block comment should be removed
        self.assertEqual(len_before - 7, len(preprocessor.tokens))
Example #6
    def test_remove_single_line_comment1(self):
        input_data = '''#include "script_component.hpp" // Hello World
class Foo {};'''
        from armaclassparser import lexer
        tokens = Lexer(input_data, lexer.STRING_INPUT_FILE).tokenize()
        len_before = len(tokens)
        preprocessor = PreProcessor(tokens, None)
        preprocessor.tokens = tokens
        preprocessor._remove_comments()
        # only the tokens making up the trailing line comment should be removed
        self.assertEqual(len_before - 5, len(preprocessor.tokens))
Example #7
    def _test_preprocessor(self, input_data, expected_output):
        # shared helper: run the full preprocessing pipeline on a string input
        # and compare the regenerated source with the expected output
        tokens = Lexer(input_data, lexer.STRING_INPUT_FILE).tokenize()
        preprocessor = PreProcessor(tokens, lexer.STRING_INPUT_FILE)
        preprocessor.preprocess()
        output = generator.from_tokens(preprocessor.tokens)
        self.assertEqual(expected_output, output)
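
A hypothetical caller of this helper might look like the following. The expected output rests on the assumption that input containing no directives, comments, or escaped newlines passes through the preprocessor unchanged; the examples above do not verify this exact case, and the test name is illustrative only.

    def test_plain_class_passthrough(self):
        # hypothetical usage of the helper above; assumes that input with no
        # directives or comments is reproduced verbatim after preprocessing
        self._test_preprocessor('class Foo {};', 'class Foo {};')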