Code Example #1
File: source_package.py  Project: xxyzzzq/quex
def __copy_files(FileTxt):

    input_directory  = QUEX_PATH               
    output_directory = Setup.output_directory 

    # Build a concrete list (not a lazy 'map' object); it is iterated twice below.
    file_list = [Lng["$code_base"] + x.strip() for x in FileTxt.split()]

    # Ensure that all directories exist
    directory_list = []
    for file in file_list:
        directory = path.dirname(output_directory + file)
        if directory in directory_list: continue
        directory_list.append(directory)

    # Sort directories according to length --> create parent directories before child
    for directory in sorted(directory_list, key=len):
        if os.access(directory, os.F_OK): continue
        # Create also parent directories, if required
        os.makedirs(directory)

    for file in file_list:
        input_file  = input_directory + file
        output_file = output_directory + file
        # Copy
        content     = open_file_or_die(input_file, "rb").read()
        write_safely_and_close(output_file, content)
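
All of the listings on this page rely on two small I/O helpers from the quex code base, open_file_or_die and write_safely_and_close, whose definitions are not shown. A minimal stand-in, assuming only the call signatures visible in the listings, could look roughly like this (illustration only, not quex's actual implementation):

import os

def open_file_or_die(file_name, mode="r"):
    # Stand-in sketch: open the file or abort with a readable error message.
    # The real helper in the quex code base differs in detail.
    try:
        return open(file_name, mode)
    except IOError as error:
        raise SystemExit("error: cannot open file '%s' (%s)" % (file_name, error))

def write_safely_and_close(file_name, txt):
    # Stand-in sketch: ensure the parent directory exists, write the content,
    # and close the file. Accepts both text and bytes content.
    directory = os.path.dirname(file_name)
    if directory and not os.path.isdir(directory):
        os.makedirs(directory)
    mode = "wb" if isinstance(txt, bytes) else "w"
    with open(file_name, mode) as fh:
        fh.write(txt)
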
Code Example #2
File: source_package.py  Project: smmckay/quex3
def __copy_files(OutputDir, FileSet):
    include_db = [
        ("declarations",      "$$INCLUDE_TOKEN_CLASS_DEFINITION$$",     Lng.INCLUDE(Setup.output_token_class_file)),
        ("implementations.i", "$$INCLUDE_TOKEN_CLASS_IMPLEMENTATION$$", Lng.INCLUDE(Setup.output_token_class_file_implementation)),
        ("implementations-inline.i", "$$INCLUDE_TOKEN_CLASS_IMPLEMENTATION$$", Lng.INCLUDE(Setup.output_token_class_file_implementation)),
        ("token/TokenQueue",  "$$INCLUDE_TOKEN_CLASS_DEFINITION$$",     Lng.INCLUDE(Setup.output_token_class_file)),
        ("token/TokenQueue",  "$$INCLUDE_LEXER_CLASS_DEFINITION$$",     Lng.INCLUDE(Setup.output_header_file)),
    ]
    for path, dummy, dummy in include_db:
        directory, basename = os.path.split(path)
        assert (not directory and basename in dir_db[""]) \
               or (basename in dir_db["%s/" % directory])

    file_pair_list,   \
    out_directory_set = __get_source_drain_list(OutputDir, FileSet)

    # Make directories
    # Sort according to length => create parent directories before child.
    for directory in sorted(out_directory_set, key=len):
        if os.access(directory, os.F_OK): continue
        os.makedirs(directory) # create parents, if necessary

    # Copy
    for source_file, drain_file in file_pair_list:
        content = open_file_or_die(source_file, "rb").read()
        for path, origin, replacement in include_db:
            if not source_file.endswith(path): continue
            content = content.replace(origin, replacement)

        content = adapt.do(content, OutputDir, OriginalPath=source_file)
        write_safely_and_close(drain_file, content)
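
__get_source_drain_list is not part of this listing. Judging from how its return values are used above, it presumably maps each file in FileSet to a (source, drain) pair and collects the destination directories. A hypothetical sketch of such a helper (the name, its internals, and the use of QUEX_PATH are assumptions):

import os

def __get_source_drain_list(OutputDir, FileSet):
    # Hypothetical sketch inferred from the calling code above: pair each
    # source file with its destination below OutputDir and collect the set
    # of destination directories that must exist before copying.
    file_pair_list    = []
    out_directory_set = set()
    for source_file in FileSet:
        # Assumption: FileSet holds absolute paths below QUEX_PATH.
        drain_file = os.path.join(OutputDir, os.path.relpath(source_file, QUEX_PATH))
        file_pair_list.append((source_file, drain_file))
        out_directory_set.add(os.path.dirname(drain_file))
    return file_pair_list, out_directory_set
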
Code Example #3
File: source_package.py  Project: nyulacska/gpr
def __copy_files(FileTxt):

    input_directory = QUEX_PATH
    output_directory = Setup.output_directory

    # Build a concrete list (not a lazy 'map' object); it is iterated twice below.
    file_list = [Lng["$code_base"] + x.strip() for x in FileTxt.split()]

    # Ensure that all directories exist
    directory_list = []
    for file in file_list:
        directory = path.dirname(output_directory + file)
        if directory in directory_list: continue
        directory_list.append(directory)

    # Sort directories according to length --> create parent directories before child
    for directory in sorted(directory_list, key=len):
        if os.access(directory, os.F_OK): continue
        # Create also parent directories, if required
        os.makedirs(directory)

    for file in file_list:
        input_file = input_directory + file
        output_file = output_directory + file
        # Copy
        content = open_file_or_die(input_file, "rb").read()
        write_safely_and_close(output_file, content)
Code Example #4
File: core.py  Project: smmckay/quex3
def _write_all(content_table):

    content_table = [
        (adapt.do(x[0], Setup.output_directory), x[1]) for x in content_table
    ]
    content_table = [
        (Lng.straighten_open_line_pragmas_new(x[0], x[1]), x[1]) for x in content_table
    ]

    for content, file_name in content_table:
        if not content: continue
        write_safely_and_close(file_name, content)
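
A hypothetical call illustrating the expected (content, file_name) tuple layout; file names and contents below are placeholders only:

analyzer_header_txt = "/* generated header */\n"
engine_txt          = "/* generated engine source */\n"

_write_all([
    (analyzer_header_txt, "Lexer.h"),
    (engine_txt,          "Lexer.cpp"),
])
# Entries with empty content are skipped; the rest pass through adapt.do()
# and Lng.straighten_open_line_pragmas_new() before being written.
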
Code Example #5
    def straighten_open_line_pragmas(self, FileName):
        norm_filename   = Setup.get_file_reference(FileName)
        line_pragma_txt = self._SOURCE_REFERENCE_END().strip()

        new_content = []
        line_n      = 1 # NOT: 0!
        fh          = open_file_or_die(FileName)
        while 1 + 1 == 2:
            line = fh.readline()
            line_n += 1
            if not line: 
                break
            elif line.strip() != line_pragma_txt:
                new_content.append(line)
            else:
                line_n += 1
                new_content.append(self._SOURCE_REFERENCE_BEGIN(SourceRef(norm_filename, line_n)))
        fh.close()
        write_safely_and_close(FileName, "".join(new_content))
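
To illustrate the effect of the pragma straightening: wherever the generated file still contains the backend's "source reference end" marker, the marker line is replaced by a pragma that anchors subsequent diagnostics to the generated file itself. The following sketch uses made-up marker and pragma text; the real strings come from Lng._SOURCE_REFERENCE_END() and Lng._SOURCE_REFERENCE_BEGIN() and depend on the language backend:

END_MARKER = "/* <<SOURCE REFERENCE END>> */"   # hypothetical marker text

def begin_pragma(file_name, line_n):
    # Plausible shape of a re-anchoring pragma for a C-family backend.
    return '#   line %i "%s"\n' % (line_n, file_name)

generated = "x = 1;\n%s\ny = 2;\n" % END_MARKER
result    = []
for line_n, line in enumerate(generated.splitlines(True), start=1):
    if line.strip() == END_MARKER:
        # Point diagnostics for the following lines back at the output file.
        result.append(begin_pragma("Lexer.cpp", line_n + 1))
    else:
        result.append(line)
print("".join(result))
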
Code Example #6
def do():
    """Generates state machines for all modes. Each mode results into 
       a separate state machine that is stuck into a virtual function
       of a class derived from class 'quex_mode'.
    """
    if Setup.language == "DOT":
        return do_plot()

    mode_description_db = quex_file_parser.do(Setup.input_mode_files)

    # (*) Generate the token ids
    #     (This needs to happen after the parsing of mode_db, since during that
    #      the token_id_db is developed.)
    if Setup.external_lexeme_null_object != "":
        # Assume external implementation
        token_id_header = None
        function_map_id_to_name_implementation = ""
    else:
        token_id_header = token_id_maker.do(Setup)
        function_map_id_to_name_implementation = token_id_maker.do_map_id_to_name_function()

    # (*) [Optional] Make a customized token class
    class_token_header, \
    class_token_implementation = token_class_maker.do(function_map_id_to_name_implementation)

    if Setup.token_class_only_f:
        write_safely_and_close(blackboard.token_type_definition.get_file_name(),
                                 do_token_class_info() \
                               + class_token_header)
        write_safely_and_close(Setup.output_token_class_file_implementation,
                               class_token_implementation)
        write_safely_and_close(Setup.output_token_id_file, token_id_header)
        Lng.straighten_open_line_pragmas(Setup.output_token_id_file)
        Lng.straighten_open_line_pragmas(
            Setup.output_token_class_file_implementation)
        Lng.straighten_open_line_pragmas(
            blackboard.token_type_definition.get_file_name())
        return

    # (*) implement the lexer mode-specific analyser functions
    #     During this process: mode_description_db --> mode_db
    function_analyzers_implementation, \
    mode_db                            = analyzer_functions_get(mode_description_db)

    # (*) Implement the 'quex' core class from a template
    # -- do the coding of the class framework
    configuration_header = configuration.do(mode_db)
    analyzer_header = analyzer_class.do(mode_db)
    analyzer_implementation = analyzer_class.do_implementation(mode_db) + "\n"
    mode_implementation = mode_classes.do(mode_db)

    # (*) [Optional] Generate a converter helper
    codec_converter_helper_header, \
    codec_converter_helper_implementation = codec_converter_helper.do()

    # Implementation (Potential Inline Functions)
    if class_token_implementation is not None:
        analyzer_implementation += class_token_implementation + "\n"

    # Engine (Source Code)
    engine_txt =   Lng.ENGINE_TEXT_EPILOG()               + "\n" \
                 + mode_implementation                    + "\n" \
                 + function_analyzers_implementation      + "\n" \
                 + function_map_id_to_name_implementation + "\n"

    # (*) Write Files ___________________________________________________________________
    if codec_converter_helper_header is not None:
        write_safely_and_close(Setup.output_buffer_codec_header,
                               codec_converter_helper_header)
        write_safely_and_close(Setup.output_buffer_codec_header_i,
                               codec_converter_helper_implementation)

    if token_id_header is not None:
        write_safely_and_close(Setup.output_token_id_file, token_id_header)

    write_safely_and_close(Setup.output_configuration_file,
                           configuration_header)

    if Setup.language == "C":
        engine_txt += analyzer_implementation
    else:
        analyzer_header = analyzer_header.replace(
            "$$ADDITIONAL_HEADER_CONTENT$$", analyzer_implementation)

    write_safely_and_close(Setup.output_header_file, analyzer_header)
    write_safely_and_close(Setup.output_code_file, engine_txt)

    if class_token_header is not None:
        write_safely_and_close(
            blackboard.token_type_definition.get_file_name(),
            class_token_header)

    Lng.straighten_open_line_pragmas(Setup.output_header_file)
    Lng.straighten_open_line_pragmas(Setup.output_code_file)
    if not blackboard.token_type_definition.manually_written():
        Lng.straighten_open_line_pragmas(
            blackboard.token_type_definition.get_file_name())

    if Setup.source_package_directory != "":
        source_package.do()
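
For orientation, the files written by do() above, together with the configuration attributes that name them (all taken directly from the listing):

# Summary of the write targets in do() above; values are the expressions
# from the listing that yield the respective file names.
OUTPUT_FILES = {
    "configuration header":         "Setup.output_configuration_file",
    "token id header":              "Setup.output_token_id_file",
    "analyzer class header":        "Setup.output_header_file",
    "engine source":                "Setup.output_code_file",
    "converter helper header":      "Setup.output_buffer_codec_header",
    "converter helper impl.":       "Setup.output_buffer_codec_header_i",
    "token class header":           "blackboard.token_type_definition.get_file_name()",
    "token class implementation":   "Setup.output_token_class_file_implementation",
}
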
Code Example #7
File: core.py  Project: xxyzzzq/quex
def do():
    """Generates state machines for all modes. Each mode results into 
       a separate state machine that is stuck into a virtual function
       of a class derived from class 'quex_mode'.
    """
    if Setup.language == "DOT": 
        return do_plot()

    mode_description_db = quex_file_parser.do(Setup.input_mode_files)

    # (*) Generate the token ids
    #     (This needs to happen after the parsing of mode_db, since during that
    #      the token_id_db is developed.)
    if Setup.external_lexeme_null_object != "":
        # Assume external implementation
        token_id_header                        = None
        function_map_id_to_name_implementation = ""
    else:
        token_id_header                        = token_id_maker.do(Setup) 
        function_map_id_to_name_implementation = token_id_maker.do_map_id_to_name_function()

    # (*) [Optional] Make a customized token class
    class_token_header, \
    class_token_implementation = token_class_maker.do(function_map_id_to_name_implementation)

    if Setup.token_class_only_f:
        write_safely_and_close(blackboard.token_type_definition.get_file_name(), 
                                 do_token_class_info() \
                               + class_token_header)
        write_safely_and_close(Setup.output_token_class_file_implementation,
                               class_token_implementation)
        write_safely_and_close(Setup.output_token_id_file, token_id_header)
        Lng.straighten_open_line_pragmas(Setup.output_token_id_file)
        Lng.straighten_open_line_pragmas(Setup.output_token_class_file_implementation)
        Lng.straighten_open_line_pragmas(blackboard.token_type_definition.get_file_name())
        return

    # (*) implement the lexer mode-specific analyser functions
    #     During this process: mode_description_db --> mode_db
    function_analyzers_implementation, \
    mode_db                            = analyzer_functions_get(mode_description_db)

    # (*) Implement the 'quex' core class from a template
    # -- do the coding of the class framework
    configuration_header    = configuration.do(mode_db)
    analyzer_header         = analyzer_class.do(mode_db)
    analyzer_implementation = analyzer_class.do_implementation(mode_db) + "\n"
    mode_implementation     = mode_classes.do(mode_db)

    # (*) [Optional] Generate a converter helper
    codec_converter_helper_header, \
    codec_converter_helper_implementation = codec_converter_helper.do()
    
    # Implementation (Potential Inline Functions)
    if class_token_implementation is not None:
         analyzer_implementation += class_token_implementation + "\n" 

    # Engine (Source Code)
    engine_txt =   Lng.ENGINE_TEXT_EPILOG()               + "\n" \
                 + mode_implementation                    + "\n" \
                 + function_analyzers_implementation      + "\n" \
                 + function_map_id_to_name_implementation + "\n" 

    # (*) Write Files ___________________________________________________________________
    if codec_converter_helper_header is not None:
        write_safely_and_close(Setup.output_buffer_codec_header,   
                               codec_converter_helper_header) 
        write_safely_and_close(Setup.output_buffer_codec_header_i, 
                               codec_converter_helper_implementation) 

    if token_id_header is not None:
        write_safely_and_close(Setup.output_token_id_file, token_id_header)

    write_safely_and_close(Setup.output_configuration_file, configuration_header)

    if Setup.language == "C":
        engine_txt     += analyzer_implementation
    else:
        analyzer_header = analyzer_header.replace("$$ADDITIONAL_HEADER_CONTENT$$", 
                                                  analyzer_implementation)

    write_safely_and_close(Setup.output_header_file, analyzer_header)
    write_safely_and_close(Setup.output_code_file,   engine_txt)

    if class_token_header is not None:
        write_safely_and_close(blackboard.token_type_definition.get_file_name(), 
                               class_token_header)

    Lng.straighten_open_line_pragmas(Setup.output_header_file)
    Lng.straighten_open_line_pragmas(Setup.output_code_file)
    if not blackboard.token_type_definition.manually_written():
        Lng.straighten_open_line_pragmas(blackboard.token_type_definition.get_file_name())

    if Setup.source_package_directory != "":
        source_package.do()
Code Example #8
File: core.py  Project: nyulacska/gpr
    def __do(self, state_machine, FileName, Option="utf8"):
        dot_code = state_machine.get_graphviz_string(NormalizeF=Setup.normalize_f, Option=Option)
        write_safely_and_close(FileName, dot_code)
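
A hypothetical usage note, assuming a state machine object that offers get_graphviz_string() as in the listing; the file name and option are placeholders:

# Called from inside the plotting class (the double underscore makes __do a
# name-mangled private method), e.g.:
#
#     self.__do(state_machine, FileName="mode_MAIN.dot", Option="utf8")
#
# The resulting .dot file can then be rendered with Graphviz, for example:
#
#     dot -Tsvg mode_MAIN.dot -o mode_MAIN.svg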