Example #1
def __get_mode_init_call(mode):
    
    analyzer_function = "QUEX_NAME(%s_analyzer_function)" % mode.name
    on_indentation    = "QUEX_NAME(%s_on_indentation)"    % mode.name
    on_entry          = "QUEX_NAME(%s_on_entry)"          % mode.name
    on_exit           = "QUEX_NAME(%s_on_exit)"           % mode.name
    has_base          = "QUEX_NAME(%s_has_base)"          % mode.name
    has_entry_from    = "QUEX_NAME(%s_has_entry_from)"    % mode.name
    has_exit_to       = "QUEX_NAME(%s_has_exit_to)"       % mode.name

    if mode.abstract_f(): 
        analyzer_function = "QUEX_NAME(Mode_uncallable_analyzer_function)"

    if not mode.incidence_db.has_key(E_IncidenceIDs.MODE_ENTRY):
        on_entry = "QUEX_NAME(Mode_on_entry_exit_null_function)"

    if not mode.incidence_db.has_key(E_IncidenceIDs.MODE_EXIT):
        on_exit = "QUEX_NAME(Mode_on_entry_exit_null_function)"

    if not mode.incidence_db.has_key(E_IncidenceIDs.INDENTATION_HANDLER):
        on_indentation = "QUEX_NAME(Mode_on_indentation_null_function)"

    txt = blue_print(quex_mode_init_call_str,
                [["$$MN$$",             mode.name],
                 ["$analyzer_function", analyzer_function],
                 ["$on_indentation",    on_indentation],
                 ["$on_entry",          on_entry],
                 ["$on_exit",           on_exit],
                 ["$has_base",          has_base],
                 ["$has_entry_from",    has_entry_from],
                 ["$has_exit_to",       has_exit_to]])

    return txt
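A note on blue_print itself: every example on this page drives it with a template string plus a list of [placeholder, replacement] pairs, but the function is not part of the listing. Judging purely from these call sites, it performs plain textual substitution. A minimal sketch under that assumption (the real quex implementation may differ, e.g. for efficiency):

def blue_print(template_str, replacement_list):
    # Minimal sketch, not quex's code: apply each [placeholder,
    # replacement] pair in order via plain string substitution.
    result = template_str
    for placeholder, replacement in replacement_list:
        result = result.replace(placeholder, replacement)
    return result

With sequential replacement like this, no marker may be a prefix of another; the markers used above ("$$MN$$", "$analyzer_function", ...) satisfy that.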
Example #2
def __lc_counting_replacements(code_str, CharacterSet):
    """Line and Column Number Counting(Range Skipper):
     
         -- in loop if there appears a newline, then do:
            increment line_n
            set position from where to count column_n
         -- at end of skipping do one of the following:
            if end delimiter contains newline:
               column_n = number of letters since last new line in end delimiter
               increment line_n by number of newlines in end delimiter.
               (NOTE: in this case the setting of the position from where to count
                      the column_n can be omitted.)
            else:
               column_n = current_position - position from where to count column number.

       NOTE: On reload we do count the column numbers and reset the column_p.
    """
    set_reference = "    __QUEX_IF_COUNT_COLUMNS(reference_p = QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer));\n"

    in_loop       = ""
    # Does the end delimiter contain a newline?
    if CharacterSet.contains(ord("\n")): in_loop = line_column_counter_in_loop()

    end_procedure = "    __QUEX_IF_COUNT_COLUMNS_ADD((size_t)(me->buffer._input_p - reference_p));\n" 
    before_reload = "    __QUEX_IF_COUNT_COLUMNS_ADD((size_t)(me->buffer._input_p - reference_p));\n" 
    after_reload  = "       __QUEX_IF_COUNT_COLUMNS(reference_p = me->buffer._input_p);\n" 

    return blue_print(code_str,
                     [["$$LC_COUNT_COLUMN_N_POINTER_REFERENCE$$", set_reference],
                      ["$$LC_COUNT_IN_LOOP$$",                     in_loop],
                      ["$$LC_COUNT_END_PROCEDURE$$",               end_procedure],
                      ["$$LC_COUNT_BEFORE_RELOAD$$",               before_reload],
                      ["$$LC_COUNT_AFTER_RELOAD$$",                after_reload],
                      ])
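The generated C fragments above implement exactly the scheme the docstring describes: remember a reference pointer just after the last newline, and derive the column number from the distance walked since then. A hypothetical pure-Python illustration of that arithmetic (names invented here, not quex code):

def count_lines_and_columns(text, line_n=1, column_n=1):
    # Illustration of the reference-pointer scheme from the docstring.
    reference = None                  # position just after the last newline
    for i, character in enumerate(text):
        if character == "\n":
            line_n += 1
            reference = i + 1         # column counting restarts here
    if reference is None:
        column_n += len(text)         # no newline: columns simply accumulate
    else:
        column_n = len(text) - reference + 1
    return line_n, column_n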
Example #3
def do_implementation(ModeDB):

    FileTemplate = os.path.normpath(QUEX_PATH + Lng["$code_base"] +
                                    "/analyzer/TXT-Cpp.i")
    func_txt = get_file_content_or_die(FileTemplate)

    func_txt = blue_print(func_txt, [
        [
            "$$CONSTRUCTOR_EXTENSTION$$",
            Lng.SOURCE_REFERENCED(blackboard.class_constructor_extension)
        ],
        [
            "$$CONVERTER_HELPER_I$$",
            Setup.get_file_reference(Setup.output_buffer_codec_header_i)
        ],
        [
            "$$CONSTRUCTOR_MODE_DB_INITIALIZATION_CODE$$",
            get_constructor_code(ModeDB.values())
        ],
        [
            "$$MEMENTO_EXTENSIONS_PACK$$",
            Lng.SOURCE_REFERENCED(blackboard.memento_pack_extension)
        ],
        [
            "$$MEMENTO_EXTENSIONS_UNPACK$$",
            Lng.SOURCE_REFERENCED(blackboard.memento_unpack_extension)
        ],
    ])
    return func_txt
Example #4
def __get_mode_init_call(mode):
    
    analyzer_function = "QUEX_NAME(%s_analyzer_function)" % mode.name
    on_indentation    = "QUEX_NAME(%s_on_indentation)"    % mode.name
    on_entry          = "QUEX_NAME(%s_on_entry)"          % mode.name
    on_exit           = "QUEX_NAME(%s_on_exit)"           % mode.name
    has_base          = "QUEX_NAME(%s_has_base)"          % mode.name
    has_entry_from    = "QUEX_NAME(%s_has_entry_from)"    % mode.name
    has_exit_to       = "QUEX_NAME(%s_has_exit_to)"       % mode.name

    if mode.options["inheritable"] == "only": 
        analyzer_function = "QUEX_NAME(Mode_uncallable_analyzer_function)"

    if len(mode.get_code_fragment_list("on_entry")) == 0:
        on_entry = "QUEX_NAME(Mode_on_entry_exit_null_function)"

    if len(mode.get_code_fragment_list("on_exit")) == 0:
        on_exit = "QUEX_NAME(Mode_on_entry_exit_null_function)"

    if len(mode.get_code_fragment_list("on_indentation")) == 0:
        on_indentation = "QUEX_NAME(Mode_on_indentation_null_function)"

    txt = blue_print(quex_mode_init_call_str,
                [["$$MN$$",             mode.name],
                 ["$analyzer_function", analyzer_function],
                 ["$on_indentation",    on_indentation],
                 ["$on_entry",          on_entry],
                 ["$on_exit",           on_exit],
                 ["$has_base",          has_base],
                 ["$has_entry_from",    has_entry_from],
                 ["$has_exit_to",       has_exit_to]])

    return txt
Example #5
def do_map_id_to_name_function():
    """Generate function which maps from token-id to string with the 
    name of the token id.
    """
    L = max(map(lambda name: len(name), token_id_db.keys()))

    # -- define the function for token names
    switch_cases = []
    token_names  = []
    for token_name in sorted(token_id_db.keys()):
        if token_name in standard_token_id_list: continue

        # UCS codepoints are coded directly as pure numbers
        if len(token_name) > 2 and token_name[:2] == "--":
            token = token_id_db[token_name]
            switch_cases.append("   case 0x%06X: return token_id_str_%s;\n" % \
                                (token.number, token.name))
            token_names.append("   static const char  token_id_str_%s[]%s = \"%s\";\n" % \
                               (token.name, space(L, token.name), token.name))
        else:
            switch_cases.append("   case %s%s:%s return token_id_str_%s;\n" % \
                                (Setup.token_id_prefix, token_name, space(L, token_name), token_name))
            token_names.append("   static const char  token_id_str_%s[]%s = \"%s\";\n" % \
                               (token_name, space(L, token_name), token_name))

    return blue_print(func_str,
                      [["$$TOKEN_ID_CASES$$", "".join(switch_cases)],
                       ["$$TOKEN_PREFIX$$",   Setup.token_id_prefix], 
                       ["$$TOKEN_NAMES$$",    "".join(token_names)], ])
Example #6
def do_map_id_to_name_cases():
    """Generate function which maps from token-id to string with the 
    name of the token id.
    """
    if not token_db.token_id_db: return ""
    L = max(len(name) for name in token_db.token_id_db.keys())

    def get_case(token_name, token):
        if token_name.startswith("--"):
            # UCS codepoints are coded directly as pure numbers
            return "   case 0x%06X: return \"%s\";\n" % \
                   (token.number, token.name)
        else:
            return "   case %s%s:%s return \"%s\";\n" % \
                   (Setup.token_id_prefix, token_name, space(L, token_name), token_name)

    # -- define the function for token names
    switch_cases = [
        get_case(token_name, token)
        for token_name, token in sorted(token_db.token_id_db.iteritems())
        if token_name not in standard_token_id_list
    ]

    txt = blue_print(map_id_to_name_cases,
                     [["$$TOKEN_ID_CASES$$", "".join(switch_cases)],
                      ["$$TOKEN_PREFIX$$", Setup.token_id_prefix]])

    return txt
Example #7
def do(Modes):
    LexerClassName              = Setup.analyzer_class_name
    DerivedClassName            = Setup.analyzer_derived_class_name
    DerivedClassHeaderFileName  = Setup.analyzer_derived_class_file

    if DerivedClassHeaderFileName != "": txt = "#include <" + Setup.get_file_reference(DerivedClassHeaderFileName) +">\n"
    else:                                txt = "#include \"" + Setup.get_file_reference(Setup.output_header_file) +"\"\n"

    txt += "#include <quex/code_base/analyzer/C-adaptions.h>\n"

    # -- mode class member function definitions (on_entry, on_exit, has_base, ...)
    mode_class_member_functions_txt = write_member_functions(Modes.values())

    mode_objects_txt = ""    
    for mode_name, mode in Modes.items():
        if mode.options["inheritable"] == "only": continue
        mode_objects_txt += "/* Global */QUEX_NAME(Mode)  QUEX_NAME(%s);\n" % mode_name

    txt += "QUEX_NAMESPACE_MAIN_OPEN\n"
    txt += mode_objects_txt
    txt += mode_class_member_functions_txt
    txt += "QUEX_NAMESPACE_MAIN_CLOSE\n"

    txt = blue_print(txt, [["$$LEXER_CLASS_NAME$$",         LexerClassName],
                           ["$$LEXER_DERIVED_CLASS_NAME$$", DerivedClassName]])
    
    return txt
Example #8
    def get_graphviz_string(self, NormalizeF=False, Option="utf8"):
        assert Option in ["hex", "utf8"]

        # (*) normalize the state indices
        index_map, inverse_index_map, index_sequence = self.get_state_index_normalization(NormalizeF)

        # (*) Border of plot block
        frame_txt  = "digraph state_machine_%i {\n" % self.get_id()
        frame_txt += "rankdir=LR;\n"
        frame_txt += "size=\"8,5\"\n"
        frame_txt += "node [shape = doublecircle]; $$ACCEPTANCE_STATES$$\n"
        frame_txt += "node [shape = circle];\n"
        frame_txt += "$$TRANSITIONS$$"
        frame_txt += "}\n"

        transition_str       = ""
        acceptance_state_str = ""
        for printed_state_i, state in sorted(imap(lambda i: (index_map[i], self.states[i]), index_sequence)):
            if state.is_acceptance(): 
                acceptance_state_str += "%i; " % int(printed_state_i)
            transition_str += state.get_graphviz_string(printed_state_i, index_map, Option)

        if acceptance_state_str != "": acceptance_state_str = acceptance_state_str[:-2] + ";"
        return blue_print(frame_txt, [["$$ACCEPTANCE_STATES$$", acceptance_state_str ],
                                      ["$$TRANSITIONS$$",       transition_str]])
Example #9
    def get_graphviz_string(self, NormalizeF=False, Option="utf8"):
        assert Option in ["hex", "utf8"]

        # (*) normalize the state indices
        index_map, inverse_index_map, index_sequence = self.get_state_index_normalization(
            NormalizeF)

        # (*) Border of plot block
        frame_txt = "digraph state_machine_%i {\n" % self.get_id()
        frame_txt += "rankdir=LR;\n"
        frame_txt += "size=\"8,5\"\n"
        frame_txt += "node [shape = doublecircle]; $$ACCEPTANCE_STATES$$\n"
        frame_txt += "node [shape = circle];\n"
        frame_txt += "$$TRANSITIONS$$"
        frame_txt += "}\n"

        transition_str = ""
        acceptance_state_str = ""
        for printed_state_i, state in sorted(
                imap(lambda i: (index_map[i], self.states[i]),
                     index_sequence)):
            if state.is_acceptance():
                acceptance_state_str += "%i; " % int(printed_state_i)
            transition_str += state.get_graphviz_string(
                printed_state_i, index_map, Option)

        if acceptance_state_str != "":
            acceptance_state_str = acceptance_state_str[:-2] + ";"
        return blue_print(frame_txt,
                          [["$$ACCEPTANCE_STATES$$", acceptance_state_str],
                           ["$$TRANSITIONS$$", transition_str]])
Example #10
def get_on_indentation_handler(Mode):
    # 'on_dedent' and 'on_n_dedent' cannot be defined at the same time.
    assert not (    E_IncidenceIDs.INDENTATION_DEDENT   in Mode.incidence_db \
                and E_IncidenceIDs.INDENTATION_N_DEDENT in Mode.incidence_db)

    # A mode that deals only with the default indentation handler relies
    # on what is defined in '$QUEX_PATH/analyzer/member/on_indentation.i'
    if Mode.incidence_db.default_indentation_handler_f():
        return "    return;"

    code_fragment = Mode.incidence_db.get(E_IncidenceIDs.INDENTATION_INDENT)
    if code_fragment is not None:
        on_indent_str = Lng.SOURCE_REFERENCED(code_fragment)
    else:
        on_indent_str = "self_send(__QUEX_SETTING_TOKEN_ID_INDENT);"

    code_fragment = Mode.incidence_db.get(E_IncidenceIDs.INDENTATION_NODENT)
    if code_fragment is not None:
        on_nodent_str = Lng.SOURCE_REFERENCED(code_fragment)
    else:
        on_nodent_str = "self_send(__QUEX_SETTING_TOKEN_ID_NODENT);"

    on_dedent_str = ""
    on_n_dedent_str = ""
    code_fragment = Mode.incidence_db.get(E_IncidenceIDs.INDENTATION_DEDENT)
    if code_fragment is not None:
        on_dedent_str = Lng.SOURCE_REFERENCED(code_fragment)

    code_fragment = Mode.incidence_db.get(E_IncidenceIDs.INDENTATION_N_DEDENT)
    if code_fragment is not None:
        on_n_dedent_str = Lng.SOURCE_REFERENCED(code_fragment)

    if (not on_dedent_str) and (not on_n_dedent_str):
        # If no 'on_dedent' and no 'on_n_dedent' is defined ...
        on_dedent_str = ""
        on_n_dedent_str = "#if defined(QUEX_OPTION_TOKEN_REPETITION_SUPPORT)\n"
        on_n_dedent_str += "    self_send_n(ClosedN, __QUEX_SETTING_TOKEN_ID_DEDENT);\n"
        on_n_dedent_str += "#else\n"
        on_n_dedent_str += "    while( start-- != stack->back ) self_send(__QUEX_SETTING_TOKEN_ID_DEDENT);\n"
        on_n_dedent_str += "#endif\n"

    code_fragment = Mode.incidence_db.get(E_IncidenceIDs.INDENTATION_ERROR)
    if code_fragment is not None:
        on_indentation_error = Lng.SOURCE_REFERENCED(code_fragment)
    else:
        # Default: Blow the program if there is an indentation error.
        on_indentation_error = 'QUEX_ERROR_EXIT("Lexical analyzer mode \'%s\': indentation error detected!\\n"' \
                               % Mode.name + \
                               '                "No \'on_indentation_error\' handler has been specified.\\n");'

    # Note: 'on_indentation_bad' is applied in code generation for
    #       indentation counter in 'indentation_counter.py'.
    txt = blue_print(
        on_indentation_str,
        [["$$INDENT-PROCEDURE$$", on_indent_str],
         ["$$NODENT-PROCEDURE$$", on_nodent_str],
         ["$$DEDENT-PROCEDURE$$", on_dedent_str],
         ["$$N-DEDENT-PROCEDURE$$", on_n_dedent_str],
         ["$$INDENTATION-ERROR-PROCEDURE$$", on_indentation_error]])
    return txt
Example #11
def __lc_counting_replacements(code_str, CharacterSet):
    """Line and Column Number Counting(Range Skipper):
     
         -- in loop if there appears a newline, then do:
            increment line_n
            set position from where to count column_n
         -- at end of skipping do one of the following:
            if end delimiter contains newline:
               column_n = number of letters since last new line in end delimiter
               increment line_n by number of newlines in end delimiter.
               (NOTE: in this case the setting of the position from where to count
                      the column_n can be omitted.)
            else:
               column_n = current_position - position from where to count column number.

       NOTE: On reload we do count the column numbers and reset the column_p.
    """
    set_reference = "    __QUEX_IF_COUNT_COLUMNS(reference_p = QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer));\n"

    in_loop = ""
    # Does the end delimiter contain a newline?
    if CharacterSet.contains(ord("\n")):
        in_loop = line_column_counter_in_loop()

    end_procedure = "    __QUEX_IF_COUNT_COLUMNS_ADD((size_t)(me->buffer._input_p - reference_p));\n"
    before_reload = "    __QUEX_IF_COUNT_COLUMNS_ADD((size_t)(me->buffer._input_p - reference_p));\n"
    after_reload = "       __QUEX_IF_COUNT_COLUMNS(reference_p = me->buffer._input_p);\n"

    return blue_print(code_str, [
        ["$$LC_COUNT_COLUMN_N_POINTER_REFERENCE$$", set_reference],
        ["$$LC_COUNT_IN_LOOP$$", in_loop],
        ["$$LC_COUNT_END_PROCEDURE$$", end_procedure],
        ["$$LC_COUNT_BEFORE_RELOAD$$", before_reload],
        ["$$LC_COUNT_AFTER_RELOAD$$", after_reload],
    ])
Example #12
def __cpp_terminal_states(StateMachineName, sm, action_db, DefaultAction):
    
    # -- specific terminal states of patterns (entered from acceptance states)
    txt = ""
    for pattern_id in action_db.keys():
        txt += "  %s:\n" % get_label("", None, pattern_id)
        action_code = "    " + action_db[pattern_id].replace("\n", "\n    ")   
        txt += "    QUEX_STREAM_SEEK(last_acceptance_input_position);"
        txt += action_code + "\n"    
        txt += "    // if action code returns from the function, then the following is meaningless\n"
        if sm.states[sm.init_state_index].transitions().is_empty() == False:
            txt += "    QUEX_STREAM_GET(input);"
        txt += "    goto QUEX_LABEL_%s_ENTRY_INITIAL_STATE;\n" %  StateMachineName

    specific_terminal_states_str = txt

    #  -- general terminal state (entered from non-acceptance state)    
    txt = ""    
    for pattern_id in action_db.keys():
        txt += "     case %s: goto %s;\n" % \
                (repr(pattern_id), get_label("", None, pattern_id))
    jumps_to_acceptance_states_str = txt


    #     -- execute default pattern action 
    #     -- reset character stream to last success             
    #     -- goto initial state 
    txt = blue_print(__cpp_terminal_state_str, 
                     [["%%JUMPS_TO_ACCEPTANCE_STATE%%",    jumps_to_acceptance_states_str],   
                      ["%%SPECIFIC_TERMINAL_STATES%%",     specific_terminal_states_str],
                      ["%%DEFAULT_ACTION%%",               DefaultAction.replace("\n", "        \n")],
                      ["%%STATE_MACHINE_NAME%%",           StateMachineName],
                      ["%%INITIAL_STATE_INDEX_LABEL%%",    get_label(StateMachineName, sm.init_state_index)]])
    return txt
Example #13
def __get_mode_init_call(mode):

    analyzer_function = "QUEX_NAME(%s_analyzer_function)" % mode.name
    on_indentation = "QUEX_NAME(%s_on_indentation)" % mode.name
    on_entry = "QUEX_NAME(%s_on_entry)" % mode.name
    on_exit = "QUEX_NAME(%s_on_exit)" % mode.name
    has_base = "QUEX_NAME(%s_has_base)" % mode.name
    has_entry_from = "QUEX_NAME(%s_has_entry_from)" % mode.name
    has_exit_to = "QUEX_NAME(%s_has_exit_to)" % mode.name

    if mode.options["inheritable"] == "only":
        analyzer_function = "QUEX_NAME(Mode_uncallable_analyzer_function)"

    if len(mode.get_code_fragment_list("on_entry")) == 0:
        on_entry = "QUEX_NAME(Mode_on_entry_exit_null_function)"

    if len(mode.get_code_fragment_list("on_exit")) == 0:
        on_exit = "QUEX_NAME(Mode_on_entry_exit_null_function)"

    if len(mode.get_code_fragment_list("on_indentation")) == 0:
        on_indentation = "QUEX_NAME(Mode_on_indentation_null_function)"

    txt = blue_print(
        quex_mode_init_call_str,
        [["$$MN$$", mode.name], ["$analyzer_function", analyzer_function],
         ["$on_indentation", on_indentation], ["$on_entry", on_entry],
         ["$on_exit", on_exit], ["$has_base", has_base],
         ["$has_entry_from", has_entry_from], ["$has_exit_to", has_exit_to]])

    return txt
Example #14
def get_on_indentation_handler(Mode):
    # 'on_dedent' and 'on_n_dedent' cannot be defined at the same time.
    assert not (    E_IncidenceIDs.INDENTATION_DEDENT   in Mode.incidence_db \
                and E_IncidenceIDs.INDENTATION_N_DEDENT in Mode.incidence_db)

    # A mode that deals only with the default indentation handler relies
    # on what is defined in '$QUEX_PATH/analyzer/member/on_indentation.i'
    if Mode.incidence_db.default_indentation_handler_f():
        return "    return;"

    code_fragment = Mode.incidence_db.get(E_IncidenceIDs.INDENTATION_INDENT)
    if code_fragment is not None:
        on_indent_str   = Lng.SOURCE_REFERENCED(code_fragment)
    else:
        on_indent_str   = "self_send(__QUEX_SETTING_TOKEN_ID_INDENT);"

    code_fragment = Mode.incidence_db.get(E_IncidenceIDs.INDENTATION_NODENT)
    if code_fragment is not None:
        on_nodent_str   = Lng.SOURCE_REFERENCED(code_fragment)
    else:
        on_nodent_str   = "self_send(__QUEX_SETTING_TOKEN_ID_NODENT);"

    on_dedent_str   = ""
    on_n_dedent_str = ""
    code_fragment = Mode.incidence_db.get(E_IncidenceIDs.INDENTATION_DEDENT)
    if code_fragment is not None:
        on_dedent_str = Lng.SOURCE_REFERENCED(code_fragment)

    code_fragment = Mode.incidence_db.get(E_IncidenceIDs.INDENTATION_N_DEDENT)
    if code_fragment is not None:
        on_n_dedent_str = Lng.SOURCE_REFERENCED(code_fragment)

    if (not on_dedent_str) and (not on_n_dedent_str):
        # If no 'on_dedent' and no 'on_n_dedent' is defined ... 
        on_dedent_str    = ""
        on_n_dedent_str  = "#if defined(QUEX_OPTION_TOKEN_REPETITION_SUPPORT)\n"
        on_n_dedent_str += "    self_send_n(ClosedN, __QUEX_SETTING_TOKEN_ID_DEDENT);\n"
        on_n_dedent_str += "#else\n"
        on_n_dedent_str += "    while( start-- != stack->back ) self_send(__QUEX_SETTING_TOKEN_ID_DEDENT);\n"
        on_n_dedent_str += "#endif\n"

    code_fragment = Mode.incidence_db.get(E_IncidenceIDs.INDENTATION_ERROR)
    if code_fragment is not None:
        on_indentation_error = Lng.SOURCE_REFERENCED(code_fragment) 
    else:
        # Default: Blow the program if there is an indentation error.
        on_indentation_error = 'QUEX_ERROR_EXIT("Lexical analyzer mode \'%s\': indentation error detected!\\n"' \
                               % Mode.name + \
                               '                "No \'on_indentation_error\' handler has been specified.\\n");'

    # Note: 'on_indentation_bad' is applied in code generation for 
    #       indentation counter in 'indentation_counter.py'.
    txt = blue_print(on_indentation_str,
                     [["$$INDENT-PROCEDURE$$",            on_indent_str],
                      ["$$NODENT-PROCEDURE$$",            on_nodent_str],
                      ["$$DEDENT-PROCEDURE$$",            on_dedent_str],
                      ["$$N-DEDENT-PROCEDURE$$",          on_n_dedent_str],
                      ["$$INDENTATION-ERROR-PROCEDURE$$", on_indentation_error]])
    return txt
Example #15
def get_on_indentation_handler(Mode):

    # 'on_dedent' and 'on_n_dedent' cannot be defined at the same time.
    assert not (    Mode.has_code_fragment_list("on_dedent") \
                and Mode.has_code_fragment_list("on_n_dedent"))


    # A mode that deals only with the default indentation handler relies
    # on what is defined in '$QUEX_PATH/analyzer/member/on_indentation.i'
    if Mode.default_indentation_handler_sufficient():
        return "    return;"

    if Mode.has_code_fragment_list("on_indent"):
        on_indent_str, eol_f = action_preparation.get_code(Mode.get_code_fragment_list("on_indent"))
    else:
        on_indent_str = "self_send(__QUEX_SETTING_TOKEN_ID_INDENT);"

    if Mode.has_code_fragment_list("on_nodent"):
        on_nodent_str, eol_f = action_preparation.get_code(Mode.get_code_fragment_list("on_nodent"))
    else:
        on_nodent_str = "self_send(__QUEX_SETTING_TOKEN_ID_NODENT);"

    if Mode.has_code_fragment_list("on_dedent"):
        assert not Mode.has_code_fragment_list("on_n_dedent")
        on_dedent_str, eol_f = action_preparation.get_code(Mode.get_code_fragment_list("on_dedent"))
        on_n_dedent_str      = ""

    elif Mode.has_code_fragment_list("on_n_dedent"):
        assert not Mode.has_code_fragment_list("on_dedent")
        on_n_dedent_str, eol_f = action_preparation.get_code(Mode.get_code_fragment_list("on_n_dedent"))
        on_dedent_str          = ""

    else:
        # If no 'on_dedent' and no 'on_n_dedent' is defined ... 
        on_dedent_str    = ""
        on_n_dedent_str  = "#if defined(QUEX_OPTION_TOKEN_REPETITION_SUPPORT)\n"
        on_n_dedent_str += "    self_send_n(ClosedN, __QUEX_SETTING_TOKEN_ID_DEDENT);\n"
        on_n_dedent_str += "#else\n"
        on_n_dedent_str += "    while( start-- != stack->back ) self_send(__QUEX_SETTING_TOKEN_ID_DEDENT);\n"
        on_n_dedent_str += "#endif\n"

    if not Mode.has_code_fragment_list("on_indentation_error"):
        # Default: Blow the program if there is an indentation error.
        on_indentation_error = 'QUEX_ERROR_EXIT("Lexical analyzer mode \'%s\': indentation error detected!\\n"' \
                               % Mode.name + \
                               '                "No \'on_indentation_error\' handler has been specified.\\n");'
    else:
        on_indentation_error, eol_f = action_preparation.get_code(Mode.get_code_fragment_list("on_indentation_error"))

    # Note: 'on_indentation_bad' is applied in code generation for 
    #       indentation counter in 'indentation_counter.py'.
    txt = blue_print(on_indentation_str,
                     [["$$INDENT-PROCEDURE$$",            on_indent_str],
                      ["$$NODENT-PROCEDURE$$",            on_nodent_str],
                      ["$$DEDENT-PROCEDURE$$",            on_dedent_str],
                      ["$$N-DEDENT-PROCEDURE$$",          on_n_dedent_str],
                      ["$$INDENTATION-ERROR-PROCEDURE$$", on_indentation_error]])
    return txt
Example #16
def do(setup):
    """________________________________________________________________________
       (1) Error Check 
       
       (2) Generates a file containing:
    
       -- token id definitions (if they are not done in '--foreign-token-id-file').

       -- const string& TokenClass::map_id_to_name(), i.e. a function which can 
          convert token ids into strings.
       ________________________________________________________________________
    """
    global file_str
    # At this point, assume that the token type has been generated.
    assert blackboard.token_type_definition is not None

    # (1) Error Check
    #
    __warn_implicit_token_definitions()
    if len(Setup.token_id_foreign_definition_file) == 0:
        __autogenerate_token_id_numbers()
        __warn_on_double_definition()
        # If a mandatory token id is missing, this means that Quex did not
        # properly do implicit token definitions. Program error-abort.
        __error_on_mandatory_token_id_missing(AssertF=True)
    else:
        __error_on_mandatory_token_id_missing()

    __error_on_no_specific_token_ids()

    # (2) Generate token id file (if not specified outside)
    #
    if len(Setup.token_id_foreign_definition_file) != 0:
        # Content of file = inclusion of 'Setup.token_id_foreign_definition_file'.
        token_id_txt = [
            "#include \"%s\"\n" %
            Setup.get_file_reference(Setup.token_id_foreign_definition_file)
        ]
    else:
        token_id_txt = __get_token_id_definition_txt()

    include_guard_ext = get_include_guard_extension(Setup.analyzer_name_safe.upper()     \
                                                    + "__"                               \
                                                    + Setup.token_class_name_safe.upper())

    content = blue_print(file_str, [
        ["$$TOKEN_ID_DEFINITIONS$$", "".join(token_id_txt)],
        ["$$DATE$$", time.asctime()],
        [
            "$$TOKEN_CLASS_DEFINITION_FILE$$",
            Setup.get_file_reference(
                blackboard.token_type_definition.get_file_name())
        ],
        ["$$TOKEN_PREFIX$$", Setup.token_id_prefix],
        ["$$INCLUDE_GUARD_EXT$$", include_guard_ext],
    ])

    return content
Example #17
    def HEADER_DEFINITIONS(self):
        return blue_print(cpp_header_definition_str, [
            ("$$CONTINUE_WITH_ON_AFTER_MATCH$$",
             dial_db.get_label_by_door_id(
                 DoorID.continue_with_on_after_match())),
            ("$$RETURN_WITH_ON_AFTER_MATCH$$",
             dial_db.get_label_by_door_id(
                 DoorID.return_with_on_after_match())),
        ])
Example #18
def prepare(template, name, code_unit_type, max_code_unit_n):
    result = blue_print(string_template, [
        ["$$DRAIN_ENCODING$$", name],
        ["$$DRAIN_CODE_UNIT_TYPE$$", code_unit_type],
        ["$$DRAIN_ENCODING_MAX_CODE_UNIT_N$$",
         str(max_code_unit_n)],
    ])
    if name == "wchar_t":
        result = "\n".join([
            "#if ! defined(QUEX_OPTION_WCHAR_T_DISABLED_EXT)", result,
            "#endif"
        ])
    return result
Example #19
def do(setup):
    """________________________________________________________________________
       (1) Error Check 
       
       (2) Generates a file containing:
    
       -- token id definitions (if they are not done in '--foreign-token-id-file').

       -- const string& TokenClass::map_id_to_name(), i.e. a function which can 
          convert token ids into strings.
       ________________________________________________________________________
    """
    global file_str
    # At this point, assume that the token type has been generated.
    assert blackboard.token_type_definition is not None

    # (1) Error Check
    #
    __warn_implicit_token_definitions()
    if len(Setup.token_id_foreign_definition_file) == 0:
        __autogenerate_token_id_numbers()
        __warn_on_double_definition()
        # If a mandatory token id is missing, this means that Quex did not
        # properly do implicit token definitions. Program error-abort.
        __error_on_mandatory_token_id_missing(AssertF=True)
    else:
        __error_on_mandatory_token_id_missing()

    __error_on_no_specific_token_ids()

    # (2) Generate token id file (if not specified outside)
    #
    if len(Setup.token_id_foreign_definition_file) != 0:
        # Content of file = inclusion of 'Setup.token_id_foreign_definition_file'.
        token_id_txt = ["#include \"%s\"\n" % Setup.get_file_reference(Setup.token_id_foreign_definition_file)]
    else:
        token_id_txt = __get_token_id_definition_txt()

    include_guard_ext = get_include_guard_extension(Setup.analyzer_name_safe.upper()     \
                                                    + "__"                               \
                                                    + Setup.token_class_name_safe.upper())

    content = blue_print(file_str,
                         [["$$TOKEN_ID_DEFINITIONS$$",        "".join(token_id_txt)],
                          ["$$DATE$$",                        time.asctime()],
                          ["$$TOKEN_CLASS_DEFINITION_FILE$$", Setup.get_file_reference(blackboard.token_type_definition.get_file_name())],
                          ["$$TOKEN_PREFIX$$",                Setup.token_id_prefix], 
                          ["$$INCLUDE_GUARD_EXT$$",           include_guard_ext], 
                         ])

    return content
Example #20
def replace_keywords(program_txt, Lng, NoIndentF):
    """Replaces pseudo-code keywords with keywords of the given language."""

    txt = blue_print(program_txt, Lng.items())

    if NoIndentF == False:
        # delete the last newline, to prevent additional indentation
        if txt[-1] == "\n": txt = txt[:-1]
        # indent by four spaces
        # (if this happens in recursively called functions nested indented blocks
        #  are correctly indented, see NumberSet::get_condition_code() for example)
        txt = txt.replace("\n", "\n    ") + "\n"

    return txt
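The NoIndentF branch is what makes recursive generation compose: every line except the first receives four extra spaces, so a block pasted into an already indented context lines up. A hypothetical run, with a pseudo-keyword table invented for the demo:

# Hypothetical pseudo-keyword table, invented for this demo.
Lng = {"$if": "if(", "$then": ") {", "$end": "}"}
snippet = "$if x == 0 $then\n    ++y;\n$end\n"
print(replace_keywords(snippet, Lng, NoIndentF=False))
# if( x == 0 ) {
#         ++y;
#     }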
Example #21
def replace_keywords(program_txt, LanguageDB, NoIndentF):
    """Replaces pseudo-code keywords with keywords of the given language."""

    txt = blue_print(program_txt, LanguageDB.items())

    if NoIndentF == False:
        # delete the last newline, to prevent additional indentation
        if txt[-1] == "\n": txt = txt[:-1]
        # indent by four spaces
        # (if this happens in recursively called functions nested indented blocks
        #  are correctly indented, see NumberSet::get_condition_code() for example)     
        txt = txt.replace("\n", "\n    ") + "\n"
    
    return txt          
Example #22
def do_implementation(ModeDB):

    FileTemplate = os.path.normpath(QUEX_PATH
                                    + Lng["$code_base"] 
                                    + "/analyzer/TXT-Cpp.i")
    func_txt = get_file_content_or_die(FileTemplate)

    func_txt = blue_print(func_txt,
            [
                ["$$CONSTRUCTOR_EXTENSTION$$",                  Lng.SOURCE_REFERENCED(blackboard.class_constructor_extension)],
                ["$$CONVERTER_HELPER_I$$",                      Setup.get_file_reference(Setup.output_buffer_codec_header_i)],
                ["$$CONSTRUCTOR_MODE_DB_INITIALIZATION_CODE$$", get_constructor_code(ModeDB.values())],
                ["$$MEMENTO_EXTENSIONS_PACK$$",                 Lng.SOURCE_REFERENCED(blackboard.memento_pack_extension)],
                ["$$MEMENTO_EXTENSIONS_UNPACK$$",               Lng.SOURCE_REFERENCED(blackboard.memento_unpack_extension)],
                ])
    return func_txt
Example #23
def do(Mode, ModeNameList):
    assert required_support_indentation_count()
    # 'on_dedent' and 'on_n_dedent' cannot be defined at the same time.
    assert not (    E_IncidenceIDs.INDENTATION_DEDENT   in Mode.incidence_db \
                and E_IncidenceIDs.INDENTATION_N_DEDENT in Mode.incidence_db)

    code_fragment = Mode.incidence_db.get(E_IncidenceIDs.INDENTATION_INDENT)
    if code_fragment is not None:
        on_indent_str   = Lng.SOURCE_REFERENCED(code_fragment)
    else:
        on_indent_str   = Lng.TOKEN_SEND("QUEX_SETTING_TOKEN_ID_INDENT")

    code_fragment = Mode.incidence_db.get(E_IncidenceIDs.INDENTATION_NODENT)
    if code_fragment is not None:
        on_nodent_str   = Lng.SOURCE_REFERENCED(code_fragment)
    else:
        on_nodent_str   = Lng.TOKEN_SEND("QUEX_SETTING_TOKEN_ID_NODENT")

    code_fragment = Mode.incidence_db.get(E_IncidenceIDs.INDENTATION_DEDENT)
    if code_fragment is not None:
        on_dedent_str = Lng.SOURCE_REFERENCED(code_fragment)
    else:
        on_dedent_str = Lng.TOKEN_SEND("QUEX_SETTING_TOKEN_ID_DEDENT")

    code_fragment   = Mode.incidence_db.get(E_IncidenceIDs.INDENTATION_N_DEDENT)
    if code_fragment is not None:
        on_n_dedent_str = Lng.SOURCE_REFERENCED(code_fragment)
    else:
        on_n_dedent_str = Lng.TOKEN_SEND_N("ClosedN", "QUEX_SETTING_TOKEN_ID_DEDENT")

    code_fragment = Mode.incidence_db.get(E_IncidenceIDs.INDENTATION_ERROR)
    on_indentation_error = ""
    if code_fragment is not None:
        on_indentation_error = Lng.SOURCE_REFERENCED(code_fragment) 

    # Note: 'on_indentation_bad' is applied in code generation for 
    #       indentation counter in 'indentation_counter.py'.
    return blue_print(on_indentation_str, [
        ["$$DEFINE_SELF$$",                 Lng.DEFINE_SELF("me")],
        ["$$MODE_DEFINITION$$",             Lng.MODE_DEFINITION(ModeNameList)],
        ["$$MODE_UNDEFINITION$$",           Lng.MODE_UNDEFINITION(ModeNameList)],
        ["$$INDENT-PROCEDURE$$",            on_indent_str],
        ["$$NODENT-PROCEDURE$$",            on_nodent_str],
        ["$$DEDENT-PROCEDURE$$",            on_dedent_str],
        ["$$N-DEDENT-PROCEDURE$$",          on_n_dedent_str],
        ["$$INDENTATION-ERROR-PROCEDURE$$", on_indentation_error]
    ])
Example #24
def do(ModeDb):
    LexerClassName = Setup.analyzer_class_name
    DerivedClassName = Setup.analyzer_derived_class_name

    mode_setup_txt = __setup(ModeDb)

    # -- mode class member function definitions (on_entry, on_exit, has_base, ...)
    mode_class_member_functions_txt = write_member_functions(ModeDb.values())

    txt = Lng.FRAME_IN_NAMESPACE_MAIN("".join([
        mode_setup_txt,
        mode_class_member_functions_txt,
    ]))

    return blue_print(txt,
                      [["$$LEXER_CLASS_NAME$$", LexerClassName],
                       ["$$LEXER_DERIVED_CLASS_NAME$$", DerivedClassName]])
Example #25
def do_implementation(ModeDB, MemberFunctionSignatureList):

    func_txt = Lng.open_template(Lng.analyzer_template_i_file())

    if blackboard.mode_db:
        map_token_ids_to_names_str = token_id_maker.do_map_id_to_name_cases()
    else:
        map_token_ids_to_names_str = ""

    func_txt = blue_print(func_txt, [
        [
            "$$MEMBER_FUNCTION_ASSIGNMENT$$",
            Lng.MEMBER_FUNCTION_ASSIGNMENT(MemberFunctionSignatureList)
        ],
        [
            "$$CONSTRUCTOR_EXTENSTION$$",
            Lng.SOURCE_REFERENCED(blackboard.class_constructor_extension)
        ],
        [
            "$$DESTRUCTOR_EXTENSTION$$",
            Lng.SOURCE_REFERENCED(blackboard.class_destructor_extension)
        ],
        [
            "$$USER_DEFINED_PRINT$$",
            Lng.SOURCE_REFERENCED(blackboard.class_print_extension)
        ],
        [
            "$$RESET_EXTENSIONS$$",
            Lng.SOURCE_REFERENCED(blackboard.reset_extension)
        ],
        [
            "$$MEMENTO_EXTENSIONS_PACK$$",
            Lng.SOURCE_REFERENCED(blackboard.memento_pack_extension)
        ],
        [
            "$$MEMENTO_EXTENSIONS_UNPACK$$",
            Lng.SOURCE_REFERENCED(blackboard.memento_unpack_extension)
        ],
        [
            "$$INCLUDE_TOKEN_ID_HEADER$$",
            Lng.INCLUDE(Setup.output_token_id_file_ref)
        ],
        ["$$MAP_ID_TO_NAME_CASES$$", map_token_ids_to_names_str],
    ])

    return "\n%s\n" % func_txt
Example #26
def do(setup):
    """________________________________________________________________________
       (1) Error Check 
       
       (2) Generates a file containing:
    
       -- token id definitions (if they are not done in '--foreign-token-id-file').

       -- const string& TokenClass::map_token_id_to_name(), i.e. a function which can 
          convert token ids into strings.
       ________________________________________________________________________
    """
    global file_str
    # At this point, assume that the token type has been generated.
    assert token_db.token_type_definition is not None

    # (1) Error Check
    #
    __warn_implicit_token_definitions()
    if not Setup.token_class_only_f:
        __error_on_no_specific_token_ids()

    if Setup.extern_token_id_file:
        __error_on_mandatory_token_id_missing()
        return None

    __autogenerate_token_id_numbers()
    __warn_on_double_definition()
    # If a mandatory token id is missing, this means that Quex did not
    # properly do implicit token definitions. Program error-abort.
    __error_on_mandatory_token_id_missing(AssertF=True)

    # (2) Generate token id file (if not specified outside)
    #
    if not Setup.extern_token_id_file:
        token_id_txt = __get_token_id_definition_txt()
    else:
        # Content of file = inclusion of 'Setup.extern_token_id_file'.
        token_id_txt = ["%s\n" % Lng.INCLUDE(Setup.extern_token_id_file)]

    return blue_print(file_str, [
        ["$$TOKEN_ID_DEFINITIONS$$", "".join(token_id_txt)],
        ["$$DATE$$", time.asctime()],
        ["$$TOKEN_PREFIX$$", Setup.token_id_prefix],
    ])
Example #27
def __replace_function_names(txt, mode, NullFunctionsF=True):
    def name(Name, ModeName):
        return Lng.NAME_IN_NAMESPACE_MAIN("%s_%s" % (ModeName, Name))

    if NullFunctionsF and not mode.incidence_db.has_key(
            E_IncidenceIDs.MODE_ENTRY):
        on_entry = Lng.NAME_IN_NAMESPACE_MAIN(
            "Mode_on_entry_exit_null_function")
    else:
        on_entry = name("on_entry", mode.name)

    if NullFunctionsF and not mode.incidence_db.has_key(
            E_IncidenceIDs.MODE_EXIT):
        on_exit = Lng.NAME_IN_NAMESPACE_MAIN(
            "Mode_on_entry_exit_null_function")
    else:
        on_exit = name("on_exit", mode.name)

    if NullFunctionsF and not mode.incidence_db.has_key(
            E_IncidenceIDs.INDENTATION_HANDLER):
        on_indentation = Lng.NAME_IN_NAMESPACE_MAIN(
            "Mode_on_indentation_null_function")
    else:
        on_indentation = name("on_indentation", mode.name)

    return blue_print(txt, [
        ["$$MN$$", mode.name],
        ["$analyzer_function",
         name("analyzer_function", mode.name)],
        ["$on_indentation", on_indentation],
        ["$on_entry", on_entry],
        ["$on_exit", on_exit],
        ["$has_base", name("has_base", mode.name)],
        ["$has_entry_from",
         name("has_entry_from", mode.name)],
        ["$has_exit_to", name("has_exit_to", mode.name)],
        [
            "$on_buffer_before_change",
            name("on_buffer_before_change", mode.name)
        ],
        ["$on_buffer_overflow",
         name("on_buffer_overflow", mode.name)],
    ])
Example #28
def _do(UnicodeTrafoInfo):
    """
    PURPOSE: Writes converters for conversion towards UTF8/UTF16/UCS2/UCS4.

    UnicodeTrafoInfo:

       Provides the information about the relation of character codes in a particular 
       coding to unicode character codes. It is provided in the following form:

       # Codec Values                 Unicode Values
       [ (Source0_Begin, Source0_End, TargetInterval0_Begin), 
         (Source1_Begin, Source1_End, TargetInterval1_Begin),
         (Source2_Begin, Source2_End, TargetInterval2_Begin), 
         ... 
       ]
    """
    codec_name = make_safe_identifier(UnicodeTrafoInfo.name).lower()
    utf8_epilog,  utf8_function_body  = ConverterWriterUTF8().do(UnicodeTrafoInfo)
    utf16_prolog, utf16_function_body = ConverterWriterUTF16().do(UnicodeTrafoInfo)
    dummy,        utf32_function_body = ConverterWriterUTF32().do(UnicodeTrafoInfo)

    # Provide only the constants that are necessary
    FileName = os.path.normpath(  QUEX_PATH
                                + Lng["$code_base"] 
                                + "/converter_helper/TXT-from-codec-buffer.i")
    codec_header = Setup.get_file_reference(Setup.output_buffer_codec_header)

    txt_i = blue_print(get_file_content_or_die(FileName), 
                       [["$$CODEC$$",        codec_name],
                        ["$$EPILOG$$",       utf8_epilog],
                        ["$$CODEC_HEADER$$", codec_header],
                        ["$$BODY_UTF8$$",    utf8_function_body],
                        ["$$BODY_UTF16$$",   utf16_function_body],
                        ["$$BODY_UTF32$$",   utf32_function_body]])

    # A separate declaration header is required
    FileName = os.path.normpath(  QUEX_PATH
                                + Lng["$code_base"] 
                                + "/converter_helper/TXT-from-codec-buffer")
    template_h_txt = get_file_content_or_die(FileName)
    txt_h = template_h_txt.replace("$$CODEC$$", codec_name)
    return txt_h, txt_i
Example #29
def do(ModeDb):
    LexerClassName   = Setup.analyzer_class_name
    DerivedClassName = Setup.analyzer_derived_class_name

    mode_db_setup_txt = __setup(ModeDb)

    # -- mode class member function definitions (on_entry, on_exit, has_base, ...)
    mode_class_member_functions_txt = write_member_functions(ModeDb.values())

    txt  = "".join([
        "QUEX_NAMESPACE_MAIN_OPEN\n",
        mode_db_setup_txt,
        mode_class_member_functions_txt,
        "QUEX_NAMESPACE_MAIN_CLOSE\n"
    ])

    txt = blue_print(txt, [["$$LEXER_CLASS_NAME$$",         LexerClassName],
                           ["$$LEXER_DERIVED_CLASS_NAME$$", DerivedClassName]])
    
    return txt
Example #30
def do(Modes):
    LexerClassName = Setup.analyzer_class_name
    DerivedClassName = Setup.analyzer_derived_class_name

    # -- mode class member function definitions (on_entry, on_exit, has_base, ...)
    mode_class_member_functions_txt = write_member_functions(Modes.values())

    mode_objects_txt = ""
    for mode_name, mode in Modes.items():
        if mode.abstract_f(): continue
        mode_objects_txt += "/* Global */QUEX_NAME(Mode)  QUEX_NAME(%s);\n" % mode_name

    txt = "%s%s%s%s" % ("QUEX_NAMESPACE_MAIN_OPEN\n", mode_objects_txt,
                        mode_class_member_functions_txt,
                        "QUEX_NAMESPACE_MAIN_CLOSE\n")

    txt = blue_print(txt, [["$$LEXER_CLASS_NAME$$", LexerClassName],
                           ["$$LEXER_DERIVED_CLASS_NAME$$", DerivedClassName]])

    return txt
Example #31
def _do(UnicodeTrafoInfo):
    """
    PURPOSE: Writes converters for conversion towards UTF8/UTF16/UCS2/UCS4.

    UnicodeTrafoInfo:

       Provides the information about the relation of character codes in a particular 
       coding to unicode character codes. It is provided in the following form:

       # Codec Values                 Unicode Values
       [ (Source0_Begin, Source0_End, TargetInterval0_Begin), 
         (Source1_Begin, Source1_End, TargetInterval1_Begin),
         (Source2_Begin, Source2_End, TargetInterval2_Begin), 
         ... 
       ]
    """
    codec_name = make_safe_identifier(UnicodeTrafoInfo.name).lower()
    utf8_epilog, utf8_function_body = ConverterWriterUTF8().do(
        UnicodeTrafoInfo)
    utf16_prolog, utf16_function_body = ConverterWriterUTF16().do(
        UnicodeTrafoInfo)
    dummy, utf32_function_body = ConverterWriterUTF32().do(UnicodeTrafoInfo)

    # Provide only the constants that are necessary
    FileName = os.path.normpath(QUEX_PATH + Lng["$code_base"] +
                                "/converter_helper/TXT-from-codec-buffer.i")
    codec_header = Setup.get_file_reference(Setup.output_buffer_codec_header)

    txt_i = blue_print(get_file_content_or_die(FileName),
                       [["$$CODEC$$", codec_name], ["$$EPILOG$$", utf8_epilog],
                        ["$$CODEC_HEADER$$", codec_header],
                        ["$$BODY_UTF8$$", utf8_function_body],
                        ["$$BODY_UTF16$$", utf16_function_body],
                        ["$$BODY_UTF32$$", utf32_function_body]])

    # A separate declaration header is required
    FileName = os.path.normpath(QUEX_PATH + Lng["$code_base"] +
                                "/converter_helper/TXT-from-codec-buffer")
    template_h_txt = get_file_content_or_die(FileName)
    txt_h = template_h_txt.replace("$$CODEC$$", codec_name)
    return txt_h, txt_i
Example #32
def __cpp_terminal_states(StateMachineName, sm, action_db, DefaultAction):

    # -- specific terminal states of patterns (entered from acceptance states)
    txt = ""
    for acceptance_id in action_db.keys():
        txt += "  %s:\n" % get_label("", None, acceptance_id)
        action_code = "    " + action_db[acceptance_id].replace("\n", "\n    ")
        txt += "    QUEX_STREAM_SEEK(last_acceptance_input_position);"
        txt += action_code + "\n"
        txt += "    // if action code returns from the function, then the following is meaningless\n"
        if sm.states[sm.init_state_index].target_map.is_empty() == False:
            txt += "    QUEX_STREAM_GET(input);"
        txt += "    goto QUEX_LABEL_%s_ENTRY_INITIAL_STATE;\n" % StateMachineName

    specific_terminal_states_str = txt

    #  -- general terminal state (entered from non-acceptance state)
    txt = ""
    for acceptance_id in action_db.keys():
        txt += "     case %s: goto %s;\n" % \
                (repr(acceptance_id), get_label("", None, acceptance_id))
    jumps_to_acceptance_states_str = txt

    #     -- execute default pattern action
    #     -- reset character stream to last success
    #     -- goto initial state
    txt = blue_print(
        __cpp_terminal_state_str,
        [["%%JUMPS_TO_ACCEPTANCE_STATE%%", jumps_to_acceptance_states_str],
         ["%%SPECIFIC_TERMINAL_STATES%%", specific_terminal_states_str],
         ["%%DEFAULT_ACTION%%",
          DefaultAction.replace("\n", "        \n")],
         ["%%STATE_MACHINE_NAME%%", StateMachineName],
         [
             "%%INITIAL_STATE_INDEX_LABEL%%",
             get_label(StateMachineName, sm.init_state_index)
         ]])
    return txt
Example #33
def do():
    """RETURNS: list of (content, file_name)

       where 'content' is the content to be written into 'file_name'.
    """
    if not Setup.converter_only_f:
        source_name = "lexeme"
    else:
        if Setup.converter_source_name:
            source_name = Setup.converter_source_name
        else:
            source_name = Setup.buffer_encoding.name

    header_txt = Lng.template_converter_header()
    implementation_txt = blue_print(
        Lng.template_converter_implementation(),
        [("$$CONVERTER_HEADER$$", Lng.file_name_converter_header(source_name)),
         ("$$CHARACTER_CONVERTERS$$", _character_converters()),
         ("$$STRING_CONVERTERS$$", _string_converters())])

    if Setup.converter_only_f:
        implementation_txt = implementation_txt.replace(
            "QUEX_TYPE_LEXATOM", Setup.lexatom.type)
        implementation_txt = implementation_txt.replace(
            "QUEX_INLINE", Lng.INLINE)
        implementation_txt = Lng.Match_QUEX_NAME_lexeme.sub(
            "QUEX_NAME(%s_" % source_name, implementation_txt)
        header_txt = header_txt.replace("QUEX_TYPE_LEXATOM",
                                        Setup.lexatom.type)
        header_txt = header_txt.replace("QUEX_INLINE", Lng.INLINE)
        header_txt = Lng.Match_QUEX_NAME_lexeme.sub(
            "QUEX_NAME(%s_" % source_name, header_txt)

    return [
        (header_txt, Lng.file_name_converter_header(source_name)),
        (implementation_txt,
         Lng.file_name_converter_implementation(source_name)),
    ]
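Per its docstring, do() only computes (content, file_name) pairs; writing them out is left to the caller. A hypothetical minimal driver:

for content, file_name in do():
    # Persist each generated text to its target file.
    with open(file_name, "w") as file_handle:
        file_handle.write(content)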
Example #34
def do(Modes):
    LexerClassName   = Setup.analyzer_class_name
    DerivedClassName = Setup.analyzer_derived_class_name

    # -- mode class member function definitions (on_entry, on_exit, has_base, ...)
    mode_class_member_functions_txt = write_member_functions(Modes.values())

    mode_objects_txt = ""    
    for mode_name, mode in Modes.items():
        if mode.abstract_f(): continue
        mode_objects_txt += "/* Global */QUEX_NAME(Mode)  QUEX_NAME(%s);\n" % mode_name

    txt  = "%s%s%s%s" % (
        "QUEX_NAMESPACE_MAIN_OPEN\n",
        mode_objects_txt,
        mode_class_member_functions_txt,
        "QUEX_NAMESPACE_MAIN_CLOSE\n"
    )

    txt = blue_print(txt, [["$$LEXER_CLASS_NAME$$",         LexerClassName],
                           ["$$LEXER_DERIVED_CLASS_NAME$$", DerivedClassName]])
    
    return txt
Example #35
def _table_character_converters(unicode_trafo_info):
    """
    PURPOSE: Writes converters for conversion towards UTF8/UTF16/UCS2/UCS4.

    UnicodeTrafoInfo:

       Provides the information about the relation of character codes in a particular 
       coding to unicode character codes:

               # Codec Values                 Unicode Values
               [ 
                 (Source0_Begin, Source0_End, TargetInterval0_Begin), 
                 (Source1_Begin, Source1_End, TargetInterval1_Begin),
                 (Source2_Begin, Source2_End, TargetInterval2_Begin), 
                 ... 
               ]

    """
    encoding_name = Lng.SAFE_IDENTIFIER(unicode_trafo_info.name)
    if encoding_name in ("utf32", "unicode"):
        source_interval_begin = 0
        lexatom_size_in_byte = Setup.lexatom.size_in_byte
        if lexatom_size_in_byte == -1: lexatom_size_in_byte = 4
        source_interval_end = min(256**lexatom_size_in_byte, 0x200000)
        target_interval_begin = 0
        unicode_trafo_info = [(source_interval_begin, source_interval_end,
                               target_interval_begin)]

    utf8_function_body = ConverterWriterUTF8().do(unicode_trafo_info)
    utf16_function_body = ConverterWriterUTF16().do(unicode_trafo_info)
    utf32_function_body = ConverterWriterUTF32().do(unicode_trafo_info)

    return blue_print(Lng.template_converter_character_functions(),
                      [["$$BODY_UTF8$$", utf8_function_body],
                       ["$$BODY_UTF16$$", utf16_function_body],
                       ["$$BODY_UTF32$$", utf32_function_body]])
Example #36
    def LEXEME_MACRO_SETUP(self):
        return blue_print(cpp.lexeme_macro_setup, [
            ["$$LEXEME_LENGTH$$",  self.LEXEME_LENGTH()],
            ["$$INPUT_P$$",        self.INPUT_P()],
        ])
Example #37
def do(ModeDb):
    assert ModeDb

    txt = Lng.open_template(Lng.analyzer_configuration_file())

    lexatom_loader_seek_buffer_size = 512
    indentation_stack_size          = 64
    buffer_size                     = 131072
    buffer_size_min                 = 32768
    converter_buffer_size           = 65536
    mode_stack_size                 = 64
    if   buffer_size_min >= 1024: fallback_n = 256
    elif buffer_size_min >= 16:   fallback_n = buffer_size >> 4
    else:                         fallback_n = 0

    if Setup.fallback_optional_f:
        fallback_n = 0
    else:
        pre_context_lengths = [m.longest_pre_context() for m in ModeDb.itervalues()]
        if None in pre_context_lengths or not pre_context_lengths:
            # Assert: any specification has taken care of constraint:
            assert not Setup.fallback_mandatory_f
            fallback_n = 0
        else:
            fallback_n = "%s" % max(pre_context_lengths)

    adaptable_list = [
        ("BUFFER_SIZE",                            "%s" % buffer_size),
        ("BUFFER_FALLBACK_N",                      "%s" % fallback_n),
        ("BUFFER_SIZE_MIN",                        "QUEX_SETTING_BUFFER_SIZE"),
        ("INDENTATION_STACK_SIZE",                 "%s" % indentation_stack_size),
        ("BUFFER_LEXATOM_LOADER_CONVERTER_BUFFER_SIZE", "(size_t)%s" % converter_buffer_size),
        ("BUFFER_LEXATOM_LOADER_SEEK_BUFFER_SIZE", lexatom_loader_seek_buffer_size),
        ("MODE_STACK_SIZE",                        "(size_t)%s" % mode_stack_size), 
        ("TOKEN_QUEUE_SIZE",                       "(size_t)%s" % repr(Setup.token_queue_size)),
    ]
    immutable_list = [
        ("VERSION",                         '"%s"' % QUEX_VERSION),
        ("ANALYZER_VERSION",                '"%s"' % Setup.user_application_version_id),
        ("BUILD_DATE",                      '"%s"' % time.asctime()),
        ("MODE_INITIAL_P",                  '&%s'  % Lng.NAME_IN_NAMESPACE_MAIN(blackboard.initial_mode.get_pure_text())),
        ("BUFFER_LEXATOM_BUFFER_BORDER",    "0x%X" % Setup.buffer_limit_code),
        ("BUFFER_LEXATOM_NEWLINE",          _lexatom_newline_in_engine_encoding()),
        ("BUFFER_LEXATOM_PATH_TERMINATION", "0x%X" % Setup.path_limit_code),
        ("BUFFER_FALLBACK_N",               "%s"   % fallback_n),
        ("FALLBACK_MANDATORY",              {True: Lng.TRUE, False: Lng.FALSE}[Setup.fallback_mandatory_f])
    ]

    adaptable_txt = [ Lng.QUEX_SETTING_DEF(name, value) for name, value in adaptable_list ]
    immutable_txt = [ Lng.QUEX_SETTING_DEF(name, value) for name, value in immutable_list ]

    setting_list = [ name for name, dummy in chain(adaptable_list, immutable_list) ]

    txt = blue_print(txt, [
         ["$$ADAPTABLE$$",        "\n".join(adaptable_txt)],
         ["$$IMMUTABLE$$",        "\n".join(immutable_txt)],
         ["$$TYPE_DEFINITIONS$$", _type_definitions()],
         ["$$HELP_IF_CONFIGURATION_BY_CMAKE$$", Lng.HELP_IF_CONFIGURATION_BY_CMAKE(adaptable_list + immutable_list)],
         ["$$ERROR_IF_NO_CONFIGURATION_BY_MACRO$$", Lng.ERROR_IF_DEFINED_AND_NOT_CONFIGURATION_BY_MACRO(setting_list)],
     ])

    return txt # adapt.do(txt, Setup.output_directory)
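
A worked sketch of the fallback computation above: when the fallback is not optional
and every mode reports a known longest pre-context, the buffer must retain the maximum
of those lengths in front of the lexeme start (values hypothetical):

pre_context_lengths = [2, 5, 3]    # hypothetical longest pre-context per mode
if None in pre_context_lengths or not pre_context_lengths:
    fallback_n = 0
else:
    fallback_n = "%s" % max(pre_context_lengths)
assert fallback_n == "5"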
Exemple #40
0
def get_skipper(EndSequence, Mode=None, IndentationCounterTerminalID=None, OnSkipRangeOpenStr=""):
    assert type(EndSequence) == list
    assert len(EndSequence) >= 1
    assert map(type, EndSequence) == [int] * len(EndSequence)

    local_variable_db = {}

    global template_str

    LanguageDB = Setup.language_db

    # Name the $$SKIPPER$$
    skipper_index = sm_index.get()

    # Determine the $$DELIMITER$$
    delimiter_str, delimiter_comment_str = get_character_sequence(EndSequence)
    delimiter_length = len(EndSequence)

    tmp = []
    LanguageDB.COMMENT(tmp, "                         Delimiter: %s" % delimiter_comment_str)
    delimiter_comment_str = "".join(tmp)
    # Determine the check for the tail of the delimiter
    delimiter_remainder_test_str = ""
    if len(EndSequence) != 1:
        txt = ""
        for i, letter in enumerate(EndSequence[1:], start=1):
            txt += "    %s\n" % LanguageDB.ASSIGN("input", LanguageDB.INPUT_P_DEREFERENCE(i - 1))
            txt += "    %s" % LanguageDB.IF_INPUT("!=", "Skipper$$SKIPPER_INDEX$$[%i]" % i)
            txt += "         %s" % LanguageDB.GOTO(skipper_index)
            txt += "    %s" % LanguageDB.END_IF()
        delimiter_remainder_test_str = txt

    if not end_delimiter_is_subset_of_indentation_counter_newline(Mode, EndSequence):
        goto_after_end_of_skipping_str = LanguageDB.GOTO(E_StateIndices.ANALYZER_REENTRY)
    else:
        # If there is indentation counting involved, then the counter's terminal id must
        # be determined at this place.
        assert IndentationCounterTerminalID is not None
        # If the ending delimiter is a subset of what the 'newline' pattern triggers
        # in indentation counting => move on to the indentation counter.
        goto_after_end_of_skipping_str = LanguageDB.GOTO_TERMINAL(IndentationCounterTerminalID)

    if OnSkipRangeOpenStr != "":
        on_skip_range_open_str = OnSkipRangeOpenStr
    else:
        on_skip_range_open_str = get_on_skip_range_open(Mode, EndSequence)

    # The main part
    code_str = blue_print(
        template_str,
        [
            ["$$DELIMITER_COMMENT$$", delimiter_comment_str],
            ["$$INPUT_P_INCREMENT$$", LanguageDB.INPUT_P_INCREMENT()],
            ["$$INPUT_P_DECREMENT$$", LanguageDB.INPUT_P_DECREMENT()],
            ["$$INPUT_GET$$", LanguageDB.ACCESS_INPUT()],
            ["$$IF_INPUT_EQUAL_DELIMITER_0$$", LanguageDB.IF_INPUT("==", "Skipper$$SKIPPER_INDEX$$[0]")],
            ["$$ENDIF$$", LanguageDB.END_IF()],
            ["$$ENTRY$$", LanguageDB.LABEL(skipper_index)],
            ["$$RELOAD$$", get_label("$reload", skipper_index)],
            ["$$GOTO_ENTRY$$", LanguageDB.GOTO(skipper_index)],
            ["$$INPUT_P_TO_LEXEME_START$$", LanguageDB.INPUT_P_TO_LEXEME_START()],
            # When things were skipped, no change to acceptance flags or modes has
            # happened. One can jump immediately to the start without re-entry preparation.
            ["$$GOTO_AFTER_END_OF_SKIPPING$$", goto_after_end_of_skipping_str],
            ["$$MARK_LEXEME_START$$", LanguageDB.LEXEME_START_SET()],
            ["$$DELIMITER_REMAINDER_TEST$$", delimiter_remainder_test_str],
            ["$$ON_SKIP_RANGE_OPEN$$", on_skip_range_open_str],
        ],
    )

    # Line and column number counting
    code_str, reference_p_f = __lc_counting_replacements(code_str, EndSequence)

    # The finishing touch
    code_str = blue_print(
        code_str,
        [["$$SKIPPER_INDEX$$", __nice(skipper_index)], ["$$GOTO_RELOAD$$", get_label("$reload", skipper_index)]],
    )

    if reference_p_f:
        variable_db.enter(local_variable_db, "reference_p", Condition="QUEX_OPTION_COLUMN_NUMBER_COUNTING")

    variable_db.enter(local_variable_db, "Skipper%i", "{ %s }" % delimiter_str, delimiter_length, Index=skipper_index)
    variable_db.enter(local_variable_db, "Skipper%iL", "%i" % delimiter_length, Index=skipper_index)
    variable_db.enter(local_variable_db, "text_end")
    return code_str, local_variable_db
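
For a two-character delimiter such as "*/", the unrolled tail test above emits one
dereference/compare per remaining delimiter character. A sketch with hypothetical
plain-C emitters standing in for the LanguageDB calls:

EndSequence = map(ord, "*/")
txt = ""
for i, letter in enumerate(EndSequence[1:], start=1):
    txt += "    input = *(me->buffer._input_p + %i);\n" % (i - 1)
    txt += "    if( input != Skipper$$SKIPPER_INDEX$$[%i] ) goto SKIPPER_ENTRY;\n" % i
# 'txt' now holds the check for the single remaining character '/'.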
Exemple #41
0
def do(ModeDB):
    assert blackboard.token_type_definition is not None
    

    QuexClassHeaderFileTemplate = os.path.normpath(  QUEX_PATH
                                                   + Lng["$code_base"] 
                                                   + Lng["$analyzer_template_file"]).replace("//","/")
    LexerClassName = Setup.analyzer_class_name

    quex_converter_coding_name_str = Setup.converter_ucs_coding_name

    mode_id_definition_str = "" 
    # NOTE: First mode-id needs to be '1' for compatibility with flex generated engines
    mode_id = 0
    for name, mode in ModeDB.items():
        if mode.abstract_f(): continue
        mode_id += 1
        mode_id_definition_str += "    QUEX_NAME(ModeID_%s) = %i,\n" % (name, mode_id)

    if mode_id_definition_str != "":
        mode_id_definition_str = mode_id_definition_str[:-2]

    # -- instances of mode classes as members of the lexer
    mode_object_members_txt,     \
    mode_specific_functions_txt, \
    friend_txt                   = get_mode_class_related_code_fragments(ModeDB.values())

    # -- define a pointer that directly has the type of the derived class
    if Setup.analyzer_derived_class_name != "":
        analyzer_derived_class_name    = Setup.analyzer_derived_class_name
        derived_class_type_declaration = "class %s;" % Setup.analyzer_derived_class_name
    else:
        analyzer_derived_class_name    = Setup.analyzer_class_name
        derived_class_type_declaration = ""

    token_class_file_name = blackboard.token_type_definition.get_file_name()
    token_class_name      = blackboard.token_type_definition.class_name
    token_class_name_safe = blackboard.token_type_definition.class_name_safe

    template_code_txt = get_file_content_or_die(QuexClassHeaderFileTemplate)

    include_guard_ext = get_include_guard_extension(
            Lng.NAMESPACE_REFERENCE(Setup.analyzer_name_space) 
            + "__" + Setup.analyzer_class_name)

    if len(Setup.token_id_foreign_definition_file) != 0:
        token_id_definition_file = Setup.token_id_foreign_definition_file
    else:
        token_id_definition_file = Setup.output_token_id_file

    lexer_name_space_safe = get_include_guard_extension(Lng.NAMESPACE_REFERENCE(Setup.analyzer_name_space))

    txt = blue_print(template_code_txt,
            [
                ["$$___SPACE___$$",                      " " * (len(LexerClassName) + 1)],
                ["$$CLASS_BODY_EXTENSION$$",             Lng.SOURCE_REFERENCED(blackboard.class_body_extension)],
                ["$$CONVERTER_HELPER$$",                 Setup.get_file_reference(Setup.output_buffer_codec_header)],
                ["$$INCLUDE_GUARD_EXTENSION$$",          include_guard_ext],
                ["$$LEXER_CLASS_NAME$$",                 LexerClassName],
                ["$$LEXER_NAME_SPACE$$",                 lexer_name_space_safe],
                ["$$LEXER_CLASS_NAME_SAFE$$",            Setup.analyzer_name_safe],
                ["$$LEXER_CONFIG_FILE$$",                Setup.get_file_reference(Setup.output_configuration_file)],
                ["$$LEXER_DERIVED_CLASS_DECL$$",         derived_class_type_declaration],
                ["$$LEXER_DERIVED_CLASS_NAME$$",         analyzer_derived_class_name],
                ["$$QUEX_MODE_ID_DEFINITIONS$$",         mode_id_definition_str],
                ["$$MEMENTO_EXTENSIONS$$",               Lng.SOURCE_REFERENCED(blackboard.memento_class_extension)],
                ["$$MODE_CLASS_FRIENDS$$",               friend_txt],
                ["$$MODE_OBJECTS$$",                     mode_object_members_txt],
                ["$$MODE_SPECIFIC_ANALYSER_FUNCTIONS$$", mode_specific_functions_txt],
                ["$$PRETTY_INDENTATION$$",               "     " + " " * (len(LexerClassName)*2 + 2)],
                ["$$QUEX_TEMPLATE_DIR$$",                QUEX_PATH + Lng["$code_base"]],
                ["$$QUEX_VERSION$$",                     QUEX_VERSION],
                ["$$TOKEN_CLASS_DEFINITION_FILE$$",      Setup.get_file_reference(token_class_file_name)],
                ["$$TOKEN_CLASS$$",                      token_class_name],
                ["$$TOKEN_CLASS_NAME_SAFE$$",            token_class_name_safe],
                ["$$TOKEN_ID_DEFINITION_FILE$$",         Setup.get_file_reference(token_id_definition_file)],
                ["$$CORE_ENGINE_CHARACTER_CODING$$",     quex_converter_coding_name_str],
                ["$$USER_DEFINED_HEADER$$",              Lng.SOURCE_REFERENCED(blackboard.header) + "\n"],
             ])

    return txt
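
A worked sketch of the mode-id enumeration above with a hypothetical mode list:
abstract modes produce no entry, and the first implemented mode receives id '1', as the
NOTE demands:

modes = [("BASE", True), ("PROGRAM", False), ("STRING", False)]   # (name, abstract_f)
mode_id_definition_str = ""
mode_id = 0
for name, abstract_f in modes:
    if abstract_f: continue
    mode_id += 1
    mode_id_definition_str += "    QUEX_NAME(ModeID_%s) = %i,\n" % (name, mode_id)
# --> ModeID_PROGRAM = 1, ModeID_STRING = 2; abstract 'BASE' gets no id.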
Exemple #42
0
def get_skipper(EndSequence, CloserPattern, ModeName, OnSkipRangeOpen, DoorIdAfter):
    assert len(EndSequence) >= 1

    global template_str

    # Name the $$SKIPPER$$
    skipper_index   = sm_index.get()
    skipper_door_id = dial_db.new_door_id(skipper_index)

    delimiter_str, delimiter_comment_str = get_character_sequence(EndSequence)

    end_sequence_transformed = transformation.do_sequence(EndSequence)

    # Determine the $$DELIMITER$$
    delimiter_length = len(end_sequence_transformed)

    delimiter_comment_str = Lng.COMMENT("                         Delimiter: %s" % delimiter_comment_str)

    # Determine the check for the tail of the delimiter
    delimiter_remainder_test_str = ""
    if len(EndSequence) != 1: 
        txt = "".join(
            "    %s" % Lng.IF_GOTO(Lng.INPUT_P_DEREFERENCE(i-1), "!=", 
                                   "Skipper$$SKIPPER_INDEX$$[%i]" % i,
                                   skipper_door_id, i == 1)
            for i, letter in enumerate(EndSequence[1:], start=1)
        )
        delimiter_remainder_test_str = txt

    door_id_reload = dial_db.new_door_id()
    on_skip_range_open = get_on_skip_range_open(OnSkipRangeOpen, CloserPattern)

    # The main part
    code_str = blue_print(template_str,
                          [
                           ["$$DELIMITER_COMMENT$$",              delimiter_comment_str],
                           ["$$INPUT_P_INCREMENT$$",              Lng.INPUT_P_INCREMENT()],
                           ["$$INPUT_P_DECREMENT$$",              Lng.INPUT_P_DECREMENT()],
                           ["$$INPUT_GET$$",                      Lng.ACCESS_INPUT()],
                           ["$$IF_INPUT_EQUAL_DELIMITER_0$$",     Lng.IF_INPUT("==", "Skipper$$SKIPPER_INDEX$$[0]")],
                           ["$$ENDIF$$",                          Lng.END_IF()],
                           ["$$ENTRY$$",                          dial_db.get_label_by_door_id(skipper_door_id)],
                           ["$$RELOAD$$",                         dial_db.get_label_by_door_id(door_id_reload)],
                           ["$$GOTO_ENTRY$$",                     Lng.GOTO(skipper_door_id)],
                           ["$$INPUT_P_TO_LEXEME_START$$",        Lng.INPUT_P_TO_LEXEME_START()],
                           # When things were skipped, no change to acceptance flags or modes has
                           # happened. One can jump immediately to the start without re-entry preparation.
                           ["$$GOTO_AFTER_END_OF_SKIPPING$$",     Lng.GOTO(DoorIdAfter)], 
                           ["$$MARK_LEXEME_START$$",              Lng.LEXEME_START_SET()],
                           ["$$DELIMITER_REMAINDER_TEST$$",       delimiter_remainder_test_str],
                           ["$$ON_SKIP_RANGE_OPEN$$",             on_skip_range_open],
                          ])

    # Line and column number counting
    code_str, reference_p_f = __lc_counting_replacements(code_str, EndSequence)

    # The finishing touch
    code_str = blue_print(code_str,
                          [["$$SKIPPER_INDEX$$", __nice(skipper_index)],
                           ["$$GOTO_RELOAD$$",   Lng.GOTO(door_id_reload)]])

    if reference_p_f:
        variable_db.require("reference_p", Condition="QUEX_OPTION_COLUMN_NUMBER_COUNTING")

    variable_db.require_array("Skipper%i", Initial="{ %s }" % delimiter_str, ElementN=delimiter_length, Index=skipper_index)
    variable_db.require("Skipper%iL", "%i" % delimiter_length, Index=skipper_index)
    variable_db.require("text_end")

    variable_db.require("input") 

    return [ code_str ]
Exemple #43
0
def __terminal_states(StateMachineName, action_db, OnFailureAction,
                      EndOfStreamAction, PreConditionIDList, LanguageDB,
                      VariableDB, OnAfterMatchStr, LexemeNullObjectName):
    """NOTE: During backward-lexing, for a pre-condition, there is not need for terminal
             states, since only the flag 'pre-condition fulfilled is raised.
    """

    # (*) specific terminal states of patterns (entered from acceptance states)
    specific_terminal_states = []
    for pattern_id, pattern_action_info in action_db.items():
        if pattern_id in PreConditionIDList: continue
        code = get_terminal_code(pattern_id, pattern_action_info, LanguageDB)
        specific_terminal_states.extend(code)

    delete_pre_context_flags = []
    for pre_context_sm_id in PreConditionIDList:
        delete_pre_context_flags.append("    ")
        delete_pre_context_flags.append(
            LanguageDB.ASSIGN(
                "pre_context_%s_fulfilled_f" % __nice(pre_context_sm_id), 0))

    # If there is at least a single terminal, then the 're-entry' preparation must be accomplished
    if len(action_db) != 0: get_label("$re-start", U=True)

    #  -- execute 'on_failure' pattern action
    #  -- goto initial state
    end_of_stream_code_action_str = EndOfStreamAction.action().get_code()

    # -- FAILURE ACTION: Under 'normal' circumstances the on_failure action is simply to be executed
    #                    since the 'get_forward()' incremented the 'current' pointer.
    #                    HOWEVER, when end of file has been reached the 'current' pointer has to
    #                    be reset so that the initial state can drop out on the buffer limit code
    #                    and then transit to the end of file action.
    # NOTE: It is possible that 'miss' happens after a chain of characters appeared. In any case the input
    #       pointer must be set up right after the lexeme start. This way, the lexer gets a new chance as
    #       soon as possible.
    on_failure = __terminal_on_failure_prolog(LanguageDB)
    msg = OnFailureAction.action().get_code()

    on_failure.append(msg)

    prolog = blue_print(__terminal_state_prolog, [
        ["$$LEXEME_LENGTH$$", LanguageDB.LEXEME_LENGTH()],
        ["$$INPUT_P$$", LanguageDB.INPUT_P()],
        ["$$LEXEME_NULL_OBJECT$$", LexemeNullObjectName],
    ])

    router = Address(
        "$terminal-router",
        None,
        [
            blue_print(__terminal_router_prolog_str, [
                [
                    "$$TERMINAL_FAILURE-REF$$",
                    "QUEX_LABEL(%i)" % get_address("$terminal-FAILURE")
                ],
                ["$$TERMINAL_FAILURE$$",
                 get_label("$terminal-FAILURE")],
            ]),
            # DO NOT 'U=True' for the state router. This is done automatically if
            # 'goto reload' is used.
            get_label("$state-router"),
            ";",
            __terminal_router_epilog_str,
        ])

    epilog = blue_print(__terminal_state_epilog, [
        ["$$FAILURE_ACTION$$", "".join(on_failure)],
        ["$$END_OF_STREAM_ACTION$$", end_of_stream_code_action_str],
        ["$$TERMINAL_END_OF_STREAM-DEF$$",
         get_label("$terminal-EOF")],
        ["$$TERMINAL_FAILURE-DEF$$",
         get_label("$terminal-FAILURE")],
        ["$$STATE_MACHINE_NAME$$", StateMachineName],
        ["$$GOTO_START_PREPARATION$$",
         get_label("$re-start", U=True)],
    ])

    reset_last_acceptance_str = ""
    if VariableDB.has_key("last_acceptance"):
        reset_last_acceptance_str = "last_acceptance = $$TERMINAL_FAILURE-REF$$; /* TERMINAL: FAILURE */"

    return_preparation = ""
    if OnAfterMatchStr != "":
        return_preparation = blue_print(
            __return_preparation_str,
            [["$$ON_AFTER_MATCH$$", OnAfterMatchStr]])

    reentry_preparation = blue_print(__reentry_preparation_str, [
        ["$$REENTRY_PREPARATION$$",
         get_label("$re-start")],
        [
            "$$DELETE_PRE_CONDITION_FULLFILLED_FLAGS$$",
            "".join(delete_pre_context_flags)
        ],
        ["$$GOTO_START$$", get_label("$start", U=True)],
        ["$$ON_AFTER_MATCH$$", OnAfterMatchStr],
        ["$$RESET_LAST_ACCEPTANCE$$", reset_last_acceptance_str],
        [
            "$$COMMENT_ON_POST_CONTEXT_INITIALIZATION$$",
            comment_on_post_context_position_init_str
        ],
        [
            "$$TERMINAL_FAILURE-REF$$",
            "QUEX_LABEL(%i)" % get_address("$terminal-FAILURE")
        ],
    ])

    txt = []
    txt.append(router)
    txt.append(prolog)
    txt.extend(specific_terminal_states)
    txt.append(epilog)
    txt.append(return_preparation)
    txt.append(reentry_preparation)

    return txt
Exemple #44
0
def __lc_counting_replacements(code_str, EndSequence):
    """Line and Column Number Counting(Range Skipper):
     
         -- in loop if there appears a newline, then do:
            increment line_n
            set position from where to count column_n
         -- at end of skipping do one of the following:
            if end delimiter contains newline:
               column_n = number of letters since last new line in end delimiter
               increment line_n by number of newlines in end delimiter.
               (NOTE: in this case the setting of the position from where to count
                      the column_n can be omitted.)
            else:
               column_n = current_position - position from where to count column number.

       NOTE: On reload we do count the column numbers and reset the column_p.
    """
    def get_character_n_after_last_newline(Sequence):
        tmp = copy(Sequence)
        tmp.reverse()
        try:               return tmp.index(ord('\n'))
        except ValueError: return -1

    char_n_after_last_newline = get_character_n_after_last_newline(EndSequence)

    reference_p_def = ""

    in_loop         = ""
    end_procedure   = ""
    before_reload   = ""
    after_reload    = ""
    on_first_delimiter = ""

    reference_p_required_f = False

    # Line/Column Counting:
    newline_number_in_delimiter = EndSequence.count(ord('\n'))

    if EndSequence == map(ord, "\n") or EndSequence == map(ord, "\r\n"):
        #  (1) If the end-delimiter is a newline 
        #      => there cannot appear a newline inside the comment
        #      => IN LOOP: no line number increment
        #                  no reference pointer required for column counting
        end_procedure += "        __QUEX_IF_COUNT_COLUMNS_SET((size_t)1);\n" 
        end_procedure += "        __QUEX_IF_COUNT_LINES_ADD((size_t)1);\n"

    else:
        #  (2) If end-delimiter is NOT newline
        #      => there can appear a newline inside the comment
        if newline_number_in_delimiter == 0:
            # -- no newlines in delimiter => line and column number 
            #                                must be counted.
            in_loop       = line_column_counter_in_loop()
            end_procedure = "        __QUEX_IF_COUNT_COLUMNS_ADD((size_t)(QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer)\n" + \
                            "                                    - reference_p));\n" 
            reference_p_required_f = True
        else:
            # -- newline inside delimiter => line number must be counted
            #                                column number is fixed.
            end_procedure = "        __QUEX_IF_COUNT_COLUMNS_SET((size_t)%i);\n" \
                            % (char_n_after_last_newline + 1)

            if    EndSequence[0] == ord('\n') \
               or len(EndSequence) > 1 and EndSequence[0:2] == [ord('\r'), ord('\n')]: 
                # If the first character in the sequence is a newline, then the line
                # counting may be prevented by the loop exit, so it must be counted here.
                on_first_delimiter = "/* First delimiter char was a newline */\n" + \
                                     "    __QUEX_IF_COUNT_LINES_ADD((size_t)1);\n" 
                end_procedure += "        __QUEX_IF_COUNT_LINES_ADD((size_t)%i);\n" % (newline_number_in_delimiter - 1)
            else:
                in_loop        = line_counter_in_loop()
                end_procedure += "        __QUEX_IF_COUNT_LINES_ADD((size_t)%i);\n" % newline_number_in_delimiter

        
    if reference_p_required_f:
        reference_p_def = "    __QUEX_IF_COUNT_COLUMNS(reference_p = QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer));\n"
        before_reload   = "    __QUEX_IF_COUNT_COLUMNS_ADD((size_t)(QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer)\n" + \
                          "                                - reference_p));\n" 
        after_reload    = "        __QUEX_IF_COUNT_COLUMNS(reference_p = QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer));\n"

    if len(EndSequence) > 1:
        end_procedure = "%s\n%s" % (Lng.INPUT_P_ADD(len(EndSequence)-1), end_procedure)

    return blue_print(code_str,
                     [["$$LC_COUNT_COLUMN_N_POINTER_DEFINITION$$", reference_p_def],
                      ["$$LC_COUNT_IN_LOOP$$",                     in_loop],
                      ["$$LC_COUNT_END_PROCEDURE$$",               end_procedure],
                      ["$$LC_COUNT_BEFORE_RELOAD$$",               before_reload],
                      ["$$LC_COUNT_AFTER_RELOAD$$",                after_reload],
                      ["$$LC_ON_FIRST_DELIMITER$$",                on_first_delimiter],
                      ]), \
           reference_p_required_f
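
Quick checks for the helper above: it yields the number of characters after the last
newline in the end delimiter, or -1 if the delimiter contains no newline; the end
procedure then sets column_n to that count plus one:

def char_n_after_last_newline(Sequence):
    tmp = list(Sequence)
    tmp.reverse()
    try:               return tmp.index(ord('\n'))
    except ValueError: return -1

assert char_n_after_last_newline(map(ord, "*/"))    == -1   # no newline in delimiter
assert char_n_after_last_newline(map(ord, "\nend")) == 3    # column_n becomes 4
assert char_n_after_last_newline(map(ord, "end\n")) == 0    # column_n becomes 1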
Exemple #45
0
def get_skipper(OpenerSequence, CloserSequence, CloserPattern, ModeName, OnSkipRangeOpen, DoorIdAfter):
    assert len(OpenerSequence) >= 1
    assert len(CloserSequence) >= 1
    assert OpenerSequence != CloserSequence

    skipper_index   = sm_index.get()
    skipper_door_id = dial_db.new_door_id(skipper_index)

    opener_str, opener_comment_str = get_character_sequence(OpenerSequence)
    opener_length = len(OpenerSequence)
    closer_str, closer_comment_str = get_character_sequence(CloserSequence)
    closer_length = len(CloserSequence)

    variable_db.require("reference_p", Condition="QUEX_OPTION_COLUMN_NUMBER_COUNTING")
    variable_db.require("counter")
    variable_db.require_array("Skipper%i_Opener", Initial="{ %s }" % opener_str, ElementN=opener_length, Index = skipper_index)
    variable_db.require("Skipper%i_OpenerEnd", "Skipper%i_Opener + (ptrdiff_t)%i" % (skipper_index, opener_length), Index = skipper_index) 
    variable_db.require("Skipper%i_Opener_it", "0x0", Index = skipper_index) 
    variable_db.require_array("Skipper%i_Closer", Initial="{ %s }" % closer_str, ElementN=closer_length, Index = skipper_index) 
    variable_db.require("Skipper%i_CloserEnd", "Skipper%i_Closer + (ptrdiff_t)%i" % (skipper_index, closer_length), Index = skipper_index) 
    variable_db.require("Skipper%i_Closer_it", "0x0", Index = skipper_index) 

    reference_p_def = "    __QUEX_IF_COUNT_COLUMNS(reference_p = QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer));\n"
    before_reload   = "    __QUEX_IF_COUNT_COLUMNS_ADD((size_t)(QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer)\n" + \
                      "                                - reference_p));\n" 
    after_reload    = "        __QUEX_IF_COUNT_COLUMNS(reference_p = QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer));\n"

    if CloserSequence[-1] == ord('\n'):
        end_procedure  = "       __QUEX_IF_COUNT_LINES_ADD((size_t)1);\n"
        end_procedure += "       __QUEX_IF_COUNT_COLUMNS_SET((size_t)1);\n"
    else:
        end_procedure = "        __QUEX_IF_COUNT_COLUMNS_ADD((size_t)(QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer)\n" + \
                        "                                    - reference_p));\n" 

    reload_door_id     = dial_db.new_door_id()
    on_skip_range_open = get_on_skip_range_open(OnSkipRangeOpen, CloserPattern, NestedF=True)

    code_str = blue_print(template_str, [
                   ["$$SKIPPER_INDEX$$",   __nice(skipper_index)],
                   #
                   ["$$OPENER_LENGTH$$",                  "%i" % opener_length],
                   ["$$INPUT_P_INCREMENT$$",              Lng.INPUT_P_INCREMENT()],
                   ["$$INPUT_P_DECREMENT$$",              Lng.INPUT_P_DECREMENT()],
                   ["$$INPUT_GET$$",                      Lng.ACCESS_INPUT()],
                   ["$$IF_INPUT_EQUAL_DELIMITER_0$$",     Lng.IF_INPUT("==", "Skipper$$SKIPPER_INDEX$$[0]")],
                   ["$$ENDIF$$",                          Lng.END_IF()],
                   ["$$ENTRY$$",                          Lng.LABEL(skipper_door_id)],
                   ["$$RELOAD$$",                         dial_db.get_label_by_door_id(reload_door_id)],
                   ["$$GOTO_AFTER_END_OF_SKIPPING$$",     Lng.GOTO(DoorIdAfter)], 
                   ["$$GOTO_RELOAD$$",                    Lng.GOTO(reload_door_id)],
                   ["$$INPUT_P_TO_LEXEME_START$$",        Lng.INPUT_P_TO_LEXEME_START()],
                   # When things were skipped, no change to acceptance flags or modes has
                   # happened. One can jump immediately to the start without re-entry preparation.
                   ["$$GOTO_ENTRY$$",                     Lng.GOTO(skipper_door_id)],
                   ["$$MARK_LEXEME_START$$",              Lng.LEXEME_START_SET()],
                   ["$$ON_SKIP_RANGE_OPEN$$",             on_skip_range_open],
                   #
                   ["$$LC_COUNT_COLUMN_N_POINTER_DEFINITION$$", reference_p_def],
                   ["$$LC_COUNT_IN_LOOP$$",                     line_column_counter_in_loop()],
                   ["$$LC_COUNT_END_PROCEDURE$$",               end_procedure],
                   ["$$LC_COUNT_BEFORE_RELOAD$$",               before_reload],
                   ["$$LC_COUNT_AFTER_RELOAD$$",                after_reload],
               ])

    return [ code_str ]
Exemple #46
0
def __terminal_states(StateMachineName, action_db, OnFailureAction, EndOfStreamAction, 
                      PreConditionIDList, LanguageDB, VariableDB, OnAfterMatchStr, LexemeNullObjectName):
    """NOTE: During backward-lexing, for a pre-condition, there is not need for terminal
             states, since only the flag 'pre-condition fulfilled is raised.
    """      

    # (*) specific terminal states of patterns (entered from acceptance states)
    specific_terminal_states = []
    for pattern_id, pattern_action_info in action_db.items():
        if pattern_id in PreConditionIDList: continue
        code = get_terminal_code(pattern_id, pattern_action_info, LanguageDB)
        specific_terminal_states.extend(code)

    delete_pre_context_flags = []
    for pre_context_sm_id in PreConditionIDList:
        delete_pre_context_flags.append("    ")
        delete_pre_context_flags.append(LanguageDB.ASSIGN("pre_context_%s_fulfilled_f" % __nice(pre_context_sm_id), 0))

    # If there is at least a single terminal, then the 're-entry' preparation must be accomplished
    if len(action_db) != 0: get_label("$re-start", U=True)

    #  -- execute 'on_failure' pattern action 
    #  -- goto initial state    
    end_of_stream_code_action_str = EndOfStreamAction.action().get_code()

    # -- FAILURE ACTION: Under 'normal' circumstances the on_failure action is simply to be executed
    #                    since the 'get_forward()' incremented the 'current' pointer.
    #                    HOWEVER, when end of file has been reached the 'current' pointer has to
    #                    be reset so that the initial state can drop out on the buffer limit code
    #                    and then transit to the end of file action.
    # NOTE: It is possible that 'miss' happens after a chain of characters appeared. In any case the input
    #       pointer must be set up right after the lexeme start. This way, the lexer gets a new chance as
    #       soon as possible.
    on_failure = __terminal_on_failure_prolog(LanguageDB)
    msg        = OnFailureAction.action().get_code()

    on_failure.append(msg)

    prolog = blue_print(__terminal_state_prolog,
                        [
                          ["$$LEXEME_LENGTH$$",      LanguageDB.LEXEME_LENGTH()],
                          ["$$INPUT_P$$",            LanguageDB.INPUT_P()],
                          ["$$LEXEME_NULL_OBJECT$$", LexemeNullObjectName],
                        ]
                       )

    router = Address("$terminal-router", None,
                  [
                      blue_print(__terminal_router_prolog_str,
                      [
                       ["$$TERMINAL_FAILURE-REF$$",         "QUEX_LABEL(%i)" % get_address("$terminal-FAILURE")],
                       ["$$TERMINAL_FAILURE$$",             get_label("$terminal-FAILURE")],
                      ]),
                      # DO NOT 'U=True' for the state router. This is done automatically if 
                      # 'goto reload' is used. 
                      get_label("$state-router"), ";",
                      __terminal_router_epilog_str, 
                  ])
                     
    epilog = blue_print(__terminal_state_epilog, 
             [
              ["$$FAILURE_ACTION$$",             "".join(on_failure)],
              ["$$END_OF_STREAM_ACTION$$",       end_of_stream_code_action_str],
              ["$$TERMINAL_END_OF_STREAM-DEF$$", get_label("$terminal-EOF")],
              ["$$TERMINAL_FAILURE-DEF$$",       get_label("$terminal-FAILURE")],
              ["$$STATE_MACHINE_NAME$$",         StateMachineName],
              ["$$GOTO_START_PREPARATION$$",     get_label("$re-start", U=True)],
             ])


    reset_last_acceptance_str = ""
    if VariableDB.has_key("last_acceptance"):
        reset_last_acceptance_str = "last_acceptance = $$TERMINAL_FAILURE-REF$$; /* TERMINAL: FAILURE */"

    return_preparation = ""
    if OnAfterMatchStr != "":
        return_preparation = blue_print(__return_preparation_str,
                                        [["$$ON_AFTER_MATCH$$",  OnAfterMatchStr]])

    reentry_preparation = blue_print(__reentry_preparation_str,
                          [["$$REENTRY_PREPARATION$$",                    get_label("$re-start")],
                           ["$$DELETE_PRE_CONDITION_FULLFILLED_FLAGS$$",  "".join(delete_pre_context_flags)],
                           ["$$GOTO_START$$",                             get_label("$start", U=True)],
                           ["$$ON_AFTER_MATCH$$",                         OnAfterMatchStr],
                           ["$$RESET_LAST_ACCEPTANCE$$",                  reset_last_acceptance_str],
                           ["$$COMMENT_ON_POST_CONTEXT_INITIALIZATION$$", comment_on_post_context_position_init_str],
                           ["$$TERMINAL_FAILURE-REF$$",                   "QUEX_LABEL(%i)" % get_address("$terminal-FAILURE")],
                          ])

    txt = []
    txt.append(router)
    txt.append(prolog)
    txt.extend(specific_terminal_states)
    txt.append(epilog)
    txt.append(return_preparation)
    txt.append(reentry_preparation)

    return txt
Exemple #47
0
def HEADER_DEFINITIONS(self):
    return blue_print(cpp_header_definition_str, [
        ("$$CONTINUE_WITH_ON_AFTER_MATCH$$", dial_db.get_label_by_door_id(DoorID.continue_with_on_after_match())),
        ("$$RETURN_WITH_ON_AFTER_MATCH$$",   dial_db.get_label_by_door_id(DoorID.return_with_on_after_match())),
    ])
Exemple #49
0
#! /usr/bin/env python
import sys
import os
sys.path.insert(0, os.environ["QUEX_PATH"])
from   quex.engine.misc.string_handling import blue_print

DistributionDir = sys.argv[1]
Version         = sys.argv[2]


template_str = open(os.environ["QUEX_PATH"] + "/adm/packager/install-builder-template.xml").read()

result = blue_print(template_str,
                    [["$$VERSION$$",               Version ],
                     ["$$QUEX_PATH$$",             os.environ["QUEX_PATH"]],
                     ["$$DISTRIBUTION_ROOT_DIR$$", DistributionDir],
                    ])

open("install-builder.xml", "w").write(result)
print "DIR: %s CONTAINS: %s" % (os.getcwd(), "install-builder.xml")

Exemple #50
0
def do(ModeDescriptionDB):
    IndentationSupportF = blackboard.required_support_indentation_count()
    BeginOfLineSupportF = blackboard.required_support_begin_of_line()

    

    LexerClassName = Setup.analyzer_class_name

    ConfigurationTemplateFile = (QUEX_PATH
                                 + Lng["$code_base"]
                                 + "/analyzer/configuration/TXT").replace("//","/")

    txt = get_file_content_or_die(ConfigurationTemplateFile)

    # -- check if exit/entry handlers have to be active
    entry_handler_active_f = False
    exit_handler_active_f = False
    for mode in ModeDescriptionDB.values():
        entry_handler_active_f |= mode.incidence_db.has_key(E_IncidenceIDs.MODE_ENTRY)
        exit_handler_active_f  |= mode.incidence_db.has_key(E_IncidenceIDs.MODE_EXIT)

    # Buffer filler converter (0x0 means: no buffer filler converter)
    converter_new_str = "#   define QUEX_SETTING_BUFFER_FILLERS_CONVERTER_NEW " 
    if Setup.converter_user_new_func != "": 
        converter_new_str += Setup.converter_user_new_func + "()"
    else: 
        converter_new_str = "/* " + converter_new_str + " */"

    # Token repetition support
    token_repeat_test_txt = ""
    for token_id_str in blackboard.token_repetition_token_id_list:
        token_repeat_test_txt += "TokenID == %s || " % token_id_str
    if token_repeat_test_txt != "":
        token_repeat_test_txt = token_repeat_test_txt[:-3]
    else:
        token_repeat_test_txt = "false"

    if Setup.analyzer_derived_class_name != "":
        analyzer_derived_class_name = Setup.analyzer_derived_class_name
    else:
        analyzer_derived_class_name = Setup.analyzer_class_name

    txt = __switch(txt, "QUEX_OPTION_COLUMN_NUMBER_COUNTING",        Setup.count_column_number_f)        
    txt = __switch(txt, "QUEX_OPTION_COMPUTED_GOTOS",                False)
    txt = __switch(txt, "QUEX_OPTION_CONVERTER_ICONV",               Setup.converter_iconv_f)
    txt = __switch(txt, "QUEX_OPTION_CONVERTER_ICU",                 Setup.converter_icu_f)
    txt = __switch(txt, "QUEX_OPTION_INCLUDE_STACK",                 Setup.include_stack_support_f)
    txt = __switch(txt, "QUEX_OPTION_LINE_NUMBER_COUNTING",          Setup.count_line_number_f)      
    txt = __switch(txt, "QUEX_OPTION_POST_CATEGORIZER",              Setup.post_categorizer_f)
    txt = __switch(txt, "QUEX_OPTION_RUNTIME_MODE_TRANSITION_CHECK", Setup.mode_transition_check_f)
    txt = __switch(txt, "QUEX_OPTION_STRING_ACCUMULATOR",            Setup.string_accumulator_f)
    txt = __switch(txt, "QUEX_OPTION_TOKEN_POLICY_QUEUE",            Setup.token_policy == "queue")
    txt = __switch(txt, "QUEX_OPTION_TOKEN_POLICY_SINGLE",           Setup.token_policy == "single")
    txt = __switch(txt, "QUEX_OPTION_TOKEN_REPETITION_SUPPORT",      token_repeat_test_txt != "false")
    txt = __switch(txt, "QUEX_OPTION_USER_MANAGED_TOKEN_MEMORY",     Setup.token_memory_management_by_user_f)
    txt = __switch(txt, "__QUEX_OPTION_BIG_ENDIAN",                  Setup.buffer_byte_order == "big")
    txt = __switch(txt, "__QUEX_OPTION_CONVERTER_HELPER",            Setup.converter_helper_required_f)
    txt = __switch(txt, "__QUEX_OPTION_CONVERTER",                   Setup.converter_f)
    txt = __switch(txt, "QUEX_OPTION_INDENTATION_TRIGGER",           IndentationSupportF)     
    txt = __switch(txt, "__QUEX_OPTION_LITTLE_ENDIAN",               Setup.buffer_byte_order == "little")
    txt = __switch(txt, "__QUEX_OPTION_ON_ENTRY_HANDLER_PRESENT",    entry_handler_active_f)
    txt = __switch(txt, "__QUEX_OPTION_ON_EXIT_HANDLER_PRESENT",     exit_handler_active_f)
    txt = __switch(txt, "__QUEX_OPTION_PLAIN_C",                     Setup.language.upper() == "C")
    txt = __switch(txt, "__QUEX_OPTION_SUPPORT_BEGIN_OF_LINE_PRE_CONDITION", BeginOfLineSupportF)
    txt = __switch(txt, "__QUEX_OPTION_SYSTEM_ENDIAN",               Setup.byte_order_is_that_of_current_system_f)
    txt = __switch(txt, "QUEX_OPTION_BUFFER_BASED_ANALYZIS",         Setup.buffer_based_analyzis_f)
    txt = __switch(txt, "__QUEX_OPTION_ENGINE_RUNNING_ON_CODEC",     Setup.buffer_codec.name != "unicode")

    # -- token class related definitions
    token_descr = blackboard.token_type_definition

    # -- name of the character codec
    codec_name = make_safe_identifier(Setup.buffer_codec.name).lower()

    # Setup.buffer_element_size can be '-1'. This signals that
    # sizeof(QUEX_TYPE_CHARACTER) needs to be used. A numeric value 
    # is required here.
    character_size_str = "%i" % Setup.buffer_element_size

    def namespace(NameSpaceList):
        result = Lng.NAMESPACE_REFERENCE(NameSpaceList, TrailingDelimiterF=False)

        if len(result) == 0: return ""

        assert Setup.language.upper() != "C++" or len(result) > 2, \
               "Error while generating namespace reference '%s'" % result

        return result

    txt = blue_print(txt, 
            [
             ["$$BUFFER_LIMIT_CODE$$",          "0x%X" % Setup.buffer_limit_code],
             ["$$QUEX_SETTING_CHARACTER_CODEC$$", codec_name],
             ["$$INCLUDE_GUARD_EXTENSION$$",    get_include_guard_extension(Lng.NAMESPACE_REFERENCE(Setup.analyzer_name_space) + "__" + Setup.analyzer_class_name)],
             ["$$INITIAL_LEXER_MODE_ID$$",      "QUEX_NAME(ModeID_%s)" % blackboard.initial_mode.get_pure_text()],
             ["$$LEXER_BUILD_DATE$$",           time.asctime()],
             ["$$LEXER_CLASS_NAME$$",           LexerClassName],
             ["$$LEXER_CLASS_NAME_SAFE$$",      Setup.analyzer_name_safe],
             ["$$LEXER_DERIVED_CLASS_NAME$$",   analyzer_derived_class_name],
             ["$$MAX_MODE_CLASS_N$$",           repr(len(ModeDescriptionDB))],
             ["$$NAMESPACE_MAIN$$",             namespace(Setup.analyzer_name_space)],
             ["$$NAMESPACE_MAIN_CLOSE$$",       Lng.NAMESPACE_CLOSE(Setup.analyzer_name_space).replace("\n", "\\\n")],
             ["$$NAMESPACE_MAIN_OPEN$$",        Lng.NAMESPACE_OPEN(Setup.analyzer_name_space).replace("\n", "\\\n")],
             ["$$NAMESPACE_TOKEN$$",            namespace(token_descr.name_space)],
             ["$$NAMESPACE_TOKEN_CLOSE$$",      Lng.NAMESPACE_CLOSE(token_descr.name_space).replace("\n", "\\\n")],
             ["$$NAMESPACE_TOKEN_OPEN$$",       Lng.NAMESPACE_OPEN(token_descr.name_space).replace("\n", "\\\n")],
             ["$$PATH_TERMINATION_CODE$$",      "0x%X" % Setup.path_limit_code],
             ["$$QUEX_SETTING_BUFFER_FILLERS_CONVERTER_NEW$$", converter_new_str],
             ["$$QUEX_TYPE_CHARACTER$$",        Setup.buffer_element_type],
             ["$$QUEX_SETTING_CHARACTER_SIZE$$", character_size_str],
             ["$$QUEX_NAMESPACE_LEXEME_NULL_OPEN$$",   Lng.NAMESPACE_OPEN(Setup.lexeme_null_namespace).replace("\n", "\\\n")],
             ["$$QUEX_NAMESPACE_LEXEME_NULL_CLOSE$$",  Lng.NAMESPACE_CLOSE(Setup.lexeme_null_namespace).replace("\n", "\\\n")],
             ["$$QUEX_LEXEME_NULL$$",                  Setup.lexeme_null_full_name_cpp],
             ["$$QUEX_LEXEME_NULL_SAFE$$",             Setup.lexeme_null_name_safe],
             ["$$QUEX_LEXEME_NULL_IN_ITS_NAMESPACE$$", Setup.lexeme_null_name],
             ["$$QUEX_VERSION$$",               QUEX_VERSION],
             ["$$TOKEN_CLASS$$",                token_descr.class_name],
             ["$$TOKEN_CLASS_NAME_SAFE$$",      token_descr.class_name_safe],
             ["$$TOKEN_COLUMN_N_TYPE$$",        token_descr.column_number_type.get_pure_text()],
             ["$$TOKEN_ID_TYPE$$",              token_descr.token_id_type.get_pure_text()],
             ["$$TOKEN_LINE_N_TYPE$$",          token_descr.line_number_type.get_pure_text()],
             ["$$TOKEN_PREFIX$$",               Setup.token_id_prefix],
             ["$$TOKEN_QUEUE_SAFETY_BORDER$$",  repr(Setup.token_queue_safety_border)],
             ["$$TOKEN_QUEUE_SIZE$$",           repr(Setup.token_queue_size)],
             ["$$TOKEN_REPEAT_TEST$$",          token_repeat_test_txt],
             ["$$USER_LEXER_VERSION$$",         Setup.user_application_version_id],
             ])

    return txt
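
The __switch helper toggles the preprocessor options listed above. A minimal sketch of
its assumed behavior, replacing a '$$SWITCH$$ <name>' placeholder with an active or a
commented-out '#define'; the real quex helper may differ in detail:

def __switch(txt, Name, SwitchOnF):
    if SwitchOnF: define_str = "#    define    %s" % Name
    else:         define_str = "/* #define     %s */" % Name
    return txt.replace("$$SWITCH$$ %s" % Name, define_str)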
Exemple #51
0
def  get_implementation_of_mode_functions(mode, Modes):
    """Writes constructors and mode transition functions.

                  void quex::lexer::enter_EXAMPLE_MODE() { ... }

       where EXAMPLE_MODE is a lexer mode from the given lexer-modes, and
       'quex::lexer' is the lexical analysis class.
    """
    def __filter_out_abstract_modes(ModeNameList):
        """Return only those names from ModeNameList where the mode is not
        abstract. That is, it can be implemented.
        """
        result = []
        for name in ModeNameList:
            for mode in Modes:
                if mode.name == name:
                    if not mode.abstract_f(): result.append(name)
                    break
        return result

    # (*) on enter 
    on_entry_str  = "#   ifdef QUEX_OPTION_RUNTIME_MODE_TRANSITION_CHECK\n"
    on_entry_str += "    QUEX_NAME(%s).has_entry_from(FromMode);\n" % mode.name
    on_entry_str += "#   endif\n"

    code_fragment = mode.incidence_db.get(E_IncidenceIDs.MODE_ENTRY)
    if code_fragment is not None:
        on_entry_str += Lng.SOURCE_REFERENCED(code_fragment)
        if on_entry_str[-1] == "\n": on_entry_str = on_entry_str[:-1]

    # (*) on exit
    on_exit_str  = "#   ifdef QUEX_OPTION_RUNTIME_MODE_TRANSITION_CHECK\n"
    on_exit_str += "    QUEX_NAME(%s).has_exit_to(ToMode);\n" % mode.name
    on_exit_str += "#   endif\n"
    code_fragment = mode.incidence_db.get(E_IncidenceIDs.MODE_EXIT)
    if code_fragment is not None:
        on_exit_str += Lng.SOURCE_REFERENCED(code_fragment)

    # (*) on indentation
    on_indentation_str = get_on_indentation_handler(mode)

    # (*) has base mode
    if mode.has_base_mode():
        base_mode_list    = __filter_out_abstract_modes(mode.get_base_mode_name_list())
        has_base_mode_str = get_IsOneOfThoseCode(base_mode_list, CheckBaseModeF=True)
    else:
        has_base_mode_str = "    return false;"
        
    # (*) has entry from
    #     check whether the source mode is a permissible 'entry' mode
    entry_list         = __filter_out_abstract_modes(mode.entry_mode_name_list)
    has_entry_from_str = get_IsOneOfThoseCode(entry_list,
                                              ConsiderDerivedClassesF=True)

    # (*) has exit to
    #     check whether the target mode is a permissible 'exit' mode
    exit_list       = __filter_out_abstract_modes(mode.exit_mode_name_list)
    has_exit_to_str = get_IsOneOfThoseCode(exit_list,
                                           ConsiderDerivedClassesF=True)
    
    txt = blue_print(mode_function_implementation_str,
                     [
                      ["$$ENTER-PROCEDURE$$",      on_entry_str],
                      ["$$EXIT-PROCEDURE$$",       on_exit_str],
                      #
                      ["$$ON_INDENTATION-PROCEDURE$$", on_indentation_str],
                      #
                      ["$$HAS_BASE_MODE$$",        has_base_mode_str],
                      ["$$HAS_ENTRANCE_FROM$$",    has_entry_from_str],
                      ["$$HAS_EXIT_TO$$",          has_exit_to_str],
                      #
                      ["$$MODE_NAME$$",            mode.name],
                      ])
    return txt
Exemple #52
0
def  get_implementation_of_mode_functions(mode, Modes):
    """Writes constructors and mode transition functions.

                  void quex::lexer::enter_EXAMPLE_MODE() { ... }

       where EXAMPLE_MODE is a lexer mode from the given lexer-modes, and
       'quex::lexer' is the lexical analysis class.
    """
    def __filter_out_inheritable_only(ModeNameList):
        result = []
        for name in ModeNameList:
            for mode in Modes:
                if mode.name == name:
                    if mode.options["inheritable"] != "only": result.append(name)
                    break
        return result

    # (*) on enter 
    on_entry_str  = "#   ifdef QUEX_OPTION_RUNTIME_MODE_TRANSITION_CHECK\n"
    on_entry_str += "    QUEX_NAME(%s).has_entry_from(FromMode);\n" % mode.name
    on_entry_str += "#   endif\n"
    for code_info in mode.get_code_fragment_list("on_entry"):
        on_entry_str += code_info.get_code()
        if on_entry_str[-1] == "\n": on_entry_str = on_entry_str[:-1]

    # (*) on exit
    on_exit_str  = "#   ifdef QUEX_OPTION_RUNTIME_MODE_TRANSITION_CHECK\n"
    on_exit_str += "    QUEX_NAME(%s).has_exit_to(ToMode);\n" % mode.name
    on_exit_str += "#   endif\n"
    for code_info in mode.get_code_fragment_list("on_exit"):
        on_exit_str += code_info.get_code()

    # (*) on indentation
    on_indentation_str = get_on_indentation_handler(mode)

    # (*) has base mode
    if mode.has_base_mode():
        base_mode_list    = __filter_out_inheritable_only(mode.get_base_mode_name_list())
        has_base_mode_str = get_IsOneOfThoseCode(base_mode_list, CheckBaseModeF=True)
    else:
        has_base_mode_str = "    return false;"
        
    # (*) has entry from
    try:
        # check whether the mode we come from is an allowed mode
        entry_list         = __filter_out_inheritable_only(mode.options["entry"])
        has_entry_from_str = get_IsOneOfThoseCode(entry_list,
                                                  ConsiderDerivedClassesF=True)
    except KeyError:
        has_entry_from_str = "    return true; /* default */"

    # (*) has exit to
    try:
        exit_list       = __filter_out_inheritable_only(mode.options["exit"])
        has_exit_to_str = get_IsOneOfThoseCode(exit_list,
                                               ConsiderDerivedClassesF=True)
    except KeyError:
        has_exit_to_str = "    return true; /* default */"

    
    txt = blue_print(mode_function_implementation_str,
                     [
                      ["$$ENTER-PROCEDURE$$",      on_entry_str],
                      ["$$EXIT-PROCEDURE$$",       on_exit_str],
                      #
                      ["$$ON_INDENTATION-PROCEDURE$$", on_indentation_str],
                      #
                      ["$$HAS_BASE_MODE$$",        has_base_mode_str],
                      ["$$HAS_ENTRANCE_FROM$$",    has_entry_from_str],
                      ["$$HAS_EXIT_TO$$",          has_exit_to_str],
                      #
                      ["$$MODE_NAME$$",            mode.name],
                      ])
    return txt
Exemple #53
0
def get_skipper(TriggerSet):
    """This function implements simple 'skipping' in the sense of passing by
       characters that belong to a given set of characters--the TriggerSet.
    """
    global template_str
    assert TriggerSet.__class__.__name__ == "NumberSet"
    assert not TriggerSet.is_empty()

    LanguageDB = Setup.language_db

    skipper_index = sm_index.get()

    # Mini trigger map:  [ trigger set ] --> loop start
    # That means: As long as characters of the trigger set appear, we go to the loop start.
    transition_map = TransitionMap()  # (don't worry about 'drop-out-ranges' etc.)
    transition_map.add_transition(TriggerSet, skipper_index)
    # On buffer limit code, the skipper must transit to a dedicated reloader

    goto_loop_str = [
        LanguageDB.INPUT_P_INCREMENT(),
        " goto _%s_LOOP;\n" % skipper_index
    ]
    trigger_map = transition_map.get_trigger_map()
    for i, info in enumerate(trigger_map):
        interval, target = info
        if target == E_StateIndices.DROP_OUT: continue
        trigger_map[i] = (interval, TextTransitionCode(goto_loop_str))

    iteration_code = []
    transition_block.do(iteration_code,
                        trigger_map,
                        skipper_index,
                        E_EngineTypes.ELSE,
                        GotoReload_Str="goto %s;" %
                        get_label("$reload", skipper_index))

    tmp = []
    LanguageDB.COMMENT(tmp,
                       "Skip any character in " + TriggerSet.get_utf8_string())
    comment_str = "".join(tmp)

    # Line and column number counting
    prolog = __lc_counting_replacements(prolog_txt, TriggerSet)
    epilog = __lc_counting_replacements(epilog_txt, TriggerSet)

    prolog = blue_print(prolog, [
        ["$$DELIMITER_COMMENT$$", comment_str],
        ["$$SKIPPER_INDEX$$", "%i" % skipper_index],
        ["$$INPUT_GET$$", LanguageDB.ACCESS_INPUT()],
    ])

    epilog = blue_print(
        epilog,
        [
            ["$$INPUT_P_INCREMENT$$",
             LanguageDB.INPUT_P_INCREMENT()],
            ["$$INPUT_P_DECREMENT$$",
             LanguageDB.INPUT_P_DECREMENT()],
            [
                "$$IF_INPUT_EQUAL_DELIMITER_0$$",
                LanguageDB.IF_INPUT("==", "SkipDelimiter$$SKIPPER_INDEX$$[0]")
            ],
            ["$$ENDIF$$", LanguageDB.END_IF()],
            ["$$LOOP_REENTRANCE$$",
             LanguageDB.LABEL(skipper_index)],
            ["$$BUFFER_LIMIT_CODE$$", LanguageDB.BUFFER_LIMIT_CODE],
            ["$$RELOAD$$", get_label("$reload", skipper_index)],
            [
                "$$DROP_OUT_DIRECT$$",
                get_label("$drop-out", skipper_index, U=True)
            ],
            ["$$SKIPPER_INDEX$$", "%i" % skipper_index],
            [
                "$$LABEL_TERMINAL_EOF$$",
                "QUEX_LABEL(%i)" % get_address("$terminal-EOF", U=True)
            ],
            [
                "$$LABEL_REF_AFTER_RELOAD$$",
                "QUEX_LABEL(%i)" %
                get_address("$skipper-reload", skipper_index, U=True)
            ],
            ["$$GOTO_TERMINAL_EOF$$",
             get_label("$terminal-EOF", U=True)],
            [
                "$$LABEL_AFTER_RELOAD$$",
                get_label("$skipper-reload", skipper_index)
            ],
            # When things were skipped, no change to acceptance flags or modes has
            # happened. One can jump immediately to the start without re-entry preparation.
            ["$$GOTO_START$$", get_label("$start", U=True)],
            ["$$MARK_LEXEME_START$$",
             LanguageDB.LEXEME_START_SET()],
        ])

    code = [prolog]
    code.extend(iteration_code)
    code.append(epilog)

    local_variable_db = {}
    variable_db.enter(local_variable_db,
                      "reference_p",
                      Condition="QUEX_OPTION_COLUMN_NUMBER_COUNTING")

    return code, local_variable_db
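
The generated C code is a tight scanning loop. Stripped of buffer reload and line/column counting, its runtime behavior corresponds to the following pure-Python sketch (invented names; the increment mirrors LanguageDB.INPUT_P_INCREMENT() and the final fall-through mirrors the jump back to '$start' without re-entry preparation):

def skip_characters_sketch(text, pos, trigger_set):
    """Return the first position at or after 'pos' whose character is not
    contained in 'trigger_set'. Illustrative stand-in, NOT Quex output."""
    while pos < len(text) and ord(text[pos]) in trigger_set:
        pos += 1                      # mirrors $$INPUT_P_INCREMENT$$
    return pos                        # mirrors $$GOTO_START$$

# Example: skipping blanks and tabs
# skip_characters_sketch("  \t abc", 0, set([ord(' '), ord('\t')]))  --> 4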
Example #54
def get_skipper(OpenerSequence, CloserSequence, Mode=None, IndentationCounterTerminalID=None, OnSkipRangeOpenStr=""):
    assert OpenerSequence.__class__  == list
    assert len(OpenerSequence)       >= 1
    assert map(type, OpenerSequence) == [int] * len(OpenerSequence)
    assert CloserSequence.__class__  == list
    assert len(CloserSequence)       >= 1
    assert map(type, CloserSequence) == [int] * len(CloserSequence)
    assert OpenerSequence != CloserSequence

    LanguageDB    = Setup.language_db

    skipper_index = sm_index.get()

    opener_str, opener_comment_str = get_character_sequence(OpenerSequence)
    opener_length = len(OpenerSequence)
    closer_str, closer_comment_str = get_character_sequence(CloserSequence)
    closer_length = len(CloserSequence)

    if not end_delimiter_is_subset_of_indentation_counter_newline(Mode, CloserSequence):
        goto_after_end_of_skipping_str = LanguageDB.GOTO(E_StateIndices.ANALYZER_REENTRY)
    else:
        # If there is indentation counting involved, then the counter's terminal id must
        # be determined at this place.
        assert IndentationCounterTerminalID is not None
        # If the ending delimiter is a subset of what the 'newline' pattern triggers 
        # in indentation counting => move on to the indentation counter.
        goto_after_end_of_skipping_str = LanguageDB.GOTO_TERMINAL(IndentationCounterTerminalID)

    if OnSkipRangeOpenStr != "": on_skip_range_open_str = OnSkipRangeOpenStr
    else:                        on_skip_range_open_str = get_on_skip_range_open(Mode, CloserSequence)

    local_variable_db = {}
    variable_db.enter(local_variable_db, "reference_p", Condition="QUEX_OPTION_COLUMN_NUMBER_COUNTING")
    # variable_db.enter(local_variable_db, "text_end")
    variable_db.enter(local_variable_db, "counter")
    variable_db.enter(local_variable_db, "Skipper%i_Opener",    "{ %s }" % opener_str, ElementN=opener_length, 
                                         Index = skipper_index)
    variable_db.enter(local_variable_db, "Skipper%i_OpenerEnd", 
                                         "Skipper%i_Opener + (ptrdiff_t)%i" % (skipper_index, opener_length),
                                         Index = skipper_index) 
    variable_db.enter(local_variable_db, "Skipper%i_Opener_it", "0x0", 
                                         Index = skipper_index) 
    variable_db.enter(local_variable_db, "Skipper%i_Closer",    "{ %s }" % closer_str, ElementN=closer_length, 
                                         Index = skipper_index) 
    variable_db.enter(local_variable_db, "Skipper%i_CloserEnd", 
                                         "Skipper%i_Closer + (ptrdiff_t)%i" % (skipper_index, closer_length),
                                         Index = skipper_index) 
    variable_db.enter(local_variable_db, "Skipper%i_Closer_it", "0x0", 
                                         Index = skipper_index) 

   
    reference_p_def = "    __QUEX_IF_COUNT_COLUMNS(reference_p = QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer));\n"

    reference_p_def = "    __QUEX_IF_COUNT_COLUMNS(reference_p = QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer));\n"
    before_reload   = "    __QUEX_IF_COUNT_COLUMNS_ADD((size_t)(QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer)\n" + \
                      "                                - reference_p));\n" 
    after_reload    = "        __QUEX_IF_COUNT_COLUMNS(reference_p = QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer));\n"

    if CloserSequence[-1] == ord('\n'):
        end_procedure  = "       __QUEX_IF_COUNT_LINES_ADD((size_t)1);\n"
        end_procedure += "       __QUEX_IF_COUNT_COLUMNS_SET((size_t)1);\n"
    else:
        end_procedure = "        __QUEX_IF_COUNT_COLUMNS_ADD((size_t)(QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer)\n" + \
                        "                                    - reference_p));\n" 

    code_str = blue_print(template_str,
                          [
                           ["$$SKIPPER_INDEX$$",   __nice(skipper_index)],
                           #
                           ["$$OPENER_LENGTH$$",                  "%i" % opener_length],
                           ["$$INPUT_P_INCREMENT$$",              LanguageDB.INPUT_P_INCREMENT()],
                           ["$$INPUT_P_DECREMENT$$",              LanguageDB.INPUT_P_DECREMENT()],
                           ["$$INPUT_GET$$",                      LanguageDB.ACCESS_INPUT()],
                           ["$$IF_INPUT_EQUAL_DELIMITER_0$$",     LanguageDB.IF_INPUT("==", "Skipper$$SKIPPER_INDEX$$[0]")],
                           ["$$ENDIF$$",                          LanguageDB.END_IF()],
                           ["$$ENTRY$$",                          LanguageDB.LABEL(skipper_index)],
                           ["$$RELOAD$$",                         get_label("$reload", skipper_index)],
                           ["$$GOTO_AFTER_END_OF_SKIPPING$$",     goto_after_end_of_skipping_str], 
                           ["$$GOTO_RELOAD$$",                    get_label("$reload", skipper_index)],
                           ["$$INPUT_P_TO_LEXEME_START$$",        LanguageDB.INPUT_P_TO_LEXEME_START()],
                           # When things were skipped, no change to acceptance flags or modes has
                           # happened. One can jump immediately to the start without re-entry preparation.
                           ["$$GOTO_ENTRY$$",                     LanguageDB.GOTO(skipper_index)],
                           ["$$MARK_LEXEME_START$$",              LanguageDB.LEXEME_START_SET()],
                           ["$$ON_SKIP_RANGE_OPEN$$",             on_skip_range_open_str],
                           #
                           ["$$LC_COUNT_COLUMN_N_POINTER_DEFINITION$$", reference_p_def],
                           ["$$LC_COUNT_IN_LOOP$$",                     line_column_counter_in_loop()],
                           ["$$LC_COUNT_END_PROCEDURE$$",               end_procedure],
                           ["$$LC_COUNT_BEFORE_RELOAD$$",               before_reload],
                           ["$$LC_COUNT_AFTER_RELOAD$$",                after_reload],
                          ])

    return code_str, local_variable_db
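
At runtime the generated skipper scans forward until the closer sequence matches, jumping to the reload label when the buffer runs dry and to the $$ON_SKIP_RANGE_OPEN$$ handler when input ends without a closer. A simplified in-memory analogue (invented function name, no reload handling):

def skip_range_sketch(text, pos, closer):
    """Return the position just after the first occurrence of the 'closer'
    character sequence at or beyond 'pos', or None if the range stays open."""
    idx = text.find(closer, pos)
    if idx == -1:
        return None               # the 'skip range open' situation
    return idx + len(closer)      # continue analysis after the delimiter

# skip_range_sketch("comment */ rest", 0, "*/")  --> 10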
Example #55
def LEXEME_MACRO_SETUP(self):
    return blue_print(cpp.lexeme_macro_setup, [
        ["$$LEXEME_LENGTH$$",  self.LEXEME_LENGTH()],
        ["$$INPUT_P$$",        self.INPUT_P()],
    ])
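
The template cpp.lexeme_macro_setup itself is not shown in this listing. As a hedged illustration of the mechanics only, the stand-in template and expanded expressions below are invented; only the two placeholder names come from the call above:

# Invented stand-in for cpp.lexeme_macro_setup:
_lexeme_macro_template = (
    "#define LexemeL $$LEXEME_LENGTH$$\n"
    "#define Lexeme  $$INPUT_P$$\n"
)

def lexeme_macro_setup_sketch(lexeme_length_expr, input_p_expr):
    """Fill the stand-in template the same way LEXEME_MACRO_SETUP() does."""
    txt = _lexeme_macro_template
    txt = txt.replace("$$LEXEME_LENGTH$$", lexeme_length_expr)
    txt = txt.replace("$$INPUT_P$$", input_p_expr)
    return txt

# lexeme_macro_setup_sketch("(size_t)(LexemeEnd - LexemeBegin)", "LexemeBegin")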
Example #56
def _do(Descr):
    # The following things must be ensured before the function is called
    assert Descr is not None
    assert Descr.__class__.__name__ == "TokenTypeDescriptor"

    ## ALLOW: Descr.get_member_db().keys() == empty

    TemplateFile = QUEX_PATH \
                   + Lng["$code_base"] \
                   + Lng["$token_template_file"]

    TemplateIFile = QUEX_PATH \
                   + Lng["$code_base"] \
                   + Lng["$token_template_i_file"]

    template_str   = open_file_or_die(TemplateFile, Mode="rb").read()
    template_i_str = open_file_or_die(TemplateIFile, Mode="rb").read()
    
    virtual_destructor_str = ""
    if Descr.open_for_derivation_f: virtual_destructor_str = "virtual "

    if Descr.copy is None:
        # Default copy operation: Plain Copy of token memory
        copy_str = "__QUEX_STD_memcpy((void*)__this, (void*)__That, sizeof(QUEX_TYPE_TOKEN));\n"
    else:
        copy_str = Lng.SOURCE_REFERENCED(Descr.copy)

    if Descr.take_text is None:
        take_text_str = "return true;\n" 
    else:
        take_text_str = Lng.SOURCE_REFERENCED(Descr.take_text)

    include_guard_extension_str = get_include_guard_extension(
                                        Lng.NAMESPACE_REFERENCE(Descr.name_space) 
                                        + "__" + Descr.class_name)

    # In case of plain 'C' the class name must incorporate the namespace (list)
    token_class_name = Descr.class_name
    if Setup.language == "C":
        token_class_name = Setup.token_class_name_safe

    converter_declaration_include,   \
    converter_implementation_include, \
    converter_string,                 \
    converter_wstring                 = __get_converter_configuration(include_guard_extension_str)

    extra_at_begin_str = lexeme_null_declaration()
    extra_at_end_str   = ""
    if Setup.token_class_only_f:
        extra_at_begin_str = QUEX_NAME_TOKEN_define_str % include_guard_extension_str \
                             + extra_at_begin_str
        extra_at_end_str   = QUEX_NAME_TOKEN_undef_str % include_guard_extension_str \
                             + extra_at_end_str

    namespace_open, namespace_close = __namespace_brackets()
    helper_variable_replacements = [
              ["$INCLUDE_CONVERTER_DECLARATION",    converter_declaration_include],
              ["$INCLUDE_CONVERTER_IMPLEMENTATION", converter_implementation_include],
              ["$CONVERTER_STRING",                 converter_string],
              ["$CONVERTER_WSTRING",                converter_wstring],
              ["$NAMESPACE_CLOSE",                  namespace_close],
              ["$NAMESPACE_OPEN",                   namespace_open],
              ["$TOKEN_CLASS",                      token_class_name],
    ]

    txt = blue_print(template_str, 
            [
              ["$$EXTRA_AT_BEGIN$$",  extra_at_begin_str],
              ["$$EXTRA_AT_END$$",    extra_at_end_str],
            ])
    txt = blue_print(txt,
             [
              ["$$BODY$$",                    Lng.SOURCE_REFERENCED(Descr.body)],
              ["$$CONSTRUCTOR$$",             Lng.SOURCE_REFERENCED(Descr.constructor)],
              ["$$COPY$$",                    copy_str],
              ["$$DESTRUCTOR$$",              Lng.SOURCE_REFERENCED(Descr.destructor)],
              ["$$DISTINCT_MEMBERS$$",        get_distinct_members(Descr)],
              ["$$FOOTER$$",                  Lng.SOURCE_REFERENCED(Descr.footer)],
              ["$$FUNC_TAKE_TEXT$$",          take_text_str],
              ["$$HEADER$$",                  Lng.SOURCE_REFERENCED(Descr.header)],
              ["$$INCLUDE_GUARD_EXTENSION$$", include_guard_extension_str],
              ["$$NAMESPACE_CLOSE$$",         Lng.NAMESPACE_CLOSE(Descr.name_space)],
              ["$$NAMESPACE_OPEN$$",          Lng.NAMESPACE_OPEN(Descr.name_space)],
              ["$$QUICK_SETTERS$$",           get_quick_setters(Descr)],
              ["$$SETTERS_GETTERS$$",         get_setter_getter(Descr)],
              ["$$TOKEN_REPETITION_N_GET$$",  Lng.SOURCE_REFERENCED(Descr.repetition_get)],
              ["$$TOKEN_REPETITION_N_SET$$",  Lng.SOURCE_REFERENCED(Descr.repetition_set)],
              ["$$UNION_MEMBERS$$",           get_union_members(Descr)],
              ["$$VIRTUAL_DESTRUCTOR$$",      virtual_destructor_str],
              ["$$TOKEN_CLASS_NAME_SAFE$$",   Descr.class_name_safe],
             ])

    txt   = blue_print(txt, helper_variable_replacements)

    if Setup.language.upper() != "C++" and Setup.token_class_only_f:
        extra_at_begin_str += local_strlen_str % (Descr.class_name_safe, Setup.buffer_element_type, Setup.buffer_element_type)

    txt_i = blue_print(template_i_str, 
            [
              ["$$EXTRA_AT_BEGIN$$",  extra_at_begin_str],
              ["$$EXTRA_AT_END$$",    extra_at_end_str],
            ])
    txt_i = blue_print(txt_i, 
                       [
                        ["$$CONSTRUCTOR$$",             Lng.SOURCE_REFERENCED(Descr.constructor)],
                        ["$$COPY$$",                    copy_str],
                        ["$$DESTRUCTOR$$",              Lng.SOURCE_REFERENCED(Descr.destructor)],
                        ["$$FOOTER$$",                  Lng.SOURCE_REFERENCED(Descr.footer)],
                        ["$$FUNC_TAKE_TEXT$$",          take_text_str],
                        ["$$TOKEN_CLASS_HEADER$$",      Setup.get_file_reference(blackboard.token_type_definition.get_file_name())],
                        ["$$INCLUDE_GUARD_EXTENSION$$", include_guard_extension_str],
                        ["$$NAMESPACE_OPEN$$",          Lng.NAMESPACE_OPEN(Descr.name_space)],
                        ["$$NAMESPACE_CLOSE$$",         Lng.NAMESPACE_CLOSE(Descr.name_space)],
                        ["$$TOKEN_REPETITION_N_GET$$",  Lng.SOURCE_REFERENCED(Descr.repetition_get)],
                        ["$$TOKEN_REPETITION_N_SET$$",  Lng.SOURCE_REFERENCED(Descr.repetition_set)],
                        ["$$TOKEN_CLASS_NAME_SAFE$$",   Descr.class_name_safe],
                       ])


    txt_i = blue_print(txt_i, helper_variable_replacements)

    return txt, txt_i
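
The token-class generator substitutes in several passes: first the $$EXTRA_AT_BEGIN$$/$$EXTRA_AT_END$$ hooks, then the main placeholders, and finally helper_variable_replacements (reused for both the header and the implementation template). A plausible reason for the ordering, sketched below with invented mini-templates, is that text inserted by an early pass may itself contain placeholders that only a later pass resolves:

# Invented mini-template and hook text, for illustration only:
template = "$$EXTRA_AT_BEGIN$$\nclass $TOKEN_CLASS;\n"
extra    = "#define QUEX_NAME_TOKEN(X)  $TOKEN_CLASS ## X\n"

# Order used above: extras first, helper pairs last.
good = template.replace("$$EXTRA_AT_BEGIN$$", extra) \
               .replace("$TOKEN_CLASS", "ExampleToken")
# -> every '$TOKEN_CLASS', including the one carried in by 'extra',
#    is resolved.

# Reversed order for contrast:
wrong = template.replace("$TOKEN_CLASS", "ExampleToken") \
                .replace("$$EXTRA_AT_BEGIN$$", extra)
# -> the '$TOKEN_CLASS' inside 'extra' survives as a literal placeholder.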
Example #57
def get_skipper(OpenerSequence, CloserSequence, CloserPattern, ModeName,
                OnSkipRangeOpen, DoorIdAfter):
    assert len(OpenerSequence) >= 1
    assert len(CloserSequence) >= 1
    assert OpenerSequence != CloserSequence

    skipper_index = sm_index.get()
    skipper_door_id = dial_db.new_door_id(skipper_index)

    opener_str, opener_comment_str = get_character_sequence(OpenerSequence)
    opener_length = len(OpenerSequence)
    closer_str, closer_comment_str = get_character_sequence(CloserSequence)
    closer_length = len(CloserSequence)

    variable_db.require("reference_p",
                        Condition="QUEX_OPTION_COLUMN_NUMBER_COUNTING")
    variable_db.require("counter")
    variable_db.require_array("Skipper%i_Opener",
                              Initial="{ %s }" % opener_str,
                              ElementN=opener_length,
                              Index=skipper_index)
    variable_db.require("Skipper%i_OpenerEnd",
                        "Skipper%i_Opener + (ptrdiff_t)%i" %
                        (skipper_index, opener_length),
                        Index=skipper_index)
    variable_db.require("Skipper%i_Opener_it", "0x0", Index=skipper_index)
    variable_db.require_array("Skipper%i_Closer",
                              Initial="{ %s }" % closer_str,
                              ElementN=closer_length,
                              Index=skipper_index)
    variable_db.require("Skipper%i_CloserEnd",
                        "Skipper%i_Closer + (ptrdiff_t)%i" %
                        (skipper_index, closer_length),
                        Index=skipper_index)
    variable_db.require("Skipper%i_Closer_it", "0x0", Index=skipper_index)

    reference_p_def = "    __QUEX_IF_COUNT_COLUMNS(reference_p = QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer));\n"
    before_reload   = "    __QUEX_IF_COUNT_COLUMNS_ADD((size_t)(QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer)\n" + \
                      "                                - reference_p));\n"
    after_reload = "        __QUEX_IF_COUNT_COLUMNS(reference_p = QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer));\n"

    if CloserSequence[-1] == ord('\n'):
        end_procedure = "       __QUEX_IF_COUNT_LINES_ADD((size_t)1);\n"
        end_procedure += "       __QUEX_IF_COUNT_COLUMNS_SET((size_t)1);\n"
    else:
        end_procedure = "        __QUEX_IF_COUNT_COLUMNS_ADD((size_t)(QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer)\n" + \
                        "                                    - reference_p));\n"

    reload_door_id = dial_db.new_door_id()
    on_skip_range_open = get_on_skip_range_open(OnSkipRangeOpen,
                                                CloserPattern,
                                                NestedF=True)

    code_str = blue_print(
        template_str,
        [
            ["$$SKIPPER_INDEX$$", __nice(skipper_index)],
            #
            ["$$OPENER_LENGTH$$", "%i" % opener_length],
            ["$$INPUT_P_INCREMENT$$",
             Lng.INPUT_P_INCREMENT()],
            ["$$INPUT_P_DECREMENT$$",
             Lng.INPUT_P_DECREMENT()],
            ["$$INPUT_GET$$", Lng.ACCESS_INPUT()],
            [
                "$$IF_INPUT_EQUAL_DELIMITER_0$$",
                Lng.IF_INPUT("==", "Skipper$$SKIPPER_INDEX$$[0]")
            ],
            ["$$ENDIF$$", Lng.END_IF()],
            ["$$ENTRY$$", Lng.LABEL(skipper_door_id)],
            ["$$RELOAD$$",
             dial_db.get_label_by_door_id(reload_door_id)],
            ["$$GOTO_AFTER_END_OF_SKIPPING$$",
             Lng.GOTO(DoorIdAfter)],
            ["$$GOTO_RELOAD$$", Lng.GOTO(reload_door_id)],
            ["$$INPUT_P_TO_LEXEME_START$$",
             Lng.INPUT_P_TO_LEXEME_START()],
            # When things were skipped, no change to acceptance flags or modes has
            # happened. One can jump immediately to the start without re-entry preparation.
            ["$$GOTO_ENTRY$$", Lng.GOTO(skipper_door_id)],
            ["$$MARK_LEXEME_START$$",
             Lng.LEXEME_START_SET()],
            ["$$ON_SKIP_RANGE_OPEN$$", on_skip_range_open],
            #
            ["$$LC_COUNT_COLUMN_N_POINTER_DEFINITION$$", reference_p_def],
            ["$$LC_COUNT_IN_LOOP$$",
             line_column_counter_in_loop()],
            ["$$LC_COUNT_END_PROCEDURE$$", end_procedure],
            ["$$LC_COUNT_BEFORE_RELOAD$$", before_reload],
            ["$$LC_COUNT_AFTER_RELOAD$$", after_reload],
        ])

    return [code_str]
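
Unlike the plain range skipper, this variant requires the 'counter' variable and passes NestedF=True, which suggests that opener/closer nesting is honoured. A hedged pure-Python sketch of that counting logic (invented names; the real generated code additionally handles buffer reload and column counting):

def skip_nested_range_sketch(text, pos, opener, closer):
    """Return the position just after the matching closer, honouring nested
    opener/closer pairs, or None if the range stays open."""
    counter = 1                      # the opener that started the skip
    while pos < len(text):
        if text.startswith(opener, pos):
            counter += 1
            pos += len(opener)
        elif text.startswith(closer, pos):
            counter -= 1
            pos += len(closer)
            if counter == 0:
                return pos           # matching closer found
        else:
            pos += 1
    return None                      # 'on skip range open' situation

# skip_nested_range_sketch("a /* b /* c */ d */ e", 4, "/*", "*/")  --> 19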
Example #58
def do(setup):
    """Creates a file of token-ids from a given set of names.
       Also creates a function:

       const string& $$token$$::map_id_to_name().
    """
    global file_str
    LanguageDB = Setup.language_db

    __propose_implicit_token_definitions()

    for standard_token_id in standard_token_id_list:
        assert token_id_db.has_key(standard_token_id)

    assert blackboard.token_type_definition is not None, \
           "Token type has not been defined yet, see $QUEX_PATH/quex/core.py how to\n" + \
           "handle this."

    # (*) Token ID File ________________________________________________________________
    #
    #     The token id file can either be specified as database of
    #     token-id names, or as a file that directly assigns the token-ids
    #     to variables. If the flag '--user-token-id-file' is defined,
    #     then the token-id file is provided by the user. Otherwise, the
    #     token id file is created by the token-id maker.
    #
    #     The token id maker considers the file passed by the option '-t'
    #     as the database file and creates a C++ file with the output filestem
    #     plus the suffix "--token-ids". Note that the token id file is a
    #     header file.
    #
    if len(token_id_db.keys()) == len(standard_token_id_list):
        token_id_str = "%sTERMINATION and %sUNINITIALIZED" % \
                       (setup.token_id_prefix_plain, setup.token_id_prefix_plain) 
        # TERMINATION + UNINITIALIZED = 2 token ids. If they are the only ones, nothing can be done.
        error_msg("Only token ids %s are defined.\n" % token_id_str + \
                  "Quex refuses to proceed. Please, use the 'token { ... }' section to\n" + \
                  "specify at least one other token id.")

    #______________________________________________________________________________________
    L = max(len(name) for name in token_id_db.keys())
    def space(Name):
        return " " * (L - len(Name))

    # -- define values for the token ids
    def define_this(txt, token):
        if setup.language == "C":
            txt.append("#define %s%s %s((QUEX_TYPE_TOKEN_ID)%i)\n" \
                       % (setup.token_id_prefix_plain, token.name, space(token.name), token.number))
        else:
            txt.append("const QUEX_TYPE_TOKEN_ID %s%s%s = ((QUEX_TYPE_TOKEN_ID)%i);\n" \
                       % (setup.token_id_prefix_plain, token.name, space(token.name), token.number))

    if setup.token_id_foreign_definition_file != "":
        token_id_txt = ["#include \"%s\"\n" % Setup.get_file_reference(setup.token_id_foreign_definition_file)]

    else:
        if setup.language == "C": 
            prolog = ""
            epilog = ""
        else:
            prolog = LanguageDB.NAMESPACE_OPEN(setup.token_id_prefix_name_space)
            epilog = LanguageDB.NAMESPACE_CLOSE(setup.token_id_prefix_name_space)

        token_id_txt = [prolog]

        # Assign values to tokens with no numeric identifier
        # NOTE: This does not need to happen if tokens are defined by a user-provided file.
        i = setup.token_id_counter_offset
        # Take the 'dummy_name' only to have the list sorted by name. The key 'dummy_name' 
        # may contain '--' to indicate a unicode value, so do not use it as name.
        for dummy_name, token in sorted(token_id_db.items()):
            if token.number is None: 
                while __is_token_id_occupied(i):
                    i += 1
                token.number = i

            define_this(token_id_txt, token)

        # Double check that no token id appears twice
        # Again, this can only happen if quex itself produced the numeric values for the tokens.
        token_list = token_id_db.values()
        for i, x in enumerate(token_list):
            for y in token_list[i+1:]:
                if x.number != y.number: continue
                error_msg("Token id '%s'" % x.name, x.file_name, x.line_n, DontExitF=True)
                error_msg("and token id '%s' have same numeric value '%s'." \
                          % (y.name, x.number), y.file_name, y.line_n, DontExitF=True)
                          
        token_id_txt.append(epilog)

    content = blue_print(file_str,
                         [["$$TOKEN_ID_DEFINITIONS$$",        "".join(token_id_txt)],
                          ["$$DATE$$",                        time.asctime()],
                          ["$$TOKEN_CLASS_DEFINITION_FILE$$", Setup.get_file_reference(blackboard.token_type_definition.get_file_name())],
                          ["$$TOKEN_PREFIX$$",                setup.token_id_prefix], 
                          ["$$INCLUDE_GUARD_EXT$$",           get_include_guard_extension(         \
                                                                  Setup.analyzer_name_safe.upper() \
                                                                + "__"                             \
                                                                + Setup.token_class_name_safe.upper())], 
                         ])

    return content
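
The numbering loop above hands every token without an explicit id the next free value, skipping numbers already taken by user-defined ids (__is_token_id_occupied in the original). A compact sketch of that policy, using a plain name-to-number dict instead of token objects (invented formulation, same idea):

def assign_token_ids_sketch(token_db, offset):
    """Assign the next free number >= offset to every entry that is None,
    never colliding with numbers already present."""
    occupied = set(n for n in token_db.values() if n is not None)
    i = offset
    for name in sorted(token_db):
        if token_db[name] is None:
            while i in occupied:
                i += 1
            token_db[name] = i
            occupied.add(i)
    return token_db

# assign_token_ids_sketch({"TERMINATION": 0, "IDENT": None, "NUM": 2}, 1)
# --> {"TERMINATION": 0, "IDENT": 1, "NUM": 2}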