def get_main_function(tm0, TransitionTxt, Codec):
    def indent(Txt, N):
        return (" " * N) + (Txt.replace("\n", "\n" + (" " * N)))

    input_preparation = get_read_preparation(Codec)

    entry_list = [(0 if interval.begin < 0 else interval.begin, target)
                  for interval, target in tm0]
    entry_list.append((tm0[-1][0].end, -1))  # sentinel: first code point beyond the last interval
    entry_list.append((0x1FFFF, -1))
    expected_array = [
        "        { 0x%06X, %s },\n" % (begin, target)
        for begin, target in entry_list
    ]

    txt = main_template.replace("$$ENTRY_LIST$$", "".join(expected_array))
    txt = txt.replace("$$TRANSITION$$", indent(TranstionTxt, 4))
    txt = txt.replace("$$PREPARE_INPUT$$", input_preperation)

    door_id = DoorID.incidence(E_IncidenceIDs.BAD_LEXATOM, dial_db)
    txt = txt.replace("$$ON_BAD_LEXATOM$$", Lng.LABEL_STR(door_id, dial_db))
    txt = txt.replace("$$DROP_OUT_MINUS_1$$",
                      Lng.LABEL_STR(DoorID.drop_out(-1, dial_db)))

    txt = txt.replace("MATCH_FAILURE", "((int)-1)")
    return txt
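
# The '{ 0x%06X, target }' entries above form a sorted lookup table: a code
# point maps to the target of the greatest 'begin' not exceeding it, and the
# trailing '-1' entries act as end-of-range sentinels. A minimal, runnable
# sketch of that lookup (added illustration, not quex code):
from bisect import bisect_right

def table_lookup(entry_list, code_point):
    begins = [begin for begin, target in entry_list]
    i = bisect_right(begins, code_point) - 1
    if i < 0: return -1
    return entry_list[i][1]

# table_lookup([(0x00, 5), (0x80, 7), (0x100, -1), (0x1FFFF, -1)], 0x90)
# => 7    (0x90 falls into the interval that begins at 0x80)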
Example #2
def do_token_class_info():
    token_class = Lng.NAME_IN_NAMESPACE(Setup.token_class_name, Setup.token_class_name_space) 
    info_list = [
        ## "  --token-id-prefix  %s" % Setup.token_id_prefix,
        "  --token-class-file %s" % Setup.output_token_class_file,
        "  --token-class      %s" % token_class,
        "  --token-id-type    %s" % Setup.token_id_type,
        "  --lexatom-type     %s" % Setup.lexatom.type,
    ]
    if token_db.support_repetition():
        info_list.append("  --token-class-support-repetition")
    if token_db.support_take_text():
        info_list.append("  --token-class-support-take-text")

    print "info: Analyzers using this token class must be generated with"
    print "info:"
    for line in info_list:
        print "info:    %s" % line
    print "info:"
    print "info: Header: \"%s\"" % token_db.token_type_definition.get_file_name() 
    print "info: Source: \"%s\"" % Setup.output_token_class_file_implementation

    comment = ["<<<QUEX-OPTIONS>>>\n"]
    for line in info_list:
        if line.find("--token-class-file") != -1: continue
        comment.append("%s\n" % line)
    comment.append("<<<QUEX-OPTIONS>>>")
    return Lng.ML_COMMENT("".join(comment), IndentN=0)
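
# 'Lng.ML_COMMENT' renders text as a multi-line comment in the output
# language. A hedged sketch of what it plausibly produces for a C-style
# back-end (the real quex implementation may format differently):
def ml_comment_sketch(Content, IndentN=0):
    indent = " " * IndentN
    body   = Content.rstrip("\n").replace("\n", "\n%s * " % indent)
    return "%s/* %s\n%s */\n" % (indent, body, indent)

# ml_comment_sketch("<<<QUEX-OPTIONS>>>\n--token-id-type uint32_t\n")
# => "/* <<<QUEX-OPTIONS>>>\n * --token-id-type uint32_t\n */\n"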
Example #3
def get_on_indentation_handler(Mode):
    # 'on_dedent' and 'on_n_dedent' cannot be defined at the same time.
    assert not (    E_IncidenceIDs.INDENTATION_DEDENT   in Mode.incidence_db \
                and E_IncidenceIDs.INDENTATION_N_DEDENT in Mode.incidence_db)

    # A mode that deals only with the default indentation handler relies
    # on what is defined in '$QUEX_PATH/analyzer/member/on_indentation.i'
    if Mode.incidence_db.default_indentation_handler_f():
        return "    return;"

    code_fragment = Mode.incidence_db.get(E_IncidenceIDs.INDENTATION_INDENT)
    if code_fragment is not None:
        on_indent_str = Lng.SOURCE_REFERENCED(code_fragment)
    else:
        on_indent_str = "self_send(__QUEX_SETTING_TOKEN_ID_INDENT);"

    code_fragment = Mode.incidence_db.get(E_IncidenceIDs.INDENTATION_NODENT)
    if code_fragment is not None:
        on_nodent_str = Lng.SOURCE_REFERENCED(code_fragment)
    else:
        on_nodent_str = "self_send(__QUEX_SETTING_TOKEN_ID_NODENT);"

    on_dedent_str = ""
    on_n_dedent_str = ""
    code_fragment = Mode.incidence_db.get(E_IncidenceIDs.INDENTATION_DEDENT)
    if code_fragment is not None:
        on_dedent_str = Lng.SOURCE_REFERENCED(code_fragment)

    code_fragment = Mode.incidence_db.get(E_IncidenceIDs.INDENTATION_N_DEDENT)
    if code_fragment is not None:
        on_n_dedent_str = Lng.SOURCE_REFERENCED(code_fragment)

    if (not on_dedent_str) and (not on_n_dedent_str):
        # Neither 'on_dedent' nor 'on_n_dedent' is defined => default behavior:
        on_dedent_str = ""
        on_n_dedent_str = "#if defined(QUEX_OPTION_TOKEN_REPETITION_SUPPORT)\n"
        on_n_dedent_str += "    self_send_n(ClosedN, __QUEX_SETTING_TOKEN_ID_DEDENT);\n"
        on_n_dedent_str += "#else\n"
        on_n_dedent_str += "    while( start-- != stack->back ) self_send(__QUEX_SETTING_TOKEN_ID_DEDENT);\n"
        on_n_dedent_str += "#endif\n"

    code_fragment = Mode.incidence_db.get(E_IncidenceIDs.INDENTATION_ERROR)
    if code_fragment is not None:
        on_indentation_error = Lng.SOURCE_REFERENCED(code_fragment)
    else:
        # Default: Blow the program if there is an indentation error.
        on_indentation_error = 'QUEX_ERROR_EXIT("Lexical analyzer mode \'%s\': indentation error detected!\\n"' \
                               % Mode.name + \
                               '                "No \'on_indentation_error\' handler has been specified.\\n");'

    # Note: 'on_indentation_bad' is applied in code generation for
    #       indentation counter in 'indentation_counter.py'.
    txt = blue_print(
        on_indentation_str,
        [["$$INDENT-PROCEDURE$$", on_indent_str],
         ["$$NODENT-PROCEDURE$$", on_nodent_str],
         ["$$DEDENT-PROCEDURE$$", on_dedent_str],
         ["$$N-DEDENT-PROCEDURE$$", on_n_dedent_str],
         ["$$INDENTATION-ERROR-PROCEDURE$$", on_indentation_error]])
    return txt
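
# 'blue_print' fills a code template by replacing '$$NAME$$' markers; the
# 'on_indentation_str' argument above is a module-level template assumed to
# be defined elsewhere in quex. A minimal stand-in with the same call shape,
# assuming plain sequential substitution (illustration only):
def blue_print_sketch(TemplateTxt, ReplacementList):
    for placeholder, substitution in ReplacementList:
        TemplateTxt = TemplateTxt.replace(placeholder, substitution)
    return TemplateTxt

# blue_print_sketch("a $$X$$ b $$Y$$", [["$$X$$", "1"], ["$$Y$$", "2"]])
# => "a 1 b 2"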
Example #4
    def __adorn_user_code(self, Code, MatchF):
        """Adorns user code with:
           -- storage of last character, if required for 'begin of line'
              pre-context.
           -- storage of the terminating zero, if the lexeme is required
              as a zero-terminated string.
           -- add the 'on_match' event handler in front, if match is relevant.
           -- adding source reference information.
        """
        code_user = "******" % (Lng._SOURCE_REFERENCE_BEGIN(
            Code.sr), pretty_code(
                Code.get_code()), Lng._SOURCE_REFERENCE_END())

        lexeme_begin_f, \
        terminating_zero_f = self.get_lexeme_flags(Code)

        txt_terminating_zero = Lng.LEXEME_TERMINATING_ZERO_SET(
            terminating_zero_f)

        if MatchF: txt_on_match = Lng.SOURCE_REFERENCED(self.on_match)
        else: txt_on_match = ""

        result = "".join([
            self.txt_store_last_character,
            txt_terminating_zero,
            txt_on_match,
            "{\n",
            code_user,
            "\n}\n",
        ])

        return lexeme_begin_f, terminating_zero_f, result
Example #5
    def implement(self):
        L = len(self.sub_map)
        assert L != 0

        tm = [
            (interval, "".join(transition.do(interval, target)))
            for interval, target in self.sub_map
        ]

        if len(tm) == 1:
            return Lng.COMPARISON_SEQUENCE(tm, None)

        tm, default = ComparisonSequence.optimize(tm)

        # The buffer limit code appears extremely seldom
        # => if it is there, make sure it is tested last.
        #    (This may require reversing the trigger map.)
        # The 'BLC' might actually no longer occur in the optimized map. Thus, 
        # search for it in the original transition map.
        blc_index = TransitionMap.bisect(self.sub_map, Setup.buffer_limit_code)
        if blc_index is not None and blc_index < L / 2:
            def get_decision(interval, i, L):
                if   i == L-1:             return Lng.ELSE_SIMPLE
                elif interval.size() == 1: return Lng.IF_X("==", interval.begin, i, L)
                else:                      return Lng.IF_X(">=", interval.begin, i, L)

            tm = list(reversed(tm))
        else:
            def get_decision(interval, i, L):
                if   i == L-1:             return Lng.ELSE_SIMPLE
                elif interval.size() == 1: return Lng.IF_X("==", interval.begin, i, L)
                else:                      return Lng.IF_X("<",  interval.end,   i, L)

        if default is not None: tm.append(default)
        return Lng.COMPARISON_SEQUENCE(tm, get_decision)
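
# The two 'get_decision' variants above emit a linear if/else chain over
# intervals; the reversed variant tests '>= begin' on a reversed list. A
# runnable miniature of the forward variant, with intervals as half-open
# (begin, end) pairs (added illustration, not quex code):
def comparison_sequence_sketch(tm, value):
    L = len(tm)
    for i, ((begin, end), target) in enumerate(tm):
        if i == L - 1:                       # ELSE_SIMPLE
            return target
        elif end - begin == 1:               # IF_X("==", begin, ...)
            if value == begin: return target
        elif value < end:                    # IF_X("<", end, ...)
            return target

# comparison_sequence_sketch(
#     [((0, 1), "a"), ((1, 10), "b"), ((10, 11), "c")], 5)  =>  "b"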
Example #6
def do_implementation(ModeDB):

    FileTemplate = os.path.normpath(QUEX_PATH + Lng["$code_base"] +
                                    "/analyzer/TXT-Cpp.i")
    func_txt = get_file_content_or_die(FileTemplate)

    func_txt = blue_print(func_txt, [
        [
            "$$CONSTRUCTOR_EXTENSTION$$",
            Lng.SOURCE_REFERENCED(blackboard.class_constructor_extension)
        ],
        [
            "$$CONVERTER_HELPER_I$$",
            Setup.get_file_reference(Setup.output_buffer_codec_header_i)
        ],
        [
            "$$CONSTRUCTOR_MODE_DB_INITIALIZATION_CODE$$",
            get_constructor_code(ModeDB.values())
        ],
        [
            "$$MEMENTO_EXTENSIONS_PACK$$",
            Lng.SOURCE_REFERENCED(blackboard.memento_pack_extension)
        ],
        [
            "$$MEMENTO_EXTENSIONS_UNPACK$$",
            Lng.SOURCE_REFERENCED(blackboard.memento_unpack_extension)
        ],
    ])
    return func_txt
Example #7
def do(TheAnalyzer):
    """Generate source code for a given state machine 'SM'.
    """
    Lng.register_analyzer(TheAnalyzer)

    assert id(Lng.analyzer) == id(TheAnalyzer)

    # (*) Init State must be first!
    txt = []
    state_coder.do(txt, TheAnalyzer.state_db[TheAnalyzer.init_state_index],
                   TheAnalyzer)

    # (*) Second: The drop-out catcher, since it is referenced the most.
    #     (Is implemented entirely by 'entry')
    code_drop_out_catcher(txt, TheAnalyzer)

    # (*) Code the Mega States (implementing multiple states in one)
    for state in TheAnalyzer.mega_state_list:
        mega_state_coder.do(txt, state, TheAnalyzer)

    # (*) All other (normal) states (sorted by their frequency of appearance)
    for state in remaining_non_mega_state_iterable(TheAnalyzer):
        state_coder.do(txt, state, TheAnalyzer)

    Lng.unregister_analyzer()
    return txt
Example #9
def __get_token_id_definition_txt():
    
    assert len(Setup.token_id_foreign_definition_file) == 0

    def define_this(txt, token, L):
        assert token.number is not None
        if Setup.language == "C":
            txt.append("#define %s%s %s((QUEX_TYPE_TOKEN_ID)%i)\n" \
                       % (Setup.token_id_prefix_plain, token.name, space(L, token.name), token.number))
        else:
            txt.append("const QUEX_TYPE_TOKEN_ID %s%s%s = ((QUEX_TYPE_TOKEN_ID)%i);\n" \
                       % (Setup.token_id_prefix_plain, token.name, space(L, token.name), token.number))

    if Setup.language == "C": 
        prolog = ""
        epilog = ""
    else:
        prolog = Lng.NAMESPACE_OPEN(Setup.token_id_prefix_name_space)
        epilog = Lng.NAMESPACE_CLOSE(Setup.token_id_prefix_name_space)

    # Considering 'items' allows sorting by name. The name is the 'key' in 
    # the dictionary 'token_id_db'.
    L      = max(map(len, token_id_db.iterkeys()))
    result = [prolog]
    for dummy, token in sorted(token_id_db.iteritems()):
        define_this(result, token, L)
    result.append(epilog)

    return result
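
# 'space(L, Name)' above is an alignment helper that is not shown in this
# listing; a plausible minimal definition (hypothetical, for illustration):
def space(L, Name):
    # Pad so that every definition's value starts in the same column.
    return " " * (L - len(Name) + 1)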
Example #10
def __member(TypeCode, MaxTypeNameL, VariableName, MaxVariableNameL, IndentationOffset=""):
    my_def  = Lng._SOURCE_REFERENCE_BEGIN(TypeCode.sr)
    my_def += IndentationOffset
    my_def += Lng.CLASS_MEMBER_DEFINITION(TypeCode.get_pure_text(), MaxTypeNameL, 
                                          VariableName)
    my_def += Lng._SOURCE_REFERENCE_END(TypeCode.sr)
    return my_def
Example #11
def _character_converters():
    if isinstance(Setup.buffer_encoding, EncodingTrafoBySplit):
        encoding_name = Lng.SAFE_IDENTIFIER(Setup.adapted_encoding_name())
        return Lng.template_converter_character_functions_standard(
            encoding_name)
    else:
        return _table_character_converters(Setup.buffer_encoding)
Example #13
    def action(ThePattern, PatternName):
        txt = []
        if ThePattern.sm_bipd_to_be_reversed is not None:
            terminal_factory.do_bipd_entry_and_return(txt, ThePattern)

        txt.append("%s\n" % Lng.STORE_LAST_CHARACTER(
            blackboard.required_support_begin_of_line()))
        txt.append("%s\n" % Lng.LEXEME_TERMINATING_ZERO_SET(True))
        txt.append('printf("%19s  \'%%s\'\\n", Lexeme); fflush(stdout);\n' %
                   PatternName)

        if "->1" in PatternName:
            txt.append(
                "me->current_analyzer_function = QUEX_NAME(M_analyzer_function);\n"
            )
        elif "->2" in PatternName:
            txt.append(
                "me->current_analyzer_function = QUEX_NAME(M2_analyzer_function);\n"
            )

        if "CONTINUE" in PatternName: txt.append("")
        elif "STOP" in PatternName:
            txt.append(
                "QUEX_NAME(MF_error_code_set_if_first)(me, E_Error_UnitTest_Termination); return;\n"
            )
        else:
            txt.append("return;\n")

        txt.append(
            "%s\n" %
            Lng.GOTO(DoorID.continue_with_on_after_match(dial_db), dial_db))
        ## print "#", txt
        return CodeTerminal(txt)
Example #15
def __copy_files(OutputDir, FileSet):
    include_db = [
        ("declarations",      "$$INCLUDE_TOKEN_CLASS_DEFINITION$$",     Lng.INCLUDE(Setup.output_token_class_file)),
        ("implementations.i", "$$INCLUDE_TOKEN_CLASS_IMPLEMENTATION$$", Lng.INCLUDE(Setup.output_token_class_file_implementation)),
        ("implementations-inline.i", "$$INCLUDE_TOKEN_CLASS_IMPLEMENTATION$$", Lng.INCLUDE(Setup.output_token_class_file_implementation)),
        ("token/TokenQueue",  "$$INCLUDE_TOKEN_CLASS_DEFINITION$$",     Lng.INCLUDE(Setup.output_token_class_file)),
        ("token/TokenQueue",  "$$INCLUDE_LEXER_CLASS_DEFINITION$$",     Lng.INCLUDE(Setup.output_header_file)),
    ]
    for path, dummy, dummy in include_db:
        directory, basename = os.path.split(path)
        assert (not directory and basename in dir_db[""]) \
               or (basename in dir_db["%s/" % directory])

    file_pair_list,   \
    out_directory_set = __get_source_drain_list(OutputDir, FileSet)

    # Make directories
    # Sort according to length => create parent directories before child.
    for directory in sorted(out_directory_set, key=len):
        if os.access(directory, os.F_OK): continue
        os.makedirs(directory) # create parents, if necessary

    # Copy
    for source_file, drain_file in file_pair_list:
        content = open_file_or_die(source_file, "rb").read()
        for path, origin, replacement in include_db:
            if not source_file.endswith(path): continue
            content = content.replace(origin, replacement)

        content = adapt.do(content, OutputDir, OriginalPath=source_file)
        write_safely_and_close(drain_file, content)
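
# Why the length-sort above creates parents before children: a parent
# directory is a proper prefix of its child path, hence strictly shorter.
# (Runnable illustration:)
out_dirs = ["out/token/queue", "out", "out/token"]
assert sorted(out_dirs, key=len) == ["out", "out/token", "out/token/queue"]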
Example #16
def _get_state_machine_vs_terminal_bad_indentation(BadSpaceCharacterSet,
                                                   IncidenceDb, dial_db):
    """Generate state machine that detects the 'bad indentation character'.
    Generate terminal that emboddies the defined 'bad indentation character
    handler' from the incidence_dab.

    RETURNS: [0] state machine
             [1] terminal
    """

    sm = DFA.from_character_set(BadSpaceCharacterSet,
                                E_IncidenceIDs.INDENTATION_BAD)

    on_bad_indentation_txt = "".join([
        "%s\n" %
        Lng.RAISE_ERROR_FLAG_BY_INCIDENCE_ID(E_IncidenceIDs.INDENTATION_BAD),
        Lng.SOURCE_REFERENCED(IncidenceDb[E_IncidenceIDs.INDENTATION_BAD])
    ])

    code = Lng.ON_BAD_INDENTATION(on_bad_indentation_txt,
                                  E_IncidenceIDs.INDENTATION_BAD, dial_db)

    terminal = loop.MiniTerminal(code,
                                 "<INDENTATION BAD INDENTATION CHARACTER>",
                                 E_IncidenceIDs.INDENTATION_BAD)

    return sm, terminal
Example #17
    def action(ThePattern, PatternName):
        txt = []
        if ThePattern.bipd_sm is not None:
            TerminalFactory.do_bipd_entry_and_return(txt, ThePattern)

        txt.append("%s\n" % Lng.STORE_LAST_CHARACTER(
            blackboard.required_support_begin_of_line()))
        txt.append("%s\n" % Lng.LEXEME_TERMINATING_ZERO_SET(True))
        txt.append('printf("%19s  \'%%s\'\\n", Lexeme); fflush(stdout);\n' %
                   PatternName)

        if "->1" in PatternName:
            txt.append(
                "me->current_analyzer_function = QUEX_NAME(Mr_analyzer_function);\n"
            )
        elif "->2" in PatternName:
            txt.append(
                "me->current_analyzer_function = QUEX_NAME(Mrs_analyzer_function);\n"
            )

        if "CONTINUE" in PatternName: txt.append("")
        elif "STOP" in PatternName: txt.append("return false;\n")
        else: txt.append("return true;\n")

        txt.append("%s\n" % Lng.GOTO(DoorID.continue_with_on_after_match()))
        ## print "#", txt
        return CodeTerminal(txt)
Example #18
def _type_definitions():
    token_descr = token_db.token_type_definition
    if Setup.computed_gotos_f: type_goto_label  = "void*"
    else:                      type_goto_label  = "int32_t"

    type_def_list = [
        ("lexatom_t",         Setup.lexatom.type),
        ("token_id_t",        token_descr.token_id_type),
        ("token_line_n_t",    token_descr.line_number_type.get_pure_text()),
        ("token_column_n_t",  token_descr.column_number_type.get_pure_text()),
        ("acceptance_id_t",   "int"),
        ("indentation_t",     "int"),
        ("stream_position_t", "intmax_t"),
        ("goto_label_t",      type_goto_label)
    ]

    excluded = ""
    if not blackboard.required_support_indentation_count():
        excluded = "indentation_t"

    def_str = "\n".join(Lng.QUEX_TYPE_DEF(original, customized_name) 
                        for customized_name, original in type_def_list 
                        if customized_name != excluded) \
              + "\n"

    return Lng.FRAME_IN_NAMESPACE_MAIN(def_str)
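
# A plausible rendering of 'Lng.QUEX_TYPE_DEF' for a C back-end -- a hedged
# stand-in for illustration; quex's actual output format may differ:
def quex_type_def_sketch(Original, CustomizedName):
    return "typedef %s %s;" % (Original, CustomizedName)

# quex_type_def_sketch("intmax_t", "stream_position_t")
# => "typedef intmax_t stream_position_t;"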
Example #19
    def do_end_of_stream(self, Code, ThePattern):
        """End of Stream: The terminating zero has been reached and no further
        content can be loaded.
        """
        lexeme_begin_f,     \
        terminating_zero_f, \
        adorned_code        = self.__adorn_user_code(Code, MatchF=True)

        # No indentation handler => Empty string.
        text = [
            Lng.DEFAULT_COUNTER_CALL(),
            self.txt_indentation_handler_call,
            #
            adorned_code,
            #
            Lng.ML_COMMENT(
                "End of Stream FORCES a return from the lexical analyzer, so that no\n"
                "tokens can be filled after the termination token."),
            Lng.GOTO(DoorID.return_with_on_after_match()),
        ]

        code = CodeTerminal(text,
                            SourceReference=Code.sr,
                            PureCode=Code.get_pure_code())
        return Terminal(code, "END_OF_STREAM")
Example #20
    def implement(self):
        """Transitions of characters that lie close to each other can be very 
        efficiently be identified by a switch statement. For example:

               switch( Value ) {
               case 1: ..
               case 2: ..
               ...
               case 100: ..
               }

        This is implemented by just a few lines of i386 assembly:

               sall    $2, %eax
               movl    .L13(%eax), %eax
               jmp     *%eax

        where 'jmp *%eax' jumps immediately to the correct switch case.
        
        It is therefore of vital interest that those regions are *identified* 
        and *not split* by a bisection. To achieve this, such regions are
        turned into a transition of their own, based on the character
        range that they cover.
        """
        case_code_list = [
            (interval, Lng.TRANSITION_MAP_TARGET(interval, target))
            for interval, target in self.sub_map
            if target != self.moat
        ]

        return Lng.BRANCH_TABLE_ON_INTERVALS("input", case_code_list,
                   DefaultConsequence=Lng.TRANSITION_MAP_TARGET(None, self.moat))
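
# 'self.moat' is the target that becomes the branch table's default
# consequence, so only the exceptional intervals turn into 'case' labels.
# A sketch of how such a default might be chosen, assuming the most
# frequent target wins (illustration only, not quex's actual heuristic):
def find_moat_sketch(sub_map):
    counter = {}
    for interval, target in sub_map:
        counter[target] = counter.get(target, 0) + 1
    return max(counter, key=counter.get)

# find_moat_sketch([((0, 1), "A"), ((1, 2), "B"), ((2, 3), "B")])  =>  "B"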
Example #21
def _add_comment(psml, SmCommentOriginal, CounterDb):
    """On matching the comment state machine goto a terminal that does the 
    following:
    """
    if SmCommentOriginal is None: return

    comment_skip_iid = dial_db.new_incidence_id()

    # Disconnect from machines being used elsewhere.
    SmComment = SmCommentOriginal.clone()
    SmComment.set_id(comment_skip_iid)

    if SmComment.last_character_set().contains_only(ord('\n')):
        code = Lng.COMMAND_LIST([
            LineCountAdd(1),
            AssignConstant(E_R.Column, 1),
        ])
    else:
        count_info = CountInfo.from_StateMachine(
            SmComment, CounterDb, CodecTrafoInfo=Setup.buffer_codec)
        code = [
            Lng.COMMAND(Assign(E_R.ReferenceP, E_R.LexemeStartP)),
            CounterDb.do_CountInfo(count_info),
            Lng.COMMAND(Assign(E_R.LexemeStartP, E_R.ReferenceP))
        ]

    code.append(Lng.GOTO(DoorID.incidence(E_IncidenceIDs.INDENTATION_HANDLER)))

    terminal = Terminal(CodeTerminal(code), "INDENTATION COMMENT")
    terminal.set_incidence_id(comment_skip_iid)

    psml.append((SmComment, terminal))
Example #22
    def get_increment(txt, Increment, IncrementByLexemeLength, HelpStr):
        if IncrementByLexemeLength == 0 or Increment == 0:
            return 
        elif Increment != E_Count.VOID:
            arg = Lng.VALUE_STRING(Increment)
        else:
            arg = Lng.MULTIPLY_WITH("LexemeL", IncrementByLexemeLength)

        txt.append("__QUEX_IF_COUNT_%s_ADD(%s);\n" % (HelpStr, arg))
Example #23
    def unicode_to_output(self, CodeUnitN):
        txt = [
            Lng.ASSIGN(
                "unicode",
                "(uint32_t)(%s)" % Lng.OP("(int32_t)input", "+", "offset"))
        ]
        txt.extend(self.get_output_formatter(CodeUnitN))
        txt.append(Lng.PURE_RETURN)
        return ["    %s" % line for line in txt]
Example #24
def wrap_up(ModeName, FunctionBody, VariableDefs, ModeNameList, dial_db):
    txt_function = Lng.ANALYZER_FUNCTION(ModeName, Setup, VariableDefs, 
                                         FunctionBody, dial_db, ModeNameList) 
    txt_header   = Lng.HEADER_DEFINITIONS(dial_db) 
    assert isinstance(txt_header, (str, unicode))

    txt_analyzer = get_plain_strings(txt_function, dial_db)
    assert all_isinstance(txt_analyzer, (str, unicode))

    return [ txt_header ] + txt_analyzer
Example #25
def __create_mode_transition_and_token_sender(fh, Command):
    assert Command in ["GOTO", "GOSUB", "GOUP"]

    position = fh.tell()
    target_mode = ""
    token_sender = ""
    if check(fh, "("):
        skip_whitespace(fh)
        if Command != "GOUP":
            target_mode = __read_token_identifier(fh)
            skip_whitespace(fh)

        if check(fh, ")"):
            token_sender = ""

        elif Command == "GOUP" or check(fh, ","):
            skip_whitespace(fh)
            token_name = __read_token_identifier(fh)
            skip_whitespace(fh)

            if check(fh, ","):
                error_msg(
                    "Missing opening '(' after token name specification.\n"
                    "Note, that since version 0.50.1 the syntax for token senders\n"
                    "inside brief mode transitions is like:\n\n"
                    "     => GOTO(MYMODE, QUEX_TKN_MINE(Argument0, Argument1, ...));\n",
                    fh)

            token_sender = __create_token_sender_by_token_name(fh, token_name)

            if check(fh, ")") == False:
                error_msg("Missing closing ')' or ',' after '%s'." % Command,
                          fh)

        else:
            fh.seek(position)
            error_msg("Missing closing ')' or ',' after '%s'." % Command, fh)

    if check(fh, ";") == False:
        error_msg("Missing ')' or ';' after '%s'." % Command, fh)

    if Command in ["GOTO", "GOSUB"] and target_mode == "":
        error_msg(
            "Command %s requires at least one argument: The target mode." %
            Command, fh)

    # Code for mode change
    if Command == "GOTO": txt = Lng.MODE_GOTO(target_mode)
    elif Command == "GOSUB": txt = Lng.MODE_GOSUB(target_mode)
    else: txt = Lng.MODE_GOUP()

    # Code for token sending
    txt += token_sender

    return txt
Example #26
def _get_pre_context_epilog_definition(dial_db):
    backup_position = Lng.REGISTER_NAME(E_R.BackupStreamPositionOfLexemeStartP)

    txt = [
        Lng.LABEL(DoorID.global_end_of_pre_context_check(dial_db)),
        #-------------------
        Lng.IF(backup_position, "!=", "((QUEX_TYPE_STREAM_POSITION)-1)"),
            # "QUEX_NAME(Buffer_print_content)(&me->buffer);\n",
            # "std::cout << std::endl;\n",
            Lng.IF("false", "==", Lng.BUFFER_SEEK(backup_position)),
                Lng.RAISE_ERROR_FLAG("E_Error_File_SeekFailed"),
                Lng.RETURN,
            Lng.END_IF,
            Lng.LEXEME_START_SET(PositionStorage=None), # use '_read_p'
            # "std::cout << \"lexst \" << me->buffer._lexeme_start_p[0] << std::endl;",
            # "std::cout << \"readp \" << me->buffer._read_p[0] << std::endl;",
            # "QUEX_NAME(Buffer_print_content)(&me->buffer);\n",
            # "std::cout << std::endl;\n",
            Lng.ASSIGN(backup_position, "((QUEX_TYPE_STREAM_POSITION)-1)"),
        Lng.ELSE_FOLLOWS,
            #-----------------------
            # -- set the input stream back to the real current position.
            #    during backward lexing the analyzer went backwards, so it needs to be reset.
            Lng.INPUT_P_TO_LEXEME_START(),
        Lng.END_IF,
    ]

    return [ "%s\n" % line for line in txt ]
Example #27
    def __counter_code(self, LCCI):
        """Get the text of the source code required for 'counting'. This information
        has been stored along with the pattern before any transformation happened.
        No database or anything is required at this point.
        """
        run_time_counter_required_f, \
        cmd_list                     = SmLineColumnCountInfo.get_OpList(LCCI, ModeName=self.mode_name)

        self.run_time_counter_required_f |= run_time_counter_required_f
        text                              = Lng.COMMAND_LIST(cmd_list, self.dial_db)
        return "".join(Lng.REPLACE_INDENT(text))
Example #28
def do(Interval, Target, IndentF=False):
    global Setup
    global Lng

    if hasattr(Target, "code"): txt = Target.code()
    elif type(Target) == long: txt = [Lng.GOTO_ADDRESS(Target)]
    else: txt = [Target]

    if Interval is not None and Setup.comment_transitions_f:
        txt.append(Lng.COMMENT(Interval.get_utf8_string()))

    return txt
Example #29
def __namespace_brackets(DefineF=False):
    token_descr = blackboard.token_type_definition

    if Setup.language.upper() == "C++":
        open_str = Lng.NAMESPACE_OPEN(token_descr.name_space).strip()
        close_str = Lng.NAMESPACE_CLOSE(token_descr.name_space).strip()
        if DefineF:
            open_str = open_str.replace("\n", "\\\n")
            close_str = close_str.replace("\n", "\\\n")
        return open_str, close_str
    else:
        return "", ""
Example #30
def get_skipper(TheAnalyzer, OpenerSequence, CloserSequence, OnSkipRangeOpen,
                DoorIdAfter, CounterDb):
    """
                                    .---<---+----------<------+------------------.
                                    |       |                 |                  |
                                    |       | not             | open_n += 1      |  
                                  .------.  | Closer[0]       |                  |
       -------------------------->| Loop +--'                 |                  |
                                  |      |                    | yes              | 
                                  |      |                    |                  |
                                  |      |          .-------------.              |
                                  |      +----->----| Opener[1-N] |              |
                                  |      |          |      ?      |              |
                                  |      |          '-------------'              |
                                  |      |                                       | open_n > 0
                                  |      |          .-------------.              | 
                                  |      +----->----| Closer[1-N] |--------------+------> RESTART
                                  |      |          |      ?      | open_n -= 1    else
                                  |      |          '-------------'             
                                  |      |                             
                                  |  BLC +-->-.  
                              .->-|      |     \                 Reload State 
            .-DoorID(S, 1)--./    '------'      \            .------------------.
         .--| after_reload  |                    \          .---------------.   |
         |  '---------------'                     '---------| before_reload |   |
         |                                                  '---------------'   |
         '---------------------------------------------------|                  |
                                                     success '------------------'     
                                                                     | failure      
                                                                     |            
                                                              .---------------.       
                                                              | SkipRangeOpen |       
                                                              '---------------'                                                                   

    """
    psml = _get_state_machine_vs_terminal_list(CloserSequence, OpenerSequence,
                                               CounterDb, DoorIdAfter)
    count_op_factory = CountInfoMap.from_LineColumnCount(
        CounterDb, NumberSet_All(), Lng.INPUT_P())
    result,          \
    door_id_beyond   = loop.do(count_op_factory,
                               OnLoopExit        = [ Op.GotoDoorId(DoorIdAfter) ],
                               LexemeEndCheckF   = False,
                               LexemeMaintainedF = False,
                               EngineType        = engine.FORWARD,
                               ReloadStateExtern = TheAnalyzer.reload_state,
                               ParallelSmTerminalPairList = psml)

    counter_variable = Lng.REGISTER_NAME(E_R.Counter)
    variable_db.require(counter_variable)
    result[0:0] = "%s = 0;\n" % counter_variable
    return result
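
# The diagram's counting logic in miniature: a nested range is skipped by
# tracking 'open_n'. Runnable sketch over a plain string; quex generates the
# equivalent as state-machine code (illustration only):
def skip_nested_range_sketch(text, i, Opener, Closer):
    """'i' points right behind an opener; return the index right behind
    the matching closer, or None if the range stays open (SkipRangeOpen).
    """
    open_n = 1
    while i < len(text):
        if text.startswith(Closer, i):
            open_n -= 1
            i += len(Closer)
            if open_n == 0: return i     # balanced => RESTART analysis
        elif text.startswith(Opener, i):
            open_n += 1
            i += len(Opener)
        else:
            i += 1
    return None                          # end of stream reached

# skip_nested_range_sketch("a /* b /* c */ d */ e", 4, "/*", "*/")  =>  19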
Example #31
def do_with_counter(Mode, ModeNameList):
    txt = []
    Lng.debug_unit_name_set("Counter:%s" % Mode.name)
    if Mode.run_time_counter_db is not None:
        variable_db.init()
        txt.append(
            run_time_counter.get(Mode.run_time_counter_db, Mode.name)
        )

    analyzer_txt = do(Mode, ModeNameList)
    assert isinstance(analyzer_txt, list)
    txt.extend(analyzer_txt)
    return txt
Example #32
def _code_terminal_on_bad_indentation_character(code, ISetup, ModeName,
                                                incidence_db,
                                                BadIndentationIid):
    if ISetup.bad_character_set.get() is None:
        return
    on_bad_indentation_txt = Lng.SOURCE_REFERENCED(
        incidence_db[E_IncidenceIDs.INDENTATION_BAD])
    code.extend([
        "%s\n" % Lng.LABEL(DoorID.incidence(BadIndentationIid)),
        "#define BadCharacter (me->buffer._input_p[-1])\n",
        "%s\n" % on_bad_indentation_txt, "#undef  BadCharacter\n",
        "%s\n" % Lng.GOTO(DoorID.global_reentry())
    ])
Example #33
    def get_counter_text(self, ThePattern):
        """Get the text of the source code required for 'counting'. This information
        has been stored along with the pattern before any transformation happened.
        No database or anything is required at this point.
        """
        if ThePattern is None:
            default_counter_f = True
            text = Lng.DEFAULT_COUNTER_CALL()
        else:
            default_counter_f, \
            text               = counter_for_pattern.get(ThePattern)

        self.required_default_counter_f |= default_counter_f
        return "".join(Lng.REPLACE_INDENT(text))
Example #34
def get_setter_getter(Descr):
    """NOTE: All names are unique even in combined unions."""
    TL = Descr.type_name_length_max()
    NL = Descr.variable_name_length_max()
    variable_db = Descr.get_member_db()
    txt = ""
    for variable_name, info in variable_db.items():
        type_code = info[0]
        access    = info[1]
        type_str  = type_code.get_pure_text()
        txt += Lng._SOURCE_REFERENCE_BEGIN(type_code.sr)
        my_def = "    %s%s get_%s() const %s{ return %s; }" \
                 % (type_str,      " " * (TL - len(type_str)), 
                    variable_name, " " * ((NL + TL)- len(variable_name)), 
                    access)
        txt += my_def

        type_str = type_str.strip()
        type_str = type_str.replace("\t", " ")
        while type_str.find("  ") != -1:
            type_str = type_str.replace("  ", " ")
        if type_str not in ["char", "unsigned char", "singed char",
                            "short", "unsigned short", "singed short",
                            "int", "unsigned int", "singed int",
                            "long", "unsigned long", "singed long",
                            "float", "unsigned float", "singed float",
                            "double", "unsigned double", "singed double",
                            "uint8_t", "uint16_t", "uint32_t",
                            "int8_t", "int16_t", "int32_t",
                            "size_t", "uintptr_t", "ptrdiff_t"]:
            type_str += "&"

        txt += Lng._SOURCE_REFERENCE_BEGIN(type_code.sr)
        my_def = "    void%s set_%s(%s Value) %s{ %s = Value; }" \
               % (" " * (TL - len("void")), 
                  variable_name, type_str, " " * (NL + TL - (len(type_str) + len(variable_name))), 
                  access)
        txt += my_def

    txt += Lng._SOURCE_REFERENCE_END()
    return txt
Example #35
File: core.py Project: xxyzzzq/quex
def do():
    """Generates state machines for all modes. Each mode results into 
       a separate state machine that is stuck into a virtual function
       of a class derived from class 'quex_mode'.
    """
    if Setup.language == "DOT": 
        return do_plot()

    mode_description_db = quex_file_parser.do(Setup.input_mode_files)

    # (*) Generate the token ids
    #     (This needs to happen after the parsing of mode_db, since during that
    #      the token_id_db is developed.)
    if Setup.external_lexeme_null_object != "":
        # Assume external implementation
        token_id_header                        = None
        function_map_id_to_name_implementation = ""
    else:
        token_id_header                        = token_id_maker.do(Setup) 
        function_map_id_to_name_implementation = token_id_maker.do_map_id_to_name_function()

    # (*) [Optional] Make a customized token class
    class_token_header, \
    class_token_implementation = token_class_maker.do(function_map_id_to_name_implementation)

    if Setup.token_class_only_f:
        write_safely_and_close(blackboard.token_type_definition.get_file_name(), 
                                 do_token_class_info() \
                               + class_token_header)
        write_safely_and_close(Setup.output_token_class_file_implementation,
                               class_token_implementation)
        write_safely_and_close(Setup.output_token_id_file, token_id_header)
        Lng.straighten_open_line_pragmas(Setup.output_token_id_file)
        Lng.straighten_open_line_pragmas(Setup.output_token_class_file_implementation)
        Lng.straighten_open_line_pragmas(blackboard.token_type_definition.get_file_name())
        return

    # (*) implement the lexer mode-specific analyser functions
    #     During this process: mode_description_db --> mode_db
    function_analyzers_implementation, \
    mode_db                            = analyzer_functions_get(mode_description_db)

    # (*) Implement the 'quex' core class from a template
    # -- do the coding of the class framework
    configuration_header    = configuration.do(mode_db)
    analyzer_header         = analyzer_class.do(mode_db)
    analyzer_implementation = analyzer_class.do_implementation(mode_db) + "\n"
    mode_implementation     = mode_classes.do(mode_db)

    # (*) [Optional] Generate a converter helper
    codec_converter_helper_header, \
    codec_converter_helper_implementation = codec_converter_helper.do()
    
    # Implementation (Potential Inline Functions)
    if class_token_implementation is not None:
        analyzer_implementation += class_token_implementation + "\n"

    # Engine (Source Code)
    engine_txt =   Lng.ENGINE_TEXT_EPILOG()               + "\n" \
                 + mode_implementation                    + "\n" \
                 + function_analyzers_implementation      + "\n" \
                 + function_map_id_to_name_implementation + "\n" 

    # (*) Write Files ___________________________________________________________________
    if codec_converter_helper_header is not None:
        write_safely_and_close(Setup.output_buffer_codec_header,   
                               codec_converter_helper_header) 
        write_safely_and_close(Setup.output_buffer_codec_header_i, 
                               codec_converter_helper_implementation) 

    if token_id_header is not None:
        write_safely_and_close(Setup.output_token_id_file, token_id_header)

    write_safely_and_close(Setup.output_configuration_file, configuration_header)

    if Setup.language == "C":
        engine_txt     += analyzer_implementation
    else:
        analyzer_header = analyzer_header.replace("$$ADDITIONAL_HEADER_CONTENT$$", 
                                                  analyzer_implementation)

    write_safely_and_close(Setup.output_header_file, analyzer_header)
    write_safely_and_close(Setup.output_code_file,   engine_txt)

    if class_token_header is not None:
        write_safely_and_close(blackboard.token_type_definition.get_file_name(), 
                               class_token_header)

    Lng.straighten_open_line_pragmas(Setup.output_header_file)
    Lng.straighten_open_line_pragmas(Setup.output_code_file)
    if not blackboard.token_type_definition.manually_written():
        Lng.straighten_open_line_pragmas(blackboard.token_type_definition.get_file_name())

    if Setup.source_package_directory != "":
        source_package.do()
Example #36
from itertools import permutations

def rw_generator(N):
    """Yield every distinct ordering of one 'R' (read) and 'write_n' 'W's
    (writes), for each write count 'write_n' in 0..N-1; remaining slots
    are blank.
    """
    for write_n in xrange(N):
        base = ["R"] + [" "] * (N - write_n - 1) + ["W"] * write_n
        for setting in set(permutations(base, N)):
            yield setting 

def rw_get(Flag):
    if   Flag == "R": return Op.Assign(E_R.InputP,       E_R.LexemeStartP)
    elif Flag == "W": return Op.Assign(E_R.LexemeStartP, E_R.InputP)
    else:             return Op.Assign(E_R.Column,       E_R.CharacterBeginP)

def string_cl(Name, Cl):
    if len(Cl) == 0:
        return "    %s: <empty>" % Name
    txt = "    %s: [0] %s\n" % (Name, Cl[0])
    for i, cmd in enumerate(Cl[1:]):
        txt += "       [%i] %s\n" % (i+1, cmd)
    return txt

def print_cl(Name, Cl):
    print string_cl(Name, Cl)

class MiniAnalyzer:
    def __init__(self):
        self.engine_type = engine.FORWARD

Lng.register_analyzer(MiniAnalyzer())
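
# A small usage sketch of 'rw_generator' above (added illustration):
if __name__ == "__main__":
    # For N=2 this prints four settings, in some order:
    #   ('R', ' '), (' ', 'R'), ('R', 'W'), ('W', 'R')
    for setting in rw_generator(2):
        print(setting)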