Example #1
def __prepare_end_of_stream_action(Mode, IndentationSupportF,
                                   BeginOfLineSupportF):
    if not Mode.has_code_fragment_list("on_end_of_stream"):
        # We cannot make any assumptions about the token class, i.e. whether
        # it can take a lexeme or not. Thus, no passing of lexeme here.
        txt = "self_send(__QUEX_SETTING_TOKEN_ID_TERMINATION);\n"
        txt += "RETURN;\n"

        Mode.set_code_fragment_list("on_end_of_stream", CodeFragment(txt))

    if IndentationSupportF:
        if Mode.default_indentation_handler_sufficient():
            code = "QUEX_NAME(on_indentation)(me, /*Indentation*/0, LexemeNull);\n"
        else:
            code = "QUEX_NAME(%s_on_indentation)(me, /*Indentation*/0, LexemeNull);\n" % Mode.name

        code_fragment = CodeFragment(code)
        Mode.insert_code_fragment_at_front("on_end_of_stream", code_fragment)

    # RETURNS: end_of_stream_action, db
    return __prepare(Mode,
                     Mode.get_code_fragment_list("on_end_of_stream"),
                     None,
                     EOF_ActionF=True,
                     BeginOfLineSupportF=BeginOfLineSupportF)
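
The handler is synthesized only when the mode does not already define one, and the indentation hook is pushed to the front of the handler list. The following is a minimal, stand-alone sketch of the "install a default only if absent" part; the CodeFragment stand-in and the helper function are illustrative and not quex's real API beyond the text-wrapping behavior visible in these examples.

class CodeFragment:
    """Minimal stand-in: wraps a chunk of generated code as plain text."""
    def __init__(self, Code=""):
        self.__code = Code
    def get_pure_code(self):
        return self.__code

def install_default_handler(handler_db, event_name, default_txt):
    # Hypothetical helper: add the default only if no handler exists yet,
    # mirroring the 'if not Mode.has_code_fragment_list(...)' guard above.
    if event_name not in handler_db:
        handler_db[event_name] = [CodeFragment(default_txt)]
    return handler_db[event_name]

handlers = {}
install_default_handler(handlers, "on_end_of_stream",
                        "self_send(__QUEX_SETTING_TOKEN_ID_TERMINATION);\nRETURN;\n")
print(handlers["on_end_of_stream"][0].get_pure_code())
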
Example #2
    def __init__(self, FileName, ClassName, NameSpace, ClassNameSafe,
                 TokenIDType):

        self.__file_name = FileName
        self.class_name = ClassName
        self.name_space = NameSpace
        self.class_name_safe = ClassNameSafe

        self.column_number_type = CodeFragment("size_t")
        self.line_number_type = CodeFragment("size_t")
        self.token_id_type = CodeFragment(TokenIDType)
Example #3
def __start_mode(applicable_mode_name_list, mode_name_list):
    """If more then one mode is defined, then that requires an explicit 
       definition 'start = mode'.
    """
    assert len(applicable_mode_name_list) != 0

    start_mode = blackboard.initial_mode.get_pure_code()
    if start_mode == "":
        # Choose an applicable mode as start mode
        start_mode = applicable_mode_name_list[0]
        blackboard.initial_mode = CodeFragment(start_mode)
        if len(applicable_mode_name_list) > 1:
            error_msg("No initial mode defined via 'start' while more than one applicable mode exists.\n" + \
                      "Use for example 'start = %s;' in the quex source file to define an initial mode." \
                      % start_mode)
        # This Branch: start mode is applicable and present

    else:
        FileName = blackboard.initial_mode.filename
        LineN = blackboard.initial_mode.line_n
        # Start mode present and applicable?
        verify_word_in_list(start_mode, mode_name_list,
                            "Start mode '%s' is not defined." % start_mode,
                            FileName, LineN)
        verify_word_in_list(
            start_mode, applicable_mode_name_list,
            "Start mode '%s' is inheritable only and cannot be instantiated." %
            start_mode, FileName, LineN)
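
The rule in the docstring can be restated in a simplified, stand-alone form: an explicit 'start = mode;' is required as soon as more than one applicable mode exists, and an explicit start mode must itself be applicable. The sketch below uses exceptions where quex uses error_msg/verify_word_in_list, so it is an illustration of the rule, not the original error handling.

def pick_start_mode(explicit_start, applicable_mode_names):
    # Simplified re-statement of the rule above (hypothetical helper).
    assert len(applicable_mode_names) != 0
    if explicit_start:
        if explicit_start not in applicable_mode_names:
            raise ValueError("start mode '%s' is not an applicable mode" % explicit_start)
        return explicit_start
    if len(applicable_mode_names) > 1:
        raise ValueError("no 'start = ...;' given, but %i applicable modes exist"
                         % len(applicable_mode_names))
    return applicable_mode_names[0]

print(pick_start_mode("", ["PROGRAM"]))                  # only one mode -> PROGRAM
print(pick_start_mode("STRING", ["PROGRAM", "STRING"]))  # explicit choice -> STRING
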
Example #4
    def __init__(self, Core=None):
        if Core is None:
            self._file_name                = Setup.output_token_class_file
            self._file_name_implementation = Setup.output_token_class_file_implementation
            if Setup.token_class_name.find("::") != -1:
                Setup.token_class_name,       \
                Setup.token_class_name_space, \
                Setup.token_class_name_safe = \
                        read_namespaced_name(Setup.token_class_name, 
                                             "token class (options --token-class, --tc)")
            self.class_name            = Setup.token_class_name
            self.class_name_safe       = Setup.token_class_name_safe
            self.name_space            = Setup.token_class_name_space
            self.open_for_derivation_f      = False
            self.token_contains_token_id_f  = True
            self.token_id_type         = CodeFragment("size_t")
            self.column_number_type    = CodeFragment("size_t")
            self.line_number_type      = CodeFragment("size_t")

            self.distinct_db = {}
            self.union_db    = {}

            for name in token_type_code_fragment_db.keys():
                self.__dict__[name] = CodeFragment("")

        else:
            self._file_name                = Core._file_name
            self._file_name_implementation = Core._file_name_implementation
            self.class_name            = Core.class_name
            self.class_name_safe       = Core.class_name_safe
            self.name_space            = Core.name_space
            self.open_for_derivation_f      = Core.open_for_derivation_f
            self.token_contains_token_id_f  = Core.token_contains_token_id_f
            self.token_id_type         = Core.token_id_type
            self.column_number_type    = Core.column_number_type
            self.line_number_type      = Core.line_number_type

            self.distinct_db           = Core.distinct_db
            self.union_db              = Core.union_db

            for name in token_type_code_fragment_db.keys():
                self.__dict__[name] = Core.__dict__[name]
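
Examples #4, #6, #8 and #9 all rely on the same constructor idiom: 'Core=None' switches between building the descriptor from the global Setup and mirroring an existing core object. A generic sketch of that idiom, with hypothetical names that are not part of quex:

class DescriptorCore:
    def __init__(self, Core=None, Defaults=None):
        if Core is None:
            # Fresh object: take values from a defaults source (quex uses 'Setup').
            defaults = Defaults or {}
            self.class_name = defaults.get("class_name", "Token")
            self.member_db  = {}
        else:
            # 'Copy constructor': mirror the members of the given core.
            self.class_name = Core.class_name
            self.member_db  = Core.member_db   # shared by reference, as in the example above

base    = DescriptorCore(Defaults={"class_name": "MyToken"})
derived = DescriptorCore(Core=base)
print(derived.class_name)   # -> MyToken
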
Example #5
def __prepare_on_failure_action(Mode, BeginOfLineSupportF,
                                require_terminating_zero_preparation_f):
    if not Mode.has_code_fragment_list("on_failure"):
        txt = "QUEX_ERROR_EXIT(\"\\n    Match failure in mode '%s'.\\n\"\n" % Mode.name
        txt += "                \"    No 'on_failure' section provided for this mode.\\n\"\n"
        txt += "                \"    Proposal: Define 'on_failure' and analyze 'Lexeme'.\\n\");\n"
        Mode.set_code_fragment_list("on_failure", CodeFragment(txt))

    # RETURNS: on_failure_action, db
    return __prepare(Mode,
                     Mode.get_code_fragment_list("on_failure"),
                     None,
                     Default_ActionF=True,
                     BeginOfLineSupportF=BeginOfLineSupportF,
                     require_terminating_zero_preparation_f=
                     require_terminating_zero_preparation_f)
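
For a concrete picture of what the default 'on_failure' handler produces, the snippet below builds the same text for a hypothetical mode name and prints the resulting C code fragment; only the mode name is invented.

mode_name = "PROGRAM"   # hypothetical mode name, for illustration only
txt  = "QUEX_ERROR_EXIT(\"\\n    Match failure in mode '%s'.\\n\"\n" % mode_name
txt += "                \"    No 'on_failure' section provided for this mode.\\n\"\n"
txt += "                \"    Proposal: Define 'on_failure' and analyze 'Lexeme'.\\n\");\n"
print(txt)
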
Example #6
    def __init__(self, Core=None):
        if Core is None:
            self._file_name = Setup.output_token_class_file
            self._file_name_implementation = Setup.output_token_class_file_implementation
            if Setup.token_class_name.find("::") != -1:
                Setup.token_class_name,       \
                Setup.token_class_name_space, \
                Setup.token_class_name_safe = \
                        read_namespaced_name(Setup.token_class_name,
                                             "token class (options --token-class, --tc)")
            self.class_name = Setup.token_class_name
            self.class_name_safe = Setup.token_class_name_safe
            self.name_space = Setup.token_class_name_space
            self.open_for_derivation_f = False
            self.token_contains_token_id_f = True
            self.token_id_type = CodeFragment("size_t")
            self.column_number_type = CodeFragment("size_t")
            self.line_number_type = CodeFragment("size_t")

            self.distinct_db = {}
            self.union_db = {}

            for name in token_type_code_fragment_db.keys():
                self.__dict__[name] = CodeFragment("")

        else:
            self._file_name = Core._file_name
            self._file_name_implementation = Core._file_name_implementation
            self.class_name = Core.class_name
            self.class_name_safe = Core.class_name_safe
            self.name_space = Core.name_space
            self.open_for_derivation_f = Core.open_for_derivation_f
            self.token_contains_token_id_f = Core.token_contains_token_id_f
            self.token_id_type = Core.token_id_type
            self.column_number_type = Core.column_number_type
            self.line_number_type = Core.line_number_type

            self.distinct_db = Core.distinct_db
            self.union_db = Core.union_db

            for name in token_type_code_fragment_db.keys():
                self.__dict__[name] = Core.__dict__[name]
Example #7
    def __init__(self, Name, Filename, LineN):

        self.filename = Filename
        self.line_n = LineN

        self.name = Name
        self.base_modes = []
        # Read pattern information into a dictionary object. This allows for the following:
        # (i)   inheritance of pattern behavior in different modes.
        # (ii)  'virtual' patterns in the sense that their behavior can be
        #       overwritten.
        self.__matches = {}            # genuine patterns as specified in the mode declaration

        self.__repriorization_db = {}  # patterns of the base class to be reprioritized
                                       # map: pattern --> new pattern index
        self.__deletion_db = {}        # patterns of the base class to be deleted

        # The list of actual pattern-action pairs is constructed inside the function
        # '__post_process(...)'. Function 'get_pattern_action_pairs(...)' calls it
        # in case this variable is still [].
        self.__pattern_action_pair_list = []

        # (*) Default Options
        self.options = {}
        for name, descr in mode_option_info_db.items():
            # Not only copy the reference, copy the default value object!
            self.options[name] = deepcopy(descr.default_value)

        # (*) Default Event Handler: Empty
        self.events = {}
        for name in event_handler_db.keys():
            self.events[name] = CodeFragment()

        # Register ModeDescription at the mode database
        mode_description_db[Name] = self
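
The 'deepcopy(descr.default_value)' call matters because option defaults may be mutable objects (lists, for example); copying only the reference would let one mode's changes leak into every other mode. A small stand-alone demonstration of the difference, with an invented option name:

from copy import deepcopy

default_value = ["ALL"]            # a mutable default value, e.g. for a list-valued option

# Sharing the reference: both 'modes' see the later modification.
options_a = {"skip": default_value}
options_b = {"skip": default_value}
options_a["skip"].append("NONE")
print(options_b["skip"])           # -> ['ALL', 'NONE']  (leaked into the other mode)

# Deep-copying the default keeps every mode independent.
default_value = ["ALL"]
options_a = {"skip": deepcopy(default_value)}
options_b = {"skip": deepcopy(default_value)}
options_a["skip"].append("NONE")
print(options_b["skip"])           # -> ['ALL']
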
Example #8
class TokenTypeDescriptorCore:
    """Object used during the generation of the TokenTypeDescriptor."""
    def __init__(self, Core=None):
        if Core is None:
            self._file_name = Setup.output_token_class_file
            self._file_name_implementation = Setup.output_token_class_file_implementation
            if Setup.token_class_name.find("::") != -1:
                Setup.token_class_name,       \
                Setup.token_class_name_space, \
                Setup.token_class_name_safe = \
                        read_namespaced_name(Setup.token_class_name,
                                             "token class (options --token-class, --tc)")
            self.class_name = Setup.token_class_name
            self.class_name_safe = Setup.token_class_name_safe
            self.name_space = Setup.token_class_name_space
            self.open_for_derivation_f = False
            self.token_contains_token_id_f = True
            self.token_id_type = CodeFragment("size_t")
            self.column_number_type = CodeFragment("size_t")
            self.line_number_type = CodeFragment("size_t")

            self.distinct_db = {}
            self.union_db = {}

            for name in token_type_code_fragment_db.keys():
                self.__dict__[name] = CodeFragment("")

        else:
            self._file_name = Core._file_name
            self._file_name_implementation = Core._file_name_implementation
            self.class_name = Core.class_name
            self.class_name_safe = Core.class_name_safe
            self.name_space = Core.name_space
            self.open_for_derivation_f = Core.open_for_derivation_f
            self.token_contains_token_id_f = Core.token_contains_token_id_f
            self.token_id_type = Core.token_id_type
            self.column_number_type = Core.column_number_type
            self.line_number_type = Core.line_number_type

            self.distinct_db = Core.distinct_db
            self.union_db = Core.union_db

            for name in token_type_code_fragment_db.keys():
                self.__dict__[name] = Core.__dict__[name]

    def set_file_name(self, FileName):
        self._file_name = FileName
        ext = Setup.language_db[Setup.language].extension_db[
            Setup.output_file_naming_scheme][E_Files.HEADER_IMPLEMTATION]
        self._file_name_implementation = FileName + ext

    def __repr__(self):
        txt = ""
        if self._file_name != "":
            txt += "file name: '%s'\n" % self._file_name
        txt += "class:     '%s'\n" % self.class_name
        if self.open_for_derivation_f:
            txt += "           (with virtual destructor)\n"
        if self.token_contains_token_id_f == False:
            txt += "           (token id not part of token object)\n"
        txt += "namespace: '%s'\n" % repr(self.name_space)[1:-1]
        txt += "type(token_id)      = %s\n" % self.token_id_type.get_pure_code(
        )
        txt += "type(column_number) = %s\n" % self.column_number_type.get_pure_code(
        )
        txt += "type(line_number)   = %s\n" % self.line_number_type.get_pure_code(
        )

        txt += "distinct members {\n"
        # '0' to make sure that it works on an empty sequence too.
        L = self.distinct_members_type_name_length_max()
        for name, type_code in self.distinct_db.items():
            txt += "    %s%s %s\n" % (type_code.get_pure_code(), " " *
                                      (L - len(type_code.get_pure_code())),
                                      name)
        txt += "}\n"
        txt += "union members {\n"

        # '0' to make sure that it works on an empty sequence too.
        L = self.union_members_type_name_length_max()
        for name, type_descr in self.union_db.items():
            if type(type_descr) == dict:
                txt += "    {\n"
                for sub_name, sub_type in type_descr.items():
                    txt += "        %s%s %s\n" % \
                           (sub_type.get_pure_code(),
                            " " * (L - len(sub_type.get_pure_code())-4),
                            sub_name)
                txt += "    }\n"
            else:
                txt += "    %s%s %s\n" % \
                       (type_descr.get_pure_code(),
                        " " * (L - len(type_descr.get_pure_code())),
                        name)
        txt += "}\n"

        # constructor / copy / destructor
        if self.constructor.get_pure_code() != "":
            txt += "constructor {\n"
            txt += self.constructor.get_code()
            txt += "}"

        if self.copy.get_pure_code() != "":
            txt += "copy {\n"
            txt += self.copy.get_code()
            txt += "}"

        if self.destructor.get_pure_code() != "":
            txt += "destructor {\n"
            txt += self.destructor.get_code()
            txt += "}"

        if self.body.get_pure_code() != "":
            txt += "body {\n"
            txt += self.body.get_code()
            txt += "}"

        return txt

    def manually_written(self):
        return False
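
The "'0'" comment refers to keeping the length-maximum computation safe for an empty member dictionary; the padding then aligns each member name behind the longest type name. A stand-alone sketch of that alignment, in which plain strings stand in for the CodeFragment-typed fields and the length-max helper is an assumed, simplified version of the descriptor's method:

def type_name_length_max(db):
    # '0' as a floor so the computation also works on an empty dictionary.
    return max([len(t) for t in db.values()] + [0])

distinct_db = {"number": "size_t", "text": "const char*"}
L = type_name_length_max(distinct_db)
for name, type_name in distinct_db.items():
    # Prints each member with its type name padded to the longest type name.
    print("    %s%s %s" % (type_name, " " * (L - len(type_name)), name))
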
Example #9
class TokenTypeDescriptorCore:
    """Object used during the generation of the TokenTypeDescriptor."""
    def __init__(self, Core=None):
        if Core is None:
            self._file_name                = Setup.output_token_class_file
            self._file_name_implementation = Setup.output_token_class_file_implementation
            if Setup.token_class_name.find("::") != -1:
                Setup.token_class_name,       \
                Setup.token_class_name_space, \
                Setup.token_class_name_safe = \
                        read_namespaced_name(Setup.token_class_name, 
                                             "token class (options --token-class, --tc)")
            self.class_name            = Setup.token_class_name
            self.class_name_safe       = Setup.token_class_name_safe
            self.name_space            = Setup.token_class_name_space
            self.open_for_derivation_f      = False
            self.token_contains_token_id_f  = True
            self.token_id_type         = CodeFragment("size_t")
            self.column_number_type    = CodeFragment("size_t")
            self.line_number_type      = CodeFragment("size_t")

            self.distinct_db = {}
            self.union_db    = {}

            for name in token_type_code_fragment_db.keys():
                self.__dict__[name] = CodeFragment("")

        else:
            self._file_name                = Core._file_name
            self._file_name_implementation = Core._file_name_implementation
            self.class_name            = Core.class_name
            self.class_name_safe       = Core.class_name_safe
            self.name_space            = Core.name_space
            self.open_for_derivation_f      = Core.open_for_derivation_f
            self.token_contains_token_id_f  = Core.token_contains_token_id_f
            self.token_id_type         = Core.token_id_type
            self.column_number_type    = Core.column_number_type
            self.line_number_type      = Core.line_number_type

            self.distinct_db           = Core.distinct_db
            self.union_db              = Core.union_db

            for name in token_type_code_fragment_db.keys():
                self.__dict__[name] = Core.__dict__[name]
            

    def set_file_name(self, FileName):
        self._file_name = FileName
        ext = Setup.language_db[Setup.language].extension_db[Setup.output_file_naming_scheme][E_Files.HEADER_IMPLEMTATION]
        self._file_name_implementation = FileName + ext

    def __repr__(self):
        txt = ""
        if self._file_name != "": 
            txt += "file name: '%s'\n" % self._file_name
        txt += "class:     '%s'\n" % self.class_name
        if self.open_for_derivation_f: 
            txt += "           (with virtual destructor)\n"
        if self.token_contains_token_id_f == False:
            txt += "           (token id not part of token object)\n"
        txt += "namespace: '%s'\n" % repr(self.name_space)[1:-1]
        txt += "type(token_id)      = %s\n" % self.token_id_type.get_pure_code()
        txt += "type(column_number) = %s\n" % self.column_number_type.get_pure_code()
        txt += "type(line_number)   = %s\n" % self.line_number_type.get_pure_code()

        txt += "distinct members {\n"
        # '0' to make sure that it works on an empty sequence too.
        L = self.distinct_members_type_name_length_max()
        for name, type_code in self.distinct_db.items():
            txt += "    %s%s %s\n" % (type_code.get_pure_code(), " " * (L - len(type_code.get_pure_code())), name)
        txt += "}\n"
        txt += "union members {\n"

        # '0' to make sure that it works on an empty sequence too.
        L = self.union_members_type_name_length_max()
        for name, type_descr in self.union_db.items():
            if type(type_descr) == dict:
                txt += "    {\n"
                for sub_name, sub_type in type_descr.items():
                    txt += "        %s%s %s\n" % \
                           (sub_type.get_pure_code(), 
                            " " * (L - len(sub_type.get_pure_code())-4), 
                            sub_name)
                txt += "    }\n"
            else:
                txt += "    %s%s %s\n" % \
                       (type_descr.get_pure_code(), 
                        " " * (L - len(type_descr.get_pure_code())), 
                        name)
        txt += "}\n"

        # constructor / copy / destructor
        if self.constructor.get_pure_code() != "":
            txt += "constructor {\n"
            txt += self.constructor.get_code()
            txt += "}"
        
        if self.copy.get_pure_code() != "":
            txt += "copy {\n"
            txt += self.copy.get_code()
            txt += "}"

        if self.destructor.get_pure_code() != "":
            txt += "destructor {\n"
            txt += self.destructor.get_code()
            txt += "}"

        if self.body.get_pure_code() != "":
            txt += "body {\n"
            txt += self.body.get_code()
            txt += "}"

        return txt

    def manually_written(self):
        return False
Example #10
def __prepare(Mode,
              CodeFragment_or_CodeFragments,
              ThePattern,
              Default_ActionF=False,
              EOF_ActionF=False,
              SelfCountingActionF=False,
              BeginOfLineSupportF=False,
              require_terminating_zero_preparation_f=False):
    """-- If there are multiple handlers for a single event they are combined
    
       -- Adding debug information printer (if desired)
    
       -- The task of this function is it to adorn the action code for each pattern with
          code for line and column number counting.
    """
    assert Mode.__class__.__name__ == "Mode"
    assert ThePattern is None or ThePattern.__class__.__name__ == "Pattern"
    assert type(Default_ActionF) == bool
    assert type(EOF_ActionF) == bool
    # We assume that any state machine presented here has been properly created
    # and thus contains some side information about newline number, character number, etc.

    if type(CodeFragment_or_CodeFragments) == list:
        assert Default_ActionF or EOF_ActionF, \
               "Action code formatting: Multiple Code Fragments can only be specified for default or\n" + \
               "end of stream action."
        CodeFragmentList = CodeFragment_or_CodeFragments
    else:
        CodeFragmentList = [CodeFragment_or_CodeFragments]

    user_code = ""
    variable_db = {}

    # (*) Code to be performed on every match -- before the related action
    on_match_code = ""
    if Mode.has_code_fragment_list("on_match"):
        on_match_code, rtzp_f = get_code(
            Mode.get_code_fragment_list("on_match"), variable_db)
        require_terminating_zero_preparation_f = require_terminating_zero_preparation_f or rtzp_f

    # (*) Code to count line and column numbers
    lc_count_code = ""
    if not SelfCountingActionF:
        lc_count_code = "    %s\n" % __get_line_and_column_counting(
            ThePattern, EOF_ActionF)

    #if (not Default_ActionF) and (not EOF_ActionF):
    #    lc_count_code += "    __QUEX_ASSERT_COUNTER_CONSISTENCY(&self.counter);\n"

    # (*) THE user defined action to be performed in case of a match
    user_code, rtzp_f = get_code(CodeFragmentList, variable_db)
    require_terminating_zero_preparation_f = require_terminating_zero_preparation_f or rtzp_f

    store_last_character_str = ""
    if BeginOfLineSupportF:
        store_last_character_str = "    %s\n" % LanguageDB.ASSIGN(
            "me->buffer._character_before_lexeme_start",
            LanguageDB.INPUT_P_DEREFERENCE(-1))

    set_terminating_zero_str = ""
    if require_terminating_zero_preparation_f:
        set_terminating_zero_str += "    QUEX_LEXEME_TERMINATING_ZERO_SET(&me->buffer);\n"

    txt = ""
    txt += lc_count_code
    txt += store_last_character_str
    txt += set_terminating_zero_str
    txt += on_match_code
    txt += "    {\n"
    txt += user_code
    txt += "\n    }"

    return CodeFragment(txt), variable_db
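
Stripped of the quex-specific helpers, the function's result is the concatenation of optional pieces in a fixed order: counting code, begin-of-line bookkeeping, terminating-zero preparation, the shared 'on_match' code, and finally the user action wrapped in a block. A hedged, stand-alone sketch of that assembly; the argument values in the usage line are placeholders, not quex output.

def assemble_action(user_code,
                    lc_count_code="",
                    store_last_character_str="",
                    set_terminating_zero_str="",
                    on_match_code=""):
    # Same ordering as the 'txt += ...' sequence above; every piece except the
    # user action may be empty, depending on the mode's configuration.
    txt = ""
    txt += lc_count_code
    txt += store_last_character_str
    txt += set_terminating_zero_str
    txt += on_match_code
    txt += "    {\n"
    txt += user_code
    txt += "\n    }"
    return txt

print(assemble_action("        self_send(QUEX_TKN_NUMBER);  /* hypothetical user action */",
                      lc_count_code="    /* line/column counting code */\n"))
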