Example #1
def __setup_analyzer_class(Setup):
    """ X0::X1::X2::ClassName --> analyzer_class_name = ClassName
                                  analyzer_name_space = ["X0", "X1", "X2"]
        ::ClassName --> analyzer_class_name = ClassName
                        analyzer_name_space = []
        ClassName --> analyzer_class_name = ClassName
                      analyzer_name_space = ["quex"]
    """
    if Setup.analyzer_class.find("::") == -1:
        Setup.analyzer_class = "quex::%s" % Setup.analyzer_class

    Setup.analyzer_class_name, \
    Setup.analyzer_name_space, \
    Setup.analyzer_name_safe   = \
         read_namespaced_name(Setup.analyzer_class,
                              "analyzer class (options -o, --analyzer-class)")

    if Setup.show_name_spaces_f:
        print "Analyzer: {"
        print "     class_name:  %s;" % Setup.analyzer_class_name
        print "     name_space:  %s;" % repr(Setup.analyzer_name_space)[1:-1]
        print "     name_prefix: %s;" % Setup.analyzer_name_safe
        print "}"

    Setup.analyzer_derived_class_name,       \
    Setup.analyzer_derived_class_name_space, \
    Setup.analyzer_derived_class_name_safe = \
         read_namespaced_name(Setup.analyzer_derived_class_name,
                              "derived analyzer class (options --derived-class, --dc)",
                              AllowEmptyF=True)
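
For reference, the namespace handling documented in the docstring above can be mirrored by a small stand-in helper. This is a minimal sketch of the string-splitting behaviour only; the real read_namespaced_name() also accepts a file handle and reports errors through its second argument, and the exact format of the "safe" name is an assumption here.

def split_namespaced_name(Spec):
    # Illustrative stand-in only, not the real read_namespaced_name().
    # "X0::X1::X2::ClassName" --> ("ClassName", ["X0", "X1", "X2"], "X0_X1_X2_ClassName")
    # "::ClassName"           --> ("ClassName", [],                 "ClassName")
    parts      = Spec.split("::")
    class_name = parts[-1]
    name_space = [p for p in parts[:-1] if p != ""]
    name_safe  = "_".join(name_space + [class_name])   # "safe" format is an assumption
    return class_name, name_space, name_safe

# A bare "ClassName" is prefixed with "quex::" by the caller above, which is how
# the third docstring case (analyzer_name_space = ["quex"]) comes about:
print(split_namespaced_name("quex::MyLexer"))   # ('MyLexer', ['quex'], 'quex_MyLexer')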
Example #2
def __setup_analyzer_class(Setup):
    """ X0::X1::X2::ClassName --> analyzer_class_name = ClassName
                                  analyzer_name_space = ["X0", "X1", "X2"]
        ::ClassName --> analyzer_class_name = ClassName
                        analyzer_name_space = []
        ClassName --> analyzer_class_name = ClassName
                      analyzer_name_space = ["quex"]
    """
    if Setup.analyzer_class.find("::") == -1:
        Setup.analyzer_class = "quex::%s" % Setup.analyzer_class

    Setup.analyzer_class_name, \
    Setup.analyzer_name_space, \
    Setup.analyzer_name_safe   = \
         read_namespaced_name(Setup.analyzer_class, 
                              "analyzer class (options -o, --analyzer-class)")

    if Setup.show_name_spaces_f:
        print "Analyzer: {"
        print "     class_name:  %s;" % Setup.analyzer_class_name
        print "     name_space:  %s;" % repr(Setup.analyzer_name_space)[1:-1]
        print "     name_prefix: %s;" % Setup.analyzer_name_safe   
        print "}"

    Setup.analyzer_derived_class_name,       \
    Setup.analyzer_derived_class_name_space, \
    Setup.analyzer_derived_class_name_safe = \
         read_namespaced_name(Setup.analyzer_derived_class_name, 
                              "derived analyzer class (options --derived-class, --dc)",
                              AllowEmptyF=True)
Example #3
def __setup_lexeme_null(Setup):
    if len(Setup.external_lexeme_null_object) != 0:
        lexeme_null_object = Setup.external_lexeme_null_object
        default_name_space = Setup.analyzer_name_space
    elif Setup.token_class_only_f:
        lexeme_null_object = "LexemeNullObject"
        default_name_space = Setup.token_class_name_space
    else:
        lexeme_null_object = "LexemeNullObject"
        default_name_space = Setup.analyzer_name_space

    if lexeme_null_object.find("::") == -1:
        # By default, place the lexeme null object in the analyzer's namespace.
        if len(Setup.analyzer_name_space) != 0:
            name_space = reduce(lambda x, y: "%s::%s" % (x, y), default_name_space)
        else:
            name_space = ""
        lexeme_null_object = "%s::%s" % (name_space, lexeme_null_object)

    Setup.lexeme_null_name,        \
    Setup.lexeme_null_namespace,   \
    Setup.lexeme_null_name_safe  = \
         read_namespaced_name(lexeme_null_object, 
                              "lexeme null object (options --lexeme-null-object, --lno)")
    Setup.lexeme_null_full_name_cpp = "::" 
    for name in Setup.lexeme_null_namespace:
        Setup.lexeme_null_full_name_cpp += name + "::"
    Setup.lexeme_null_full_name_cpp += Setup.lexeme_null_name
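
The two joining idioms used above can be checked in isolation. Note that these examples are Python 2 code, where reduce() is a builtin; the explicit import below keeps the sketch runnable under Python 3 as well. The namespace values are made up.

from functools import reduce   # builtin in Python 2, explicit import in Python 3

name_space = ["quex", "demo"]                                    # illustrative values
joined     = reduce(lambda x, y: "%s::%s" % (x, y), name_space)  # -> "quex::demo"

full_name_cpp = "::"
for name in name_space:
    full_name_cpp += name + "::"
full_name_cpp += "LexemeNullObject"
assert full_name_cpp == "::quex::demo::LexemeNullObject"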
Example #4
def __parse_section(fh, descriptor, already_defined_list):
    global token_type_code_fragment_db
    assert type(already_defined_list) == list

    SubsectionList = ["name", "file_name", "standard", "distinct", "union", "inheritable", "noid"] \
                      + token_type_code_fragment_db.keys()

    position = fh.tell()
    skip_whitespace(fh)
    word = read_identifier(fh)
    if word == "":
        fh.seek(position)
        if check(fh, "}"): 
            fh.seek(position) 
            return False
        error_msg("Missing token_type section ('standard', 'distinct', or 'union').", fh)

    verify_word_in_list(word, SubsectionList, 
                        "Subsection '%s' not allowed in token_type section." % word, fh)

    if word == "name":
        if not check(fh, "="):
            error_msg("Missing '=' in token_type 'name' specification.", fh)
        descriptor.class_name, descriptor.name_space, descriptor.class_name_safe = read_namespaced_name(fh, "token_type")
        if not check(fh, ";"):
            error_msg("Missing terminating ';' in token_type 'name' specification.", fh)

    elif word == "inheritable":
        descriptor.open_for_derivation_f = True
        check_or_die(fh, ";")

    elif word == "noid":
        descriptor.token_contains_token_id_f = False
        check_or_die(fh, ";")

    elif word == "file_name":
        if not check(fh, "="):
            error_msg("Missing '=' in token_type 'file_name' specification.", fh)
        descriptor.set_file_name(read_until_letter(fh, ";"))
        if not check(fh, ";"):
            error_msg("Missing terminating ';' in token_type 'file_name' specification.", fh)

    elif word in ["standard", "distinct", "union"]:
        if   word == "standard": parse_standard_members(fh, word, descriptor, already_defined_list)
        elif word == "distinct": parse_distinct_members(fh, word, descriptor, already_defined_list)
        elif word == "union":    parse_union_members(fh, word, descriptor, already_defined_list)

        if not check(fh, "}"):
            fh.seek(position)
            error_msg("Missing closing '}' at end of token_type section '%s'." % word, fh);

    elif word in token_type_code_fragment_db.keys():
        fragment     = code_fragment.parse(fh, word, AllowBriefTokenSenderF=False)        
        descriptor.__dict__[word] = fragment

    else: 
        assert False, "This code section section should not be reachable because 'word'\n" + \
                      "was checked to fit in one of the 'elif' cases."

    return True
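
Read backwards, the branches above outline the input accepted inside a token_type section: a keyword, optionally '=' and a value, and a terminating ';' (or a '{ ... }' block for the 'standard', 'distinct' and 'union' member sections). The fragment below is a sketch inferred only from these checks; it is not copied from the Quex documentation, and the concrete names are made up.

name       = mylexer::Token;
file_name  = MyLexer-token.h;
inheritable;
noid;

The 'name' value is parsed via read_namespaced_name(), 'file_name' is read verbatim up to the ';', and 'inheritable'/'noid' merely flip the corresponding descriptor flags.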
Example #5
def __setup_lexeme_null(Setup):
    if len(Setup.external_lexeme_null_object) != 0:
        lexeme_null_object = Setup.external_lexeme_null_object
        default_name_space = Setup.analyzer_name_space
    elif Setup.token_class_only_f:
        lexeme_null_object = "LexemeNullObject"
        default_name_space = Setup.token_class_name_space
    else:
        lexeme_null_object = "LexemeNullObject"
        default_name_space = Setup.analyzer_name_space

    if lexeme_null_object.find("::") == -1:
        # By default, place the lexeme null object in the analyzer's namespace.
        if len(Setup.analyzer_name_space) != 0:
            name_space = reduce(lambda x, y: "%s::%s" % (x, y),
                                default_name_space)
        else:
            name_space = ""
        lexeme_null_object = "%s::%s" % (name_space, lexeme_null_object)

    Setup.lexeme_null_name,        \
    Setup.lexeme_null_namespace,   \
    Setup.lexeme_null_name_safe  = \
         read_namespaced_name(lexeme_null_object,
                              "lexeme null object (options --lexeme-null-object, --lno)")
    Setup.lexeme_null_full_name_cpp = "::"
    for name in Setup.lexeme_null_namespace:
        Setup.lexeme_null_full_name_cpp += name + "::"
    Setup.lexeme_null_full_name_cpp += Setup.lexeme_null_name
Example #6
def __setup_analyzer_class(Setup):
    """ X0::X1::X2::ClassName --> analyzer_class_name = ClassName
                                  analyzer_name_space = ["X0", "X1", "X2"]
        ClassName --> analyzer_class_name = ClassName
                      analyzer_name_space = []
    """
    # Default set here => able to detect setting on command line.
    if not Setup.analyzer_class: analyzer_class = "Lexer"
    else: analyzer_class = Setup.analyzer_class

    Setup.analyzer_class_name, \
    Setup.analyzer_name_space, \
    Setup.analyzer_name_safe   = \
         read_namespaced_name(analyzer_class,
                              "analyzer class (options -o, --analyzer-class)")
    __check_namespace_admissibility("analyzer class",
                                    Setup.analyzer_name_space)

    if Setup.show_name_spaces_f:
        print "FSM: {"
        print "     class_name:  %s;" % Setup.analyzer_class_name
        print "     name_space:  %s;" % repr(Setup.analyzer_name_space)[1:-1]
        print "     name_prefix: %s;" % Setup.analyzer_name_safe
        print "}"

    Setup.analyzer_derived_class_name,       \
    Setup.analyzer_derived_class_name_space, \
    Setup.analyzer_derived_class_name_safe = \
         read_namespaced_name(Setup.analyzer_derived_class_name,
                              "derived analyzer class (options --derived-class, --dc)",
                              AllowEmptyF=True)
    __check_namespace_admissibility("derived class",
                                    Setup.analyzer_derived_class_name_space)

    if not Setup.quex_lib:
        if Setup.language == "C": quex_lib = "quex"
        else:                     quex_lib = "quex::"
    else:
        # Keep the user-provided value; otherwise 'quex_lib' would be undefined below.
        quex_lib = Setup.quex_lib

    Setup._quex_lib_prefix,     \
    Setup._quex_lib_name_space, \
    Setup._quex_lib_name_safe   = read_namespaced_name(quex_lib,
                                        "Naming of Quex-Lib functions. (options --quex-lib, --ql)",
                                        AllowEmptyF=True)
    __check_namespace_admissibility("derived class",
                                    Setup.analyzer_derived_class_name_space)
Example #7
def __read_token_identifier(fh):
    """Parses a token identifier that may contain a namespace specification.

       Returns "", if no valid specification could be found.
    """
    identifier, name_space_list, dummy = read_namespaced_name(fh, "token identifier")
    if identifier == "": return ""
    if len(name_space_list) == 0: return identifier
    return reduce(lambda x, y: x + "::" + y, name_space_list + [identifier])
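
The reduce() call folds "::" between the namespace parts and the identifier; it yields the same string as "::".join(name_space_list + [identifier]). A quick check with made-up values (reduce() is a builtin in Python 2, hence the explicit import for Python 3):

from functools import reduce

name_space_list = ["mylexer", "tokens"]     # illustrative values
identifier      = "TKN_NUMBER"
qualified = reduce(lambda x, y: x + "::" + y, name_space_list + [identifier])
assert qualified == "mylexer::tokens::TKN_NUMBER"
assert qualified == "::".join(name_space_list + [identifier])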
Example #8
def __setup_token_id_prefix(setup):
    setup.token_id_prefix_plain,      \
    setup.token_id_prefix_name_space, \
    dummy                           = \
         read_namespaced_name(setup.token_id_prefix, 
                              "token prefix (options --token-id-prefix)")

    if len(setup.token_id_prefix_name_space) != 0 and setup.language.upper() == "C":
         error_msg("Token id prefix cannot contain a namespaces if '--language' is set to 'C'.")
Example #9
def __setup_token_id_prefix(Setup):
    Setup.token_id_prefix_plain,      \
    Setup.token_id_prefix_name_space, \
    dummy                           = \
         read_namespaced_name(Setup.token_id_prefix, 
                              "token prefix (options --token-id-prefix)", 
                              AllowEmptyF=True)

    if len(Setup.token_id_prefix_name_space) != 0 and Setup.language.upper() == "C":
         error_msg("Token id prefix cannot contain a namespaces if '--language' is set to 'C'.")
Example #10
def __read_token_identifier(fh):
    """Parses a token identifier that may contain a namespace specification.

       Returns "", if no valid specification could be found.
    """
    identifier, name_space_list, dummy = read_namespaced_name(
        fh, "token identifier")
    if identifier == "": return ""
    if len(name_space_list) == 0: return identifier
    return reduce(lambda x, y: x + "::" + y, name_space_list + [identifier])
Example #11
def __read_token_identifier(fh):
    """Parses a token identifier that may contain a namespace specification.

       Returns "", if no valid specification could be found.
    """
    identifier, name_space_list, dummy = read_namespaced_name(
        fh, "token identifier")
    if identifier == "": return ""
    elif not name_space_list: return identifier
    return Lng.NAME_IN_NAMESPACE(identifier, name_space_list)
Example #12
def __setup_token_id_prefix(Setup):
    Setup.token_id_prefix_plain,      \
    Setup.token_id_prefix_name_space, \
    dummy                           = \
         read_namespaced_name(Setup.token_id_prefix, 
                              "token prefix (options --token-id-prefix)", 
                              AllowEmptyF=True)

    if len(Setup.token_id_prefix_name_space) != 0 and Setup.language.upper() == "C":
         error.log("Token id prefix cannot contain a namespaces if '--language' is set to 'C'.")
Example #13
def __setup_token_class(Setup):
    """ X0::X1::X2::ClassName --> token_class_name = ClassName
                                  token_name_space = ["X0", "X1", "X2"]
        ::ClassName --> token_class_name = ClassName
                        token_name_space = []
    """
    # Default set here => able to detect setting on command line.
    if not Setup.token_class:
        if Setup.analyzer_class:
            if Setup.analyzer_name_space:
                token_class = "%s::%s_Token" % ("::".join(
                    Setup.analyzer_name_space), Setup.analyzer_class_name)
            else:
                token_class = "%s_Token" % Setup.analyzer_class_name
        else:
            token_class = "Token"
    else:
        token_class = Setup.token_class

    # Token classes and derived classes have the freedom not to open a namespace,
    # thus no check 'if namespace == empty'.
    Setup.token_class_name,       \
    Setup.token_class_name_space, \
    Setup.token_class_name_safe = \
         read_namespaced_name(token_class,
                              "token class (options --token-class, --tc)")
    __check_namespace_admissibility("token class",
                                    Setup.token_class_name_space)

    if Setup.show_name_spaces_f:
        print "Token: {"
        print "     class_name:  %s;" % Setup.token_class_name
        print "     name_space:  %s;" % repr(
            Setup.token_class_name_space)[1:-1]
        print "     name_prefix: %s;" % Setup.token_class_name_safe
        print "}"

    if Setup.extern_token_class_file:
        token_db.token_type_definition = \
                TokenTypeDescriptorManual(Setup.extern_token_class_file,
                                          Setup.token_class_name,
                                          Setup.token_class_name_space,
                                          Setup.token_class_name_safe,
                                          Setup.token_id_type)
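
When '--token-class' is not given, the default name is derived from the analyzer as '<analyzer name space>::<AnalyzerClass>_Token'. A quick illustration with made-up values:

analyzer_name_space = ["quex", "demo"]     # illustrative values
analyzer_class_name = "MyLexer"
token_class = "%s::%s_Token" % ("::".join(analyzer_name_space), analyzer_class_name)
assert token_class == "quex::demo::MyLexer_Token"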
Example #14
    def __init__(self, Core=None):
        if Core is None:
            self._file_name = Setup.output_token_class_file
            self._file_name_implementation = Setup.output_token_class_file_implementation
            if Setup.token_class_name.find("::") != -1:
                Setup.token_class_name,       \
                Setup.token_class_name_space, \
                Setup.token_class_name_safe = \
                        read_namespaced_name(Setup.token_class_name,
                                             "token class (options --token-class, --tc)")
            self.class_name = Setup.token_class_name
            self.class_name_safe = Setup.token_class_name_safe
            self.name_space = Setup.token_class_name_space
            self.open_for_derivation_f = False
            self.token_contains_token_id_f = True
            self.token_id_type = CodeUser("size_t", SourceRef())
            self.column_number_type = CodeUser("size_t", SourceRef())
            self.line_number_type = CodeUser("size_t", SourceRef())

            self.distinct_db = {}
            self.union_db = {}

            for name, default_value in token_type_code_fragment_db.iteritems():
                self.__dict__[name] = default_value

        else:
            self._file_name = Core._file_name
            self._file_name_implementation = Core._file_name_implementation
            self.class_name = Core.class_name
            self.class_name_safe = Core.class_name_safe
            self.name_space = Core.name_space
            self.open_for_derivation_f = Core.open_for_derivation_f
            self.token_contains_token_id_f = Core.token_contains_token_id_f
            self.token_id_type = Core.token_id_type
            self.column_number_type = Core.column_number_type
            self.line_number_type = Core.line_number_type

            self.distinct_db = Core.distinct_db
            self.union_db = Core.union_db

            for name in token_type_code_fragment_db.keys():
                self.__dict__[name] = Core.__dict__[name]
Example #15
    def __init__(self, Core=None):
        if Core is None:
            self._file_name                = Setup.output_token_class_file
            self._file_name_implementation = Setup.output_token_class_file_implementation
            if Setup.token_class_name.find("::") != -1:
                Setup.token_class_name,       \
                Setup.token_class_name_space, \
                Setup.token_class_name_safe = \
                        read_namespaced_name(Setup.token_class_name, 
                                             "token class (options --token-class, --tc)")
            self.class_name            = Setup.token_class_name
            self.class_name_safe       = Setup.token_class_name_safe
            self.name_space            = Setup.token_class_name_space
            self.open_for_derivation_f      = False
            self.token_contains_token_id_f  = True
            self.token_id_type         = CodeUser("size_t", SourceRef())
            self.column_number_type    = CodeUser("size_t", SourceRef())
            self.line_number_type      = CodeUser("size_t", SourceRef())

            self.distinct_db = {}
            self.union_db    = {}

            for name, default_value in token_type_code_fragment_db.iteritems():
                self.__dict__[name] = default_value

        else:
            self._file_name                = Core._file_name
            self._file_name_implementation = Core._file_name_implementation
            self.class_name            = Core.class_name
            self.class_name_safe       = Core.class_name_safe
            self.name_space            = Core.name_space
            self.open_for_derivation_f      = Core.open_for_derivation_f
            self.token_contains_token_id_f  = Core.token_contains_token_id_f
            self.token_id_type         = Core.token_id_type
            self.column_number_type    = Core.column_number_type
            self.line_number_type      = Core.line_number_type

            self.distinct_db           = Core.distinct_db
            self.union_db              = Core.union_db

            for name in token_type_code_fragment_db.keys():
                self.__dict__[name] = Core.__dict__[name]
Example #16
def __setup_token_class(Setup):
    """ X0::X1::X2::ClassName --> token_class_name = ClassName
                                  token_name_space = ["X0", "X1", "X2"]
        ::ClassName --> token_class_name = ClassName
                        token_name_space = []
        ClassName --> token_class_name = ClassName
                      token_name_space = analyzer_name_space
    """
    if Setup.token_class.find("::") == -1:
        # By default, place the token class in the analyzer's namespace.
        if len(Setup.analyzer_name_space) != 0:
            analyzer_name_space = reduce(lambda x, y: "%s::%s" % (x, y),
                                         Setup.analyzer_name_space)
        else:
            analyzer_name_space = ""
        Setup.token_class = "%s::%s" % (analyzer_name_space, Setup.token_class)

    # Token classes and derived classes have the freedom not to open a namespace,
    # thus no check 'if namespace == empty'.
    Setup.token_class_name,       \
    Setup.token_class_name_space, \
    Setup.token_class_name_safe = \
         read_namespaced_name(Setup.token_class,
                              "token class (options --token-class, --tc)")

    if Setup.show_name_spaces_f:
        print "Token: {"
        print "     class_name:  %s;" % Setup.token_class_name
        print "     name_space:  %s;" % repr(
            Setup.token_class_name_space)[1:-1]
        print "     name_prefix: %s;" % Setup.token_class_name_safe
        print "}"

    if Setup.token_class_file != "":
        blackboard.token_type_definition = \
                TokenTypeDescriptorManual(Setup.token_class_file,
                                          Setup.token_class_name,
                                          Setup.token_class_name_space,
                                          Setup.token_class_name_safe,
                                          Setup.token_id_type)
Example #17
def __setup_token_class(Setup):
    """ X0::X1::X2::ClassName --> token_class_name = ClassName
                                  token_name_space = ["X0", "X1", "X2"]
        ::ClassName --> token_class_name = ClassName
                        token_name_space = []
        ClassName --> token_class_name = ClassName
                      token_name_space = analyzer_name_space
    """
    if Setup.token_class.find("::") == -1:
        # By default, place the token class in the analyzer's namespace.
        if len(Setup.analyzer_name_space) != 0:
            analyzer_name_space = reduce(lambda x, y: "%s::%s" % (x, y), Setup.analyzer_name_space)
        else:
            analyzer_name_space = ""
        Setup.token_class = "%s::%s" % (analyzer_name_space, Setup.token_class)

    # Token classes and derived classes have the freedom not to open a namespace,
    # thus no check 'if namespace == empty'.
    Setup.token_class_name,       \
    Setup.token_class_name_space, \
    Setup.token_class_name_safe = \
         read_namespaced_name(Setup.token_class, 
                              "token class (options --token-class, --tc)")

    if Setup.show_name_spaces_f:
        print "Token: {"
        print "     class_name:  %s;" % Setup.token_class_name
        print "     name_space:  %s;" % repr(Setup.token_class_name_space)[1:-1]
        print "     name_prefix: %s;" % Setup.token_class_name_safe   
        print "}"

    if Setup.token_class_file != "":
        blackboard.token_type_definition = \
                TokenTypeDescriptorManual(Setup.token_class_file,
                                          Setup.token_class_name,
                                          Setup.token_class_name_space,
                                          Setup.token_class_name_safe,
                                          Setup.token_id_type)
Example #18
def __parse_section(fh, descriptor, already_defined_list):
    global token_type_code_fragment_db
    assert type(already_defined_list) == list

    SubsectionList = ["name", "file_name", "standard", "distinct", "union", "inheritable", "noid"] \
                      + token_type_code_fragment_db.keys()

    position = fh.tell()
    skip_whitespace(fh)
    word = read_identifier(fh)
    if word == "":
        fh.seek(position)
        if check(fh, "}"):
            fh.seek(position)
            return False
        error_msg(
            "Missing token_type section ('standard', 'distinct', or 'union').",
            fh)

    verify_word_in_list(
        word, SubsectionList,
        "Subsection '%s' not allowed in token_type section." % word, fh)

    if word == "name":
        if not check(fh, "="):
            error_msg("Missing '=' in token_type 'name' specification.", fh)
        descriptor.class_name, descriptor.name_space, descriptor.class_name_safe = read_namespaced_name(
            fh, "token_type")
        if not check(fh, ";"):
            error_msg(
                "Missing terminating ';' in token_type 'name' specification.",
                fh)

    elif word == "inheritable":
        descriptor.open_for_derivation_f = True
        check_or_die(fh, ";")

    elif word == "noid":
        descriptor.token_contains_token_id_f = False
        check_or_die(fh, ";")

    elif word == "file_name":
        if not check(fh, "="):
            error_msg("Missing '=' in token_type 'file_name' specification.",
                      fh)
        descriptor.set_file_name(read_until_letter(fh, ";"))
        if not check(fh, ";"):
            error_msg(
                "Missing terminating ';' in token_type 'file_name' specification.",
                fh)

    elif word in ["standard", "distinct", "union"]:
        if word == "standard":
            parse_standard_members(fh, word, descriptor, already_defined_list)
        elif word == "distinct":
            parse_distinct_members(fh, word, descriptor, already_defined_list)
        elif word == "union":
            parse_union_members(fh, word, descriptor, already_defined_list)

        if not check(fh, "}"):
            fh.seek(position)
            error_msg(
                "Missing closing '}' at end of token_type section '%s'." %
                word, fh)

    elif word in token_type_code_fragment_db.keys():
        fragment = code_fragment.parse(fh, word, AllowBriefTokenSenderF=False)
        descriptor.__dict__[word] = fragment

    else:
        assert False, "This code section section should not be reachable because 'word'\n" + \
                      "was checked to fit in one of the 'elif' cases."

    return True