Example #1
from collections import defaultdict
from operator import attrgetter

def __warn_on_double_definition():
    """Double-check that no token id appears twice. This can only happen
    if Quex itself produced the numeric values for the tokens.

    If the token ids come from outside, Quex does not know the numeric
    values and cannot warn about double definitions.
    """
    assert len(Setup.token_id_foreign_definition_file) == 0

    # Map: numeric value --> list of token ids that share it.
    clash_db = defaultdict(list)

    token_list = list(token_id_db.values())  # list() so the tail can be sliced
    for i, x in enumerate(token_list):
        for y in token_list[i+1:]:
            if x.number != y.number: continue
            clash_db[x.number].append(x)
            clash_db[x.number].append(y)

    def find_source_reference(TokenList):
        # Return the first non-void source reference among the tokens, or None.
        for token in TokenList:
            if token.sr.is_void(): continue
            return token.sr
        return None
    
    if clash_db:
        item_list = sorted(clash_db.items())
        sr = find_source_reference(item_list[0][1])
        error.warning("Following token ids have the same numeric value assigned:", sr)
        for x, token_id_list in item_list:
            sr = find_source_reference(token_id_list)
            token_ids_sorted = sorted(set(token_id_list), key=attrgetter("name")) # ensure uniqueness
            error.warning("  %s: %s" % (x, ", ".join(t.name for t in token_ids_sorted)),
                          sr)
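
The clash detection above only needs to group tokens by their numeric value. Below is a minimal, self-contained sketch of the same idea; the Token namedtuple is a hypothetical stand-in for Quex's token_id_db entries, and the single-pass grouping replaces the pairwise comparison:

from collections import defaultdict, namedtuple

# Hypothetical stand-in for the token objects stored in token_id_db.
Token = namedtuple("Token", ("name", "number"))

def find_clashes(tokens):
    # Group token names by numeric value; any value carrying more than one
    # distinct name is a clash.
    by_number = defaultdict(set)
    for token in tokens:
        by_number[token.number].add(token.name)
    return {number: sorted(names)
            for number, names in by_number.items()
            if len(names) > 1}

tokens = [Token("PLUS", 3), Token("MINUS", 4), Token("ADD", 3)]
print(find_clashes(tokens))   # {3: ['ADD', 'PLUS']}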
Example #2
from collections import defaultdict
from operator import attrgetter

def __warn_on_double_definition():
    """Double-check that no token id appears twice. This can only happen
    if Quex itself produced the numeric values for the tokens.

    If the token ids come from outside, Quex does not know the numeric
    values and cannot warn about double definitions.
    """
    assert len(Setup.token_id_foreign_definition_file) == 0

    # Map: numeric value --> list of token ids that share it.
    clash_db = defaultdict(list)

    token_list = list(token_id_db.values())  # list() so the tail can be sliced
    for i, x in enumerate(token_list):
        for y in token_list[i+1:]:
            if x.number != y.number: continue
            clash_db[x.number].append(x)
            clash_db[x.number].append(y)

    def find_source_reference(TokenList):
        # Return the first non-void source reference among the tokens, or None.
        for token in TokenList:
            if token.sr.is_void(): continue
            return token.sr
        return None
    
    if clash_db:
        item_list = sorted(clash_db.items())
        sr = find_source_reference(item_list[0][1])
        error_msg("Following token ids have the same numeric value assigned:",
                  sr, DontExitF=True)
        for x, token_id_list in item_list:
            sr = find_source_reference(token_id_list)
            token_ids_sorted = sorted(set(token_id_list), key=attrgetter("name")) # ensure uniqueness
            error_msg("  %s: %s" % (x, ", ".join(t.name for t in token_ids_sorted)),
                      sr, DontExitF=True)
Example #3
def __is_token_id_occupied(TokenID):
    """Return True if some token already carries the numeric value 'TokenID'."""
    return any(token.number == TokenID for token in token_id_db.values())
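
This predicate is the building block that do() in the next example uses to allocate the next free numeric value: a counter walks upward, skipping values that are already taken. A small sketch of that loop over a plain set instead of token_id_db:

def next_free_id(occupied, start):
    # Walk upward from 'start' until a value is found that no token uses.
    i = start
    while i in occupied:
        i += 1
    return i

occupied = {0, 1, 2, 5}
print(next_free_id(occupied, 0))   # 3
print(next_free_id(occupied, 5))   # 6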
Example #4
def do(setup):
    """Creates a file of token-ids from a given set of names. It also
    creates a function:

       const string& $$token$$::map_id_to_name().
    """
    global file_str
    LanguageDB = Setup.language_db

    __propose_implicit_token_definitions()

    for standard_token_id in standard_token_id_list:
        assert standard_token_id in token_id_db

    assert blackboard.token_type_definition is not None, \
           "Token type has not been defined yet; see $QUEX_PATH/quex/core.py for how to\n" + \
           "handle this."

    # (*) Token ID File ________________________________________________________________
    #
    #     The token id file can either be specified as a database of
    #     token-id names, or as a file that directly assigns the token ids
    #     to variables. If the flag '--user-token-id-file' is given, the
    #     token-id file is provided by the user. Otherwise, the token id
    #     file is created by the token-id maker.
    #
    #     The token-id maker treats the file passed via the option '-t'
    #     as the database file and creates a C++ file named after the output
    #     file stem plus the suffix "--token-ids". Note that the token id
    #     file is a header file.
    #
    if len(token_id_db) == len(standard_token_id_list):
        token_id_str = "%sTERMINATION and %sUNINITIALIZED" % \
                       (setup.token_id_prefix_plain, setup.token_id_prefix_plain)
        # TERMINATION + UNINITIALIZED = 2 token ids. If they are the only ones, nothing can be done.
        error_msg("Only token ids %s are defined.\n" % token_id_str + \
                  "Quex refuses to proceed. Please use the 'token { ... }' section to\n" + \
                  "specify at least one other token id.")

    #______________________________________________________________________________________
    L = max(len(name) for name in token_id_db)
    def space(Name):
        return " " * (L - len(Name))

    # -- define values for the token ids
    def define_this(txt, token):
        if setup.language == "C":
            txt.append("#define %s%s %s((QUEX_TYPE_TOKEN_ID)%i)\n" \
                       % (setup.token_id_prefix_plain, token.name, space(token.name), token.number))
        else:
            txt.append("const QUEX_TYPE_TOKEN_ID %s%s%s = ((QUEX_TYPE_TOKEN_ID)%i);\n" \
                       % (setup.token_id_prefix_plain, token.name, space(token.name), token.number))

    if setup.token_id_foreign_definition_file != "":
        token_id_txt = ["#include \"%s\"\n" % Setup.get_file_reference(setup.token_id_foreign_definition_file)]

    else:
        if setup.language == "C": 
            prolog = ""
            epilog = ""
        else:
            prolog = LanguageDB.NAMESPACE_OPEN(setup.token_id_prefix_name_space)
            epilog = LanguageDB.NAMESPACE_CLOSE(setup.token_id_prefix_name_space)

        token_id_txt = [prolog]

        # Assign values to tokens that have no numeric identifier yet.
        # NOTE: This does not happen if the token ids are defined in a user-provided file.
        i = setup.token_id_counter_offset
        # Use 'dummy_name' only to get the list sorted by name. The key 'dummy_name'
        # may contain '--' to indicate a unicode value, so do not use it as the name.
        for dummy_name, token in sorted(token_id_db.items()):
            if token.number is None:
                while __is_token_id_occupied(i):
                    i += 1
                token.number = i

            define_this(token_id_txt, token)

        # Double-check that no token id appears twice. Again, this can only
        # happen if Quex itself produced the numeric values for the tokens.
        token_list = list(token_id_db.values())
        for i, x in enumerate(token_list):
            for y in token_list[i+1:]:
                if x.number != y.number: continue
                error_msg("Token id '%s'" % x.name, x.file_name, x.line_n, DontExitF=True)
                error_msg("and token id '%s' have same numeric value '%s'." \
                          % (y.name, x.number), y.file_name, y.line_n, DontExitF=True)
                          
        token_id_txt.append(epilog)

    content = blue_print(file_str,
                         [["$$TOKEN_ID_DEFINITIONS$$",        "".join(token_id_txt)],
                          ["$$DATE$$",                        time.asctime()],
                          ["$$TOKEN_CLASS_DEFINITION_FILE$$", Setup.get_file_reference(blackboard.token_type_definition.get_file_name())],
                          ["$$TOKEN_PREFIX$$",                setup.token_id_prefix], 
                          ["$$INCLUDE_GUARD_EXT$$",           get_include_guard_extension(         \
                                                                  Setup.analyzer_name_safe.upper() \
                                                                + "__"                             \
                                                                + Setup.token_class_name_safe.upper())], 
                         ])

    return content
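
For orientation, here is a rough sketch of the lines that define_this() emits, assuming a token prefix of "QUEX_TKN_" (the prefix is configurable via the setup; this value is only an illustration):

tokens = [("TERMINATION", 0), ("UNINITIALIZED", 1), ("NUMBER", 10000)]
L = max(len(name) for name, _ in tokens)
for name, number in tokens:
    pad = " " * (L - len(name))
    # C output mode:
    print("#define QUEX_TKN_%s %s((QUEX_TYPE_TOKEN_ID)%i)" % (name, pad, number))
    # In C++ output mode the corresponding line would read:
    #   const QUEX_TYPE_TOKEN_ID QUEX_TKN_<name> = ((QUEX_TYPE_TOKEN_ID)<number>);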
Example #5
def do(setup):
    """Creates a file of token-ids from a given set of names. It also
    creates a function:

       const string& $$token$$::map_id_to_name().
    """
    global file_str
    LanguageDB = Setup.language_db

    __propose_implicit_token_definitions()

    for standard_token_id in standard_token_id_list:
        assert standard_token_id in token_id_db

    assert blackboard.token_type_definition is not None, \
           "Token type has not been defined yet; see $QUEX_PATH/quex/core.py for how to\n" + \
           "handle this."

    # (*) Token ID File ________________________________________________________________
    #
    #     The token id file can either be specified as a database of
    #     token-id names, or as a file that directly assigns the token ids
    #     to variables. If the flag '--user-token-id-file' is given, the
    #     token-id file is provided by the user. Otherwise, the token id
    #     file is created by the token-id maker.
    #
    #     The token-id maker treats the file passed via the option '-t'
    #     as the database file and creates a C++ file named after the output
    #     file stem plus the suffix "--token-ids". Note that the token id
    #     file is a header file.
    #
    if len(token_id_db) == len(standard_token_id_list):
        token_id_str = "%sTERMINATION and %sUNINITIALIZED" % \
                       (setup.token_id_prefix_plain, setup.token_id_prefix_plain)
        # TERMINATION + UNINITIALIZED = 2 token ids. If they are the only ones, nothing can be done.
        error_msg("Only token ids %s are defined.\n" % token_id_str + \
                  "Quex refuses to proceed. Please use the 'token { ... }' section to\n" + \
                  "specify at least one other token id.")

    #______________________________________________________________________________________
    L = max(len(name) for name in token_id_db)

    def space(Name):
        return " " * (L - len(Name))

    # -- define values for the token ids
    def define_this(txt, token):
        if setup.language == "C":
            txt.append("#define %s%s %s((QUEX_TYPE_TOKEN_ID)%i)\n" \
                       % (setup.token_id_prefix_plain, token.name, space(token.name), token.number))
        else:
            txt.append("const QUEX_TYPE_TOKEN_ID %s%s%s = ((QUEX_TYPE_TOKEN_ID)%i);\n" \
                       % (setup.token_id_prefix_plain, token.name, space(token.name), token.number))

    if setup.token_id_foreign_definition_file != "":
        token_id_txt = [
            "#include \"%s\"\n" %
            Setup.get_file_reference(setup.token_id_foreign_definition_file)
        ]

    else:
        if setup.language == "C":
            prolog = ""
            epilog = ""
        else:
            prolog = LanguageDB.NAMESPACE_OPEN(
                setup.token_id_prefix_name_space)
            epilog = LanguageDB.NAMESPACE_CLOSE(
                setup.token_id_prefix_name_space)

        token_id_txt = [prolog]

        # Assign values to tokens that have no numeric identifier yet.
        # NOTE: This does not happen if the token ids are defined in a user-provided file.
        i = setup.token_id_counter_offset
        # Use 'dummy_name' only to get the list sorted by name. The key 'dummy_name'
        # may contain '--' to indicate a unicode value, so do not use it as the name.
        for dummy_name, token in sorted(token_id_db.items()):
            if token.number is None:
                while __is_token_id_occupied(i):
                    i += 1
                token.number = i

            define_this(token_id_txt, token)

        # Double-check that no token id appears twice. Again, this can only
        # happen if Quex itself produced the numeric values for the tokens.
        token_list = list(token_id_db.values())
        for i, x in enumerate(token_list):
            for y in token_list[i + 1:]:
                if x.number != y.number: continue
                error_msg("Token id '%s'" % x.name,
                          x.file_name,
                          x.line_n,
                          DontExitF=True)
                error_msg("and token id '%s' have same numeric value '%s'." \
                          % (y.name, x.number), y.file_name, y.line_n, DontExitF=True)

        token_id_txt.append(epilog)

    content = blue_print(file_str,
                         [["$$TOKEN_ID_DEFINITIONS$$",        "".join(token_id_txt)],
                          ["$$DATE$$",                        time.asctime()],
                          ["$$TOKEN_CLASS_DEFINITION_FILE$$", Setup.get_file_reference(blackboard.token_type_definition.get_file_name())],
                          ["$$TOKEN_PREFIX$$",                setup.token_id_prefix],
                          ["$$INCLUDE_GUARD_EXT$$",           get_include_guard_extension(         \
                                                                  Setup.analyzer_name_safe.upper() \
                                                                + "__"                             \
                                                                + Setup.token_class_name_safe.upper())],
                         ])

    return content