Example #1
def prepare_default_standard_token_ids():
    global standard_token_id_list

    token_id_db["TERMINATION"]   = TokenInfo("TERMINATION",   ID=0)
    token_id_db["UNINITIALIZED"] = TokenInfo("UNINITIALIZED", ID=1)
    # Indentation Tokens
    token_id_db["INDENT"]        = TokenInfo("INDENT",        ID=2)
    token_id_db["DEDENT"]        = TokenInfo("DEDENT",        ID=3)
    token_id_db["NODENT"]        = TokenInfo("NODENT",        ID=4)

    # Assert that every standard token id is in the database
    for name in standard_token_id_list:
        assert name in token_id_db
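
For context, here is a minimal sketch of the supporting definitions the snippet above relies on. The real Quex sources define TokenInfo, token_id_db, and standard_token_id_list elsewhere; the shapes below are illustrative assumptions, not the actual implementation:

class TokenInfo:
    # Hypothetical shape: a token-id name plus its (possibly unassigned)
    # numeric value. The real class carries more bookkeeping.
    def __init__(self, name, ID=None):
        self.name   = name
        self.number = ID

token_id_db            = {}   # maps token-id name -> TokenInfo
standard_token_id_list = ["TERMINATION", "UNINITIALIZED",
                          "INDENT", "DEDENT", "NODENT"]

With these in place, calling prepare_default_standard_token_ids() registers the five standard ids, and the assertion loop at the end confirms each one is present in the database.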
Example #2
def do(setup):
    """Creates a file of token-ids from a given set of names.
       Also creates a function:

       const string& $$token$$::map_id_to_name().
    """
    global file_str
    LanguageDB = Setup.language_db

    __propose_implicit_token_definitions()

    for standard_token_id in standard_token_id_list:
        assert standard_token_id in token_id_db

    assert blackboard.token_type_definition is not None, \
           "Token type has not been defined yet; see $QUEX_PATH/quex/core.py for how to\n" + \
           "handle this."

    # (*) Token ID File ________________________________________________________________
    #
    #     The token id file can either be specified as a database of
    #     token-id names, or as a file that directly assigns the token-ids
    #     to variables. If the flag '--user-token-id-file' is defined, then
    #     the token-id file is provided by the user. Otherwise, the
    #     token id file is created by the token-id maker.
    #
    #     The token id maker considers the file passed by the option '-t'
    #     as the database file and creates a C++ file with the output filestem
    #     plus the suffix "--token-ids". Note that the token id file is a
    #     header file.
    #
    if len(token_id_db.keys()) == len(standard_token_id_list):
        token_id_str = "%sTERMINATION and %sUNINITIALIZED" % \
                       (setup.token_id_prefix_plain, setup.token_id_prefix_plain) 
        # TERMINATION + UNINITIALIZED = 2 token ids. If they are the only ones nothing can be done.
        error_msg("Only token ids %s are defined.\n" % token_id_str + \
                  "Quex refuses to proceed. Please, use the 'token { ... }' section to\n" + \
                  "specify at least one other token id.")

    #______________________________________________________________________________________
    L = max(len(name) for name in token_id_db)
    def space(Name):
        return " " * (L - len(Name))

    # -- define values for the token ids
    def define_this(txt, token):
        if setup.language == "C":
            txt.append("#define %s%s %s((QUEX_TYPE_TOKEN_ID)%i)\n" \
                       % (setup.token_id_prefix_plain, token.name, space(token.name), token.number))
        else:
            txt.append("const QUEX_TYPE_TOKEN_ID %s%s%s = ((QUEX_TYPE_TOKEN_ID)%i);\n" \
                       % (setup.token_id_prefix_plain, token.name, space(token.name), token.number))

    if setup.token_id_foreign_definition_file != "":
        token_id_txt = ["#include \"%s\"\n" % Setup.get_file_reference(setup.token_id_foreign_definition_file)]

    else:
        if setup.language == "C": 
            prolog = ""
            epilog = ""
        else:
            prolog = LanguageDB.NAMESPACE_OPEN(setup.token_id_prefix_name_space)
            epilog = LanguageDB.NAMESPACE_CLOSE(setup.token_id_prefix_name_space)

        token_id_txt = [prolog]

        # Assign values to tokens that have no numeric identifier.
        # NOTE: This must not happen if tokens are defined in a user-provided file.
        i = setup.token_id_counter_offset
        # Iterate over 'dummy_name' only to have the list sorted by name. The key 'dummy_name'
        # may contain '--' to indicate a unicode value, so do not use it as the name.
        for dummy_name, token in sorted(token_id_db.items()):
            if token.number is None: 
                while __is_token_id_occupied(i):
                    i += 1
                token.number = i

            define_this(token_id_txt, token)

        # Double-check that no token id appears twice.
        # Again, this can only happen if quex itself produced the numeric values for the tokens.
        token_list = list(token_id_db.values())
        for i, x in enumerate(token_list):
            for y in token_list[i+1:]:
                if x.number != y.number: continue
                error_msg("Token id '%s'" % x.name, x.file_name, x.line_n, DontExitF=True)
                error_msg("and token id '%s' have same numeric value '%s'." \
                          % (y.name, x.number), y.file_name, y.line_n, DontExitF=True)
                          
        token_id_txt.append(epilog)

    content = blue_print(file_str,
                         [["$$TOKEN_ID_DEFINITIONS$$",        "".join(token_id_txt)],
                          ["$$DATE$$",                        time.asctime()],
                          ["$$TOKEN_CLASS_DEFINITION_FILE$$", Setup.get_file_reference(blackboard.token_type_definition.get_file_name())],
                          ["$$TOKEN_PREFIX$$",                setup.token_id_prefix], 
                          ["$$INCLUDE_GUARD_EXT$$",           get_include_guard_extension(         \
                                                                  Setup.analyzer_name_safe.upper() \
                                                                + "__"                             \
                                                                + Setup.token_class_name_safe.upper())], 
                         ])

    return content
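
The blue_print call above fills placeholders such as $$DATE$$ and $$TOKEN_PREFIX$$ in the file_str template. Its implementation is not shown in this example; a minimal sketch consistent with the call site (an assumption, not the actual Quex helper) is plain sequential string substitution:

def blue_print(template_str, replacements):
    # Apply each [placeholder, substitute] pair in turn. Placeholders such
    # as "$$TOKEN_PREFIX$$" are assumed to occur literally in the template.
    result = template_str
    for placeholder, substitute in replacements:
        result = result.replace(placeholder, substitute)
    return result

Because substitution runs pair by pair, a substitute string that happened to contain a later placeholder would itself be expanded; the distinctive $$...$$ markers keep such collisions unlikely.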