Code example #1
File: mode.py  Project: smmckay/quex3
def __determine_initial_mode(ModePrepDb):
    assert not Setup.token_class_only_f
    assert not Setup.converter_only_f

    if not blackboard.initial_mode.sr.is_void():
        return

    # Choose an applicable mode as start mode
    first_candidate = None
    for name, mode in ModePrepDb.iteritems():
        if not mode.implemented_f():
            continue
        elif first_candidate is not None:
            error.log("No initial mode defined via 'start' while more than one applicable mode exists.\n" + \
                      "Use for example 'start = %s;' in the quex source file to define an initial mode." \
                      % first_candidate.name)
        else:
            first_candidate = mode

    if first_candidate is None:
        error.log(
            "No mode that can be implemented--all modes <inheritable: only>.")
    else:
        blackboard.initial_mode = CodeUser(first_candidate.name,
                                           SourceReference=first_candidate.sr)
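
The selection loop above can be exercised in isolation. Below is a minimal sketch, assuming stand-in Mode objects and plain exceptions in place of quex's blackboard and error modules; none of these names are the real quex API.

# Minimal sketch of the start-mode selection above; 'Mode' and the
# exception-based error handling are stand-ins, not the quex API.
class Mode(object):
    def __init__(self, name, implemented_f):
        self.name = name
        self._implemented_f = implemented_f

    def implemented_f(self):
        return self._implemented_f

def pick_start_mode(mode_db):
    first_candidate = None
    for mode in mode_db.values():
        if not mode.implemented_f():
            continue                       # skip <inheritable: only> modes
        elif first_candidate is not None:
            raise ValueError("more than one applicable mode; "
                             "use 'start = %s;'" % first_candidate.name)
        else:
            first_candidate = mode
    if first_candidate is None:
        raise ValueError("no mode that can be implemented")
    return first_candidate.name

# Exactly one implementable mode => it becomes the start mode.
db = {"BASE": Mode("BASE", False), "PROGRAM": Mode("PROGRAM", True)}
print(pick_start_mode(db))                 # => PROGRAM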
Code example #2
def __parse_brief_token_sender(fh, ContinueF):
    # shorthand for { self.send(TKN_SOMETHING); QUEX_SETTING_AFTER_SEND_CONTINUE_OR_RETURN(); }

    position = fh.tell()
    try:
        skip_whitespace(fh)
        position = fh.tell()

        code = __parse_token_id_specification_by_character_code(fh)
        if code != -1:
            code = __create_token_sender_by_character_code(fh, code)
        else:
            skip_whitespace(fh)
            identifier = __read_token_identifier(fh)
            skip_whitespace(fh)
            if identifier in ["GOTO", "GOSUB", "GOUP"]:
                code = __create_mode_transition_and_token_sender(
                    fh, identifier)
            else:
                code = __create_token_sender_by_token_name(fh, identifier)
                check_or_die(fh, ";")

        if len(code) != 0:
            if ContinueF:
                code += "QUEX_SETTING_AFTER_SEND_CONTINUE_OR_RETURN();\n"
            return CodeUser(code, SourceRef.from_FileHandle(fh))
        else:
            return None

    except EndOfStreamException:
        fh.seek(position)
        error.error_eof("token", fh)
Code example #3
File: token_type.py  Project: smmckay/quex3
    def __init__(self, Core=None):
        if Core is None:
            # Consistency is maintained via properties (defined below the constructor)
            #    self.class_name      = Setup.token_class_name
            #    self.class_name_safe = Setup.token_class_name_safe
            #    self.name_space      = Setup.token_class_name_space
            #    self.token_id_type   = Setup.token_id_type

            # Consistency maintained via 'set_file_name/get_file_name'
            #    self._file_name                = Setup.output_token_class_file
            #    self._file_name_implementation = Setup.output_token_class_file_implementation

            self.token_id_type_defined_f = False
            self.open_for_derivation_f = False
            self.token_contains_token_id_f = True
            self.token_id_type = "size_t"
            self.column_number_type = CodeUser("size_t", SourceRef())
            self.line_number_type = CodeUser("size_t", SourceRef())

            self.distinct_db = OrderedDict()  # See comment [MEMBER PACKAGING]
            self.union_db = OrderedDict()  # See comment [MEMBER PACKAGING]

            for name, default_value in token_type_code_fragment_db.iteritems():
                self.__dict__[name] = default_value

        else:
            # Consistency is maintained via properties (defined below the constructor)
            Setup.token_class_name = Core.class_name
            Setup.token_class_name_safe = Core.class_name_safe
            Setup.token_class_name_space = Core.name_space
            Setup.token_id_type = Core.token_id_type

            # Consistency maintained via 'set_file_name/get_file_name'
            #    self._file_name                = Setup.output_token_class_file
            #    self._file_name_implementation = Setup.output_token_class_file_implementation

            self.token_id_type_defined_f = Core.token_id_type_defined_f
            self.open_for_derivation_f = Core.open_for_derivation_f
            self.token_contains_token_id_f = Core.token_contains_token_id_f
            self.column_number_type = Core.column_number_type
            self.line_number_type = Core.line_number_type

            self.distinct_db = Core.distinct_db
            self.union_db = Core.union_db

            for name in token_type_code_fragment_db.keys():
                self.__dict__[name] = Core.__dict__[name]
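
This constructor follows a 'default or copy' pattern: with Core=None it installs default values, otherwise it copies every field from the given core object. A reduced, self-contained sketch of the pattern; the class and field set are illustrative.

# 'Default or copy' constructor pattern with a reduced field set.
class DescriptorCore(object):
    FIELDS = ("token_id_type", "column_number_type", "line_number_type")

    def __init__(self, Core=None):
        if Core is None:
            self.token_id_type = "size_t"
            self.column_number_type = "size_t"
            self.line_number_type = "size_t"
        else:
            for name in self.FIELDS:       # copy every field from 'Core'
                setattr(self, name, getattr(Core, name))

a = DescriptorCore()
a.token_id_type = "uint32_t"
b = DescriptorCore(a)                      # 'b' copies the state of 'a'
print(b.token_id_type)                     # => uint32_t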
Code example #4
File: code_fragment.py  Project: smmckay/quex3
def get_CodeUser_for_token_sending(fh, Identifier, Position, LexemeNullF,
                                   LexemeF):
    token_name = "%s%s" % (Setup.token_id_prefix_plain, Identifier)
    code_raw = __create_token_sender_by_token_name(fh,
                                                   token_name,
                                                   LexemeNullOnlyF=LexemeNullF,
                                                   LexemeOnlyF=LexemeF)
    return CodeUser(code_raw, SourceRef.from_FileHandle(fh, BeginPos=Position))
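
Generated code is consistently wrapped in a CodeUser together with a SourceRef, so later error messages can point back to the defining position. A stand-in sketch of what such a wrapper carries; both classes here are illustrative stubs, not quex's implementations.

# Illustrative stubs: code text paired with its source position.
class SourceRef(object):
    def __init__(self, file_name="<string>", line_n=0):
        self.file_name = file_name
        self.line_n = line_n

class CodeUser(object):
    def __init__(self, Code, SourceReference):
        self._code = Code
        self.sr = SourceReference

    def get_text(self):
        return self._code

code = CodeUser("self.send(TKN_NUMBER);", SourceRef("example.qx", 12))
print("%s  (from %s:%i)" % (code.get_text(), code.sr.file_name, code.sr.line_n))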
Code example #5
def do(TxtList, DELETED_Op):
    mode_prep_prep_db = {}
    for txt in TxtList:
        sh = StringIO(txt)
        sh.name = "<string>"
        mode.parse(sh, mode_prep_prep_db)

    blackboard.initial_mode = CodeUser("X", SourceRef.from_FileHandle(sh))

    mode_prep_db = mode.__finalize_modes_prep(mode_prep_prep_db)
    for x in sorted(mode_prep_db.itervalues(), key=lambda x: x.name):
        print "Mode: '%s'" % x.name
        for i, pattern in enumerate(x.pattern_list):
            terminal = x.terminal_db[pattern.incidence_id]
            print "(%i) %s {%s}" % (i, pattern.pattern_string(), "".join(
                terminal.pure_code()).strip())
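
The driver feeds strings through the file-based parser by attaching a '.name' to a StringIO object, so diagnostics can cite a pseudo file name. A minimal sketch of the trick; io.StringIO is used here, while the Python 2 code above uses the StringIO module.

from io import StringIO

sh = StringIO(u"mode X { [a-z]+ => TKN_WORD; }")
sh.name = "<string>"               # parsers can now report a 'file' name
print("%s: %s" % (sh.name, sh.read()))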
Code example #6
File: match_precedence_test.py  Project: xxyzzzq/quex
def do(TxtList, DELETED_Op):
    blackboard.mode_description_db.clear()
    for txt in TxtList:
        sh = StringIO(txt)
        sh.name = "<string>"
        mode.parse(sh)

    blackboard.initial_mode = CodeUser("X", SourceRef.from_FileHandle(sh))
    blackboard_mode_db_setup(blackboard.mode_description_db)

    for x in sorted(blackboard.mode_db.itervalues(), key=lambda x: x.name):
        print "Mode: '%s'" % x.name
        for i, pattern in enumerate(x.pattern_list):
            terminal = x.terminal_db[pattern.incidence_id()]
            print "(%i) %s {%s}" % (i, pattern.pattern_string(), "".join(
                terminal.pure_code()).strip())
Code example #7
File: mode.py  Project: xxyzzzq/quex
def determine_start_mode(mode_db):
    if not blackboard.initial_mode.sr.is_void():
        return

    # Choose an applicable mode as start mode
    first_candidate = None
    for name, mode in mode_db.iteritems():
        if mode.abstract_f(): 
            continue
        elif first_candidate is not None:
            error.log("No initial mode defined via 'start' while more than one applicable mode exists.\n" + \
                      "Use for example 'start = %s;' in the quex source file to define an initial mode." \
                      % first_candidate.name)
        else:
            first_candidate = mode

    if first_candidate is None:
        error.log("No mode that can be implemented--all modes <inheritable: only>.")
    else:
        blackboard.initial_mode = CodeUser(first_candidate.name, SourceReference=first_candidate.sr)
Code example #8
File: code_fragment.py  Project: smmckay/quex3
def __parse_brief_token_sender(fh):
    # shorthand for { self.send(TKN_SOMETHING); RETURN; }

    position = fh.tell()
    try:
        skip_whitespace(fh)
        position = fh.tell()

        code = __parse_token_id_specification_by_character_code(fh)
        if code != -1:
            code = __create_token_sender_by_character_code(fh, code)
        else:
            skip_whitespace(fh)
            identifier = __read_token_identifier(fh)
            skip_whitespace(fh)
            if identifier in ["GOTO", "GOSUB", "GOUP"]:
                code = __create_mode_transition_and_token_sender(
                    fh, identifier)
            else:
                code = __create_token_sender_by_token_name(fh, identifier)
                check_or_die(fh, ";")

        if code:
            # IMPORTANT: For handlers 'on_end_of_stream' and 'on_failure',
            #            => CONTINUE would be disastrous!
            # -- When a termination token is sent, no other token shall follow.
            #    Return MUST be enforced               => Do not allow CONTINUE!
            # -- When an 'on_failure' is detected allow immediate action of the
            #    receiver.                             => Do not allow CONTINUE!
            code += "\n%s\n" % Lng.PURE_RETURN  # Immediate RETURN after token sending
            return CodeUser(code,
                            SourceRef.from_FileHandle(fh, BeginPos=position))
        else:
            return None

    except EndOfStreamException:
        fh.seek(position)
        error.error_eof("token", fh)
Code example #9
    def __init__(self, Core=None):
        if Core is None:
            self._file_name                = Setup.output_token_class_file
            self._file_name_implementation = Setup.output_token_class_file_implementation
            if Setup.token_class_name.find("::") != -1:
                Setup.token_class_name,       \
                Setup.token_class_name_space, \
                Setup.token_class_name_safe = \
                        read_namespaced_name(Setup.token_class_name, 
                                             "token class (options --token-class, --tc)")
            self.class_name            = Setup.token_class_name
            self.class_name_safe       = Setup.token_class_name_safe
            self.name_space            = Setup.token_class_name_space
            self.open_for_derivation_f      = False
            self.token_contains_token_id_f  = True
            self.token_id_type         = CodeUser("size_t", SourceRef())
            self.column_number_type    = CodeUser("size_t", SourceRef())
            self.line_number_type      = CodeUser("size_t", SourceRef())

            self.distinct_db = {}
            self.union_db    = {}

            for name, default_value in token_type_code_fragment_db.iteritems():
                self.__dict__[name] = default_value

        else:
            self._file_name                = Core._file_name
            self._file_name_implementation = Core._file_name_implementation
            self.class_name            = Core.class_name
            self.class_name_safe       = Core.class_name_safe
            self.name_space            = Core.name_space
            self.open_for_derivation_f      = Core.open_for_derivation_f
            self.token_contains_token_id_f  = Core.token_contains_token_id_f
            self.token_id_type         = Core.token_id_type
            self.column_number_type    = Core.column_number_type
            self.line_number_type      = Core.line_number_type

            self.distinct_db           = Core.distinct_db
            self.union_db              = Core.union_db

            for name in token_type_code_fragment_db.keys():
                self.__dict__[name] = Core.__dict__[name]
Code example #10
File: core.py  Project: nyulacska/gpr
def parse_section(fh):
    global default_token_type_definition_triggered_by_mode_definition_f

    # NOTE: End of File is supposed to be reached when trying to read a new
    #       section. Thus, the end-of-file catcher does not encompass the beginning.
    position = fh.tell()
    skip_whitespace(fh)
    word = read_identifier(fh, OnMissingStr="Missing section title")

    error.verify_word_in_list(word, blackboard.all_section_title_list,
                              "Unknown quex section '%s'" % word, fh)
    try:
        # (*) determine what is defined
        #
        #     -- 'mode { ... }'     => define a mode
        #     -- 'start = ...;'     => define the name of the initial mode
        #     -- 'header { ... }'   => define code that is to be pasted on top
        #                              of the engine (e.g. "#include<...>")
        #     -- 'body { ... }'     => define code that is to be pasted in the class' body
        #                              of the engine (e.g. "public: int  my_member;")
        #     -- 'init { ... }'     => define code that is to be pasted in the class' constructors
        #                              of the engine (e.g. "my_member = -1;")
        #     -- 'define { ... }'   => define patterns shorthands such as IDENTIFIER for [a-z]+
        #     -- 'repeated_token_id = QUEX_TKN_ ...;' => enables token repetition, defines
        #                                                the token id to be repeated.
        #     -- 'token { ... }'    => define token ids
        #     -- 'token_type { ... }'  => define a customized token type
        #
        if word in blackboard.fragment_db.keys():
            element_name = blackboard.fragment_db[word]
            fragment = code_fragment.parse(fh,
                                           word,
                                           AllowBriefTokenSenderF=False)
            blackboard.__dict__[element_name] = fragment
            return

        elif word == "start":
            mode_name = parse_identifier_assignment(fh)
            if mode_name == "":
                error.log("Missing mode_name after 'start ='", fh)

            elif not blackboard.initial_mode.sr.is_void():
                error.log("start mode defined more than once!",
                          fh,
                          DontExitF=True)
                error.log("previously defined here",
                          blackboard.initial_mode.sr)

            blackboard.initial_mode = CodeUser(mode_name,
                                               SourceRef.from_FileHandle(fh))
            return

        elif word == "repeated_token":
            blackboard.token_repetition_token_id_list = parse_token_id_definitions(
                fh, NamesOnlyF=True)
            for token_name in blackboard.token_repetition_token_id_list:
                error.verify_word_in_list(
                    token_name[len(Setup.token_id_prefix):],
                    blackboard.token_id_db.keys(),
                    "Token ID '%s' not yet defined." % token_name,
                    fh,
                    ExitF=False,
                    SuppressCode=NotificationDB.warning_repeated_token_not_yet_defined)
            return

        elif word == "define":
            parse_pattern_name_definitions(fh)
            return

        elif word == "token":
            if Setup.token_id_foreign_definition:
                error.log("Token id file '%s' has been specified.\n" \
                          % Setup.token_id_foreign_definition_file \
                          + "All token ids must be specified there. Section 'token'\n" \
                          + "is not allowed.", fh)

            parse_token_id_definitions(fh)
            return

        elif word == "token_type":

            if Setup.token_class_file != "":
                error.log("Section 'token_type' is intended to generate a token class.\n" \
                          + "However, the manually written token class file '%s'" \
                          % repr(Setup.token_class_file) \
                          + "has been specified on the command line.",
                          fh)

            if blackboard.token_type_definition is None:
                blackboard.token_type_definition = token_type.parse(fh)
                return

            # Error case:
            if default_token_type_definition_triggered_by_mode_definition_f:
                error.log(
                    "Section 'token_type' must appear before first mode definition.",
                    fh)
            else:
                error.log("Section 'token_type' has been defined twice.",
                          fh,
                          DontExitF=True)
                error.log("Previously defined here.",
                          blackboard.token_type_definition.sr)
            return

        elif word == "mode":
            # When the first mode is parsed then a token_type definition must be
            # present. If not, the default token type definition is considered.
            if blackboard.token_type_definition is None:
                parse_default_token_definition()
                default_token_type_definition_triggered_by_mode_definition_f = True

            mode.parse(fh)
            return

        else:
            # This case should have been caught by the 'verify_word_in_list' function
            assert False

    except EndOfStreamException:
        fh.seek(position)
        error.error_eof(word, fh)
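
parse_section reads one identifier and branches on it. The keyword dispatch can be condensed into a handler table; the section names below come from the original, but the handlers and the line-based interface are illustrative simplifications.

# Illustrative handler-table version of the keyword dispatch above.
def parse_start(body):
    # 'start = NAME;' => extract NAME
    return ("start-mode", body.replace("=", " ").replace(";", " ").split()[0])

def parse_token(body):
    return ("token-ids", body.strip())

def parse_mode(body):
    return ("mode", body.strip())

SECTION_HANDLER_DB = {
    "start": parse_start,
    "token": parse_token,
    "mode":  parse_mode,
}

def parse_section_line(line):
    word, _, rest = line.partition(" ")
    if word not in SECTION_HANDLER_DB:
        raise ValueError("Unknown quex section '%s'" % word)
    return SECTION_HANDLER_DB[word](rest)

print(parse_section_line("start = PROGRAM;"))   # => ('start-mode', 'PROGRAM')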
Code example #11
def __parse_normal(fh, code_fragment_name):
    code = read_until_closing_bracket(fh, "{", "}")
    return CodeUser(code, SourceRef.from_FileHandle(fh))
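
__parse_normal leans on read_until_closing_bracket, which must respect nested braces. A sketch of such a nesting-aware reader on a plain string; the real quex helper works on a file handle, so this only illustrates the counting logic.

# Nesting-aware bracket reader, sketched on a plain string.
def read_until_closing(text, opener="{", closer="}"):
    depth, out = 1, []                 # the opening '{' was already consumed
    for ch in text:
        if ch == opener:
            depth += 1
        elif ch == closer:
            depth -= 1
            if depth == 0:
                return "".join(out)    # do not include the final '}'
        out.append(ch)
    raise ValueError("missing closing '%s'" % closer)

print(read_until_closing("a { b } c } tail"))   # => 'a { b } c '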
Code example #12
def parse_variable_definition(fh, GroupF=False, already_defined_list=[]):
    """PURPOSE: Parsing of a variable definition consisting of 'type' and 'name.
                Members can be mentioned together in a group, which means that
                they can appear simultaneously. Possible expresions are

                (1) single variables:

                              name0 : type;
                              name1 : type[32];
                              name2 : type*;

                (2) combined variables

                              {
                                  sub_name0 : type0;
                                  sub_name1 : type[64];
                                  sub_name2 : type1*;
                              }

       ARGUMENTS: 

        'GroupF'               allows 'nested variable groups' in curly brackets.

        'already_defined_list' informs about variable names that have already
                               been chosen. It is only used for groups.

       RETURNS:
                 None        on failure to parse a variable definition.
                 array       when a single variable definition was found. 
                                array[0] = UserCodeFragment containing the type. 
                                array[1] = name of the variable.
                 dictionary  if it was a combined variable definition. The dictionary
                               maps: (variable name) ---> (UserCodeFragment with type)
    
    """
    position = fh.tell()

    skip_whitespace(fh)
    name_str = read_identifier(fh)
    if name_str == "":
        if not GroupF or not check(fh, "{"): 
            fh.seek(position); 
            return None
        sub_db = parse_variable_definition_list(fh, "Concurrent union variables", already_defined_list)
        if not check(fh, "}"): 
            fh.seek(position)
            error.log("Missing closing '}' after concurrent variable definition.", fh)
        return [ sub_db ]

    else:
        name_str = name_str.strip()
        if not check(fh, ":"): error.log("Missing ':' after identifier '%s'." % name_str, fh)
        
        if fh.read(1).isspace() == False:
            error.log("Missing whitespace after ':' after identifier '%s'.\n" % name_str \
                    + "The notation has to be: variable-name ':' type ';'.", fh)

        type_str, i = read_until_letter(fh, ";", Verbose=True)
        if i == -1: error.log("missing ';'", fh)
        type_str = type_str.strip()

        return [ CodeUser(type_str, SourceRef.from_FileHandle(fh)), name_str ]
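
The member grammar accepted above is name ':' type ';', with mandatory whitespace after the colon. A regex-based sketch of that grammar on a plain string; it is an illustration, not the streaming parser above.

import re

# 'name : type;' with mandatory whitespace after the colon, as checked above.
VARIABLE_RE = re.compile(r"\s*(\w+)\s*:\s+([^;]+);")

def parse_variable_definitions(text):
    return [(m.group(1), m.group(2).strip())
            for m in VARIABLE_RE.finditer(text)]

print(parse_variable_definitions("name0 : type; name1 : type[32]; name2 : type*;"))
# => [('name0', 'type'), ('name1', 'type[32]'), ('name2', 'type*')]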
Code example #13
class TokenTypeDescriptorCore:
    """Object used during the generation of the TokenTypeDescriptor."""
    def __init__(self, Core=None):
        if Core is None:
            self._file_name                = Setup.output_token_class_file
            self._file_name_implementation = Setup.output_token_class_file_implementation
            if Setup.token_class_name.find("::") != -1:
                Setup.token_class_name,       \
                Setup.token_class_name_space, \
                Setup.token_class_name_safe = \
                        read_namespaced_name(Setup.token_class_name, 
                                             "token class (options --token-class, --tc)")
            self.class_name            = Setup.token_class_name
            self.class_name_safe       = Setup.token_class_name_safe
            self.name_space            = Setup.token_class_name_space
            self.open_for_derivation_f      = False
            self.token_contains_token_id_f  = True
            self.token_id_type         = CodeUser("size_t", SourceRef())
            self.column_number_type    = CodeUser("size_t", SourceRef())
            self.line_number_type      = CodeUser("size_t", SourceRef())

            self.distinct_db = {}
            self.union_db    = {}

            for name, default_value in token_type_code_fragment_db.iteritems():
                self.__dict__[name] = default_value

        else:
            self._file_name                = Core._file_name
            self._file_name_implementation = Core._file_name_implementation
            self.class_name            = Core.class_name
            self.class_name_safe       = Core.class_name_safe
            self.name_space            = Core.name_space
            self.open_for_derivation_f      = Core.open_for_derivation_f
            self.token_contains_token_id_f  = Core.token_contains_token_id_f
            self.token_id_type         = Core.token_id_type
            self.column_number_type    = Core.column_number_type
            self.line_number_type      = Core.line_number_type

            self.distinct_db           = Core.distinct_db
            self.union_db              = Core.union_db

            for name in token_type_code_fragment_db.keys():
                self.__dict__[name] = Core.__dict__[name]
            
    def set_file_name(self, FileName):
        self._file_name = FileName
        ext = Lng[Setup.language].extension_db[Setup.output_file_naming_scheme][E_Files.HEADER_IMPLEMTATION]
        self._file_name_implementation = FileName + ext

    def __repr__(self):
        txt = ""
        if self._file_name != "": 
            txt += "file name: '%s'\n" % self._file_name
        txt += "class:     '%s'\n" % self.class_name
        if self.open_for_derivation_f: 
            txt += "           (with virtual destructor)\n"
        if self.token_contains_token_id_f == False:
            txt += "           (token id not part of token object)\n"
        txt += "namespace: '%s'\n" % repr(self.name_space)[1:-1]
        txt += "type(token_id)      = %s\n" % self.token_id_type.get_text()
        txt += "type(column_number) = %s\n" % self.column_number_type.get_text()
        txt += "type(line_number)   = %s\n" % self.line_number_type.get_text()

        txt += "distinct members {\n"
        # '0' to make sure that it works on an empty sequence, too.
        L = self.distinct_members_type_name_length_max()
        for name, type_code in self.distinct_db.items():
            txt += "    %s%s %s\n" % (type_code.get_text(), " " * (L - len(type_code.get_text())), name)
        txt += "}\n"
        txt += "union members {\n"

        # '0' to make sure that it works on an empty sequence, too.
        L = self.union_members_type_name_length_max()
        for name, type_descr in self.union_db.items():
            if type(type_descr) == dict:
                txt += "    {\n"
                for sub_name, sub_type in type_descr.items():
                    txt += "        %s%s %s\n" % \
                           (sub_type.get_text(), 
                            " " * (L - len(sub_type.get_text())-4), 
                            sub_name)
                txt += "    }\n"
            else:
                txt += "    %s%s %s\n" % \
                       (type_descr.get_text(), 
                        " " * (L - len(type_descr.get_text())), 
                        name)
        txt += "}\n"

        # constructor / copy / destructor
        if not self.constructor.is_whitespace():
            txt += "constructor {\n"
            txt += Lng.SOURCE_REFERENCED(self.constructor)
            txt += "}"
        
        if self.copy is not None:
            txt += "copy {\n"
            txt += Lng.SOURCE_REFERENCED(self.copy)
            txt += "}"

        if not self.destructor.is_whitespace():
            txt += "destructor {\n"
            txt += Lng.SOURCE_REFERENCED(self.destructor)
            txt += "}"

        if not self.body.is_whitespace():
            txt += "body {\n"
            txt += Lng.SOURCE_REFERENCED(self.body)
            txt += "}"

        return txt

    def manually_written(self):
        return False
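
__repr__ pads every type name to the longest one so member names line up in one column. Below is the alignment step in isolation; the data is illustrative and the '+ [0]' mirrors the empty-sequence guard mentioned in the comments above.

# Column alignment as used in __repr__ above, on illustrative data.
members = {"number": "size_t", "name_begin": "const char*"}
L = max([len(t) for t in members.values()] + [0])   # 0 guards the empty case
for name, type_str in sorted(members.items()):
    print("    %s%s %s" % (type_str, " " * (L - len(type_str)), name))
# const char* name_begin
# size_t      number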
Code example #14
File: code_fragment.py  Project: smmckay/quex3
def __parse_normal(fh, code_fragment_name):
    position = fh.tell()
    code = read_until_closing_bracket(fh, "{", "}")
    return CodeUser(code, SourceRef.from_FileHandle(fh, BeginPos=position))
Code example #15
File: token_type.py  Project: smmckay/quex3
class TokenTypeDescriptorCore(object):
    """Object used during the generation of the TokenTypeDescriptor."""
    def __init__(self, Core=None):
        if Core is None:
            # Consistency is maintained via properties (defined below the constructor)
            #    self.class_name      = Setup.token_class_name
            #    self.class_name_safe = Setup.token_class_name_safe
            #    self.name_space      = Setup.token_class_name_space
            #    self.token_id_type   = Setup.token_id_type

            # Consistency maintained via 'set_file_name/get_file_name'
            #    self._file_name                = Setup.output_token_class_file
            #    self._file_name_implementation = Setup.output_token_class_file_implementation

            self.token_id_type_defined_f = False
            self.open_for_derivation_f = False
            self.token_contains_token_id_f = True
            self.token_id_type = "size_t"
            self.column_number_type = CodeUser("size_t", SourceRef())
            self.line_number_type = CodeUser("size_t", SourceRef())

            self.distinct_db = OrderedDict()  # See comment [MEMBER PACKAGING]
            self.union_db = OrderedDict()  # See comment [MEMBER PACKAGING]

            for name, default_value in token_type_code_fragment_db.iteritems():
                self.__dict__[name] = default_value

        else:
            # Consistency is maintained via properties (defined below the constructor)
            Setup.token_class_name = Core.class_name
            Setup.token_class_name_safe = Core.class_name_safe
            Setup.token_class_name_space = Core.name_space
            Setup.token_id_type = Core.token_id_type

            # Consistency maintained via 'set_file_name/get_file_name'
            #    self._file_name                = Setup.output_token_class_file
            #    self._file_name_implementation = Setup.output_token_class_file_implementation

            self.token_id_type_defined_f = Core.token_id_type_defined_f
            self.open_for_derivation_f = Core.open_for_derivation_f
            self.token_contains_token_id_f = Core.token_contains_token_id_f
            self.column_number_type = Core.column_number_type
            self.line_number_type = Core.line_number_type

            self.distinct_db = Core.distinct_db
            self.union_db = Core.union_db

            for name in token_type_code_fragment_db.keys():
                self.__dict__[name] = Core.__dict__[name]

    # Maintain consistency with the token class naming provided in 'Setup'.
    # => User properties, in order to only store in 'Setup'.
    @property
    def class_name(self):
        return Setup.token_class_name

    @class_name.setter
    def class_name(self, N):
        Setup.token_class_name = N

    @property
    def class_name_safe(self):
        return Setup.token_class_name_safe

    @class_name_safe.setter
    def class_name_safe(self, N):
        Setup.token_class_name_safe = N

    @property
    def name_space(self):
        return Setup.token_class_name_space

    @name_space.setter
    def name_space(self, N):
        Setup.token_class_name_space = N

    @property
    def token_id_type(self):
        return Setup.token_id_type

    @token_id_type.setter
    @typed(Value=(str, unicode))
    def token_id_type(self, Value):
        Setup.token_id_type = Value
        self.token_id_type_defined_f = True

    def set_file_name(self, FileName):
        ext = Lng.extension_db[E_Files.HEADER_IMPLEMTATION]
        Setup.output_token_class_file = FileName
        Setup.output_token_class_file_implementation = FileName + ext

    def __repr__(self):
        txt = ""
        if self.get_file_name() != "":
            txt += "file name: '%s'\n" % self.get_file_name()
        txt += "class:     '%s'\n" % self.class_name
        if self.open_for_derivation_f:
            txt += "           (with virtual destructor)\n"
        if self.token_contains_token_id_f == False:
            txt += "           (token id not part of token object)\n"
        txt += "namespace: '%s'\n" % repr(self.name_space)[1:-1]
        txt += "type(token_id)      = %s\n" % self.token_id_type
        txt += "type(column_number) = %s\n" % self.column_number_type.get_text(
        )
        txt += "type(line_number)   = %s\n" % self.line_number_type.get_text()

        txt += "distinct members {\n"
        # '0' to make sure that it works on an empty sequence, too.
        L = self.distinct_members_type_name_length_max()
        for name, type_code in self.distinct_db.items():
            txt += "    %s%s %s\n" % (type_code.get_text(), " " *
                                      (L - len(type_code.get_text())), name)
        txt += "}\n"
        txt += "union members {\n"

        # '0' to make sure that it works on an empty sequence, too.
        L = self.union_members_type_name_length_max()
        for name, type_descr in self.union_db.items():
            if isinstance(type_descr, OrderedDict):
                txt += "    {\n"
                for sub_name, sub_type in type_descr.items():
                    txt += "        %s%s %s\n" % \
                           (sub_type.get_text(),
                            " " * (L - len(sub_type.get_text())-4),
                            sub_name)
                txt += "    }\n"
            else:
                txt += "    %s%s %s\n" % \
                       (type_descr.get_text(),
                        " " * (L - len(type_descr.get_text())),
                        name)
        txt += "}\n"

        # constructor / copy / destructor
        if not self.constructor.is_whitespace():
            txt += "constructor {\n"
            txt += Lng.SOURCE_REFERENCED(self.constructor)
            txt += "}"

        if self.copy is not None:
            txt += "copy {\n"
            txt += Lng.SOURCE_REFERENCED(self.copy)
            txt += "}"

        if not self.destructor.is_whitespace():
            txt += "destructor {\n"
            txt += Lng.SOURCE_REFERENCED(self.destructor)
            txt += "}"

        if not self.body.is_whitespace():
            txt += "body {\n"
            txt += Lng.SOURCE_REFERENCED(self.body)
            txt += "}"

        return txt

    def manually_written(self):
        return False
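
The properties make the descriptor a thin view over the global Setup, so values such as the token class name are stored exactly once. A self-contained sketch of this write-through property pattern; '_Setup' is a stand-in object, not quex's setup module.

# Write-through properties keeping a single source of truth in 'Setup'.
class _Setup(object):
    token_class_name = "Token"

Setup = _Setup()

class Descriptor(object):
    @property
    def class_name(self):
        return Setup.token_class_name      # read through to Setup

    @class_name.setter
    def class_name(self, N):
        Setup.token_class_name = N         # write through to Setup

d = Descriptor()
d.class_name = "MyToken"
print(Setup.token_class_name)              # => MyToken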