Example #1
class TokenTypeDescriptorCore:
    """Object used during the generation of the TokenTypeDescriptor."""
    def __init__(self, Core=None):
        if Core is None:
            self._file_name                = Setup.output_token_class_file
            self._file_name_implementation = Setup.output_token_class_file_implementation
            if Setup.token_class_name.find("::") != -1:
                Setup.token_class_name,       \
                Setup.token_class_name_space, \
                Setup.token_class_name_safe = \
                        read_namespaced_name(Setup.token_class_name, 
                                             "token class (options --token-class, --tc)")
            self.class_name            = Setup.token_class_name
            self.class_name_safe       = Setup.token_class_name_safe
            self.name_space            = Setup.token_class_name_space
            self.open_for_derivation_f      = False
            self.token_contains_token_id_f  = True
            self.token_id_type         = CodeUser("size_t", SourceRef())
            self.column_number_type    = CodeUser("size_t", SourceRef())
            self.line_number_type      = CodeUser("size_t", SourceRef())

            self.distinct_db = {}
            self.union_db    = {}

            for name, default_value in token_type_code_fragment_db.iteritems():
                self.__dict__[name] = default_value

        else:
            self._file_name                = Core._file_name
            self._file_name_implementation = Core._file_name_implementation
            self.class_name            = Core.class_name
            self.class_name_safe       = Core.class_name_safe
            self.name_space            = Core.name_space
            self.open_for_derivation_f      = Core.open_for_derivation_f
            self.token_contains_token_id_f  = Core.token_contains_token_id_f
            self.token_id_type         = Core.token_id_type
            self.column_number_type    = Core.column_number_type
            self.line_number_type      = Core.line_number_type

            self.distinct_db           = Core.distinct_db
            self.union_db              = Core.union_db

            for name in token_type_code_fragment_db.keys():
                self.__dict__[name] = Core.__dict__[name]
            
    def set_file_name(self, FileName):
        self._file_name = FileName
        ext = Lng[Setup.language].extension_db[Setup.output_file_naming_scheme][E_Files.HEADER_IMPLEMTATION]
        self._file_name_implementation = FileName + ext

    def __repr__(self):
        txt = ""
        if self._file_name != "": 
            txt += "file name: '%s'\n" % self._file_name
        txt += "class:     '%s'\n" % self.class_name
        if self.open_for_derivation_f: 
            txt += "           (with virtual destructor)\n"
        if not self.token_contains_token_id_f:
            txt += "           (token id not part of token object)\n"
        txt += "namespace: '%s'\n" % repr(self.name_space)[1:-1]
        txt += "type(token_id)      = %s\n" % self.token_id_type.get_text()
        txt += "type(column_number) = %s\n" % self.column_number_type.get_text()
        txt += "type(line_number)   = %s\n" % self.line_number_type.get_text()

        txt += "distinct members {\n"
        # '0' makes sure that it works on an empty sequence, too.
        L = self.distinct_members_type_name_length_max()
        for name, type_code in self.distinct_db.items():
            txt += "    %s%s %s\n" % (type_code.get_text(), " " * (L - len(type_code.get_text())), name)
        txt += "}\n"
        txt += "union members {\n"

        # '0' makes sure that it works on an empty sequence, too.
        L = self.union_members_type_name_length_max()
        for name, type_descr in self.union_db.items():
            if isinstance(type_descr, dict):
                txt += "    {\n"
                for sub_name, sub_type in type_descr.items():
                    txt += "        %s%s %s\n" % \
                           (sub_type.get_text(), 
                            " " * (L - len(sub_type.get_text())-4), 
                            sub_name)
                txt += "    }\n"
            else:
                txt += "    %s%s %s\n" % \
                       (type_descr.get_text(), 
                        " " * (L - len(type_descr.get_text())), 
                        name)
        txt += "}\n"

        # constructor / copy / destructor
        if not self.constructor.is_whitespace():
            txt += "constructor {\n"
            txt += Lng.SOURCE_REFERENCED(self.constructor)
            txt += "}"
        
        if self.copy is not None:
            txt += "copy {\n"
            txt += Lng.SOURCE_REFERENCED(self.copy)
            txt += "}"

        if not self.destructor.is_whitespace():
            txt += "destructor {\n"
            txt += Lng.SOURCE_REFERENCED(self.destructor)
            txt += "}"

        if not self.body.is_whitespace():
            txt += "body {\n"
            txt += Lng.SOURCE_REFERENCED(self.body)
            txt += "}"

        return txt

    def manually_written(self):
        return False
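
The constructor above follows the 'Core=None' idiom: called without an argument it initializes the descriptor from the global 'Setup', while passing an existing instance produces a copy that mirrors that instance's state. The sketch below shows the same idiom in isolation (illustrative only; 'Config', 'name' and 'flags' are hypothetical names, not Quex code). Note that the copy branch is shallow, so mutable members such as the 'distinct_db'/'union_db' dictionaries remain shared between the original and the copy.

class Config(object):
    """Default-construct from global defaults, or copy another instance."""
    def __init__(self, Core=None):
        if Core is None:
            # Default construction: take values from some global configuration.
            self.name  = "default"
            self.flags = {}
        else:
            # Copy construction: mirror the other instance's state (shallow).
            self.name  = Core.name
            self.flags = Core.flags

base  = Config()
clone = Config(base)
assert clone.name == base.name
assert clone.flags is base.flags   # shallow copy: the dict object is shared
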
Example #2
class TokenTypeDescriptorCore(object):
    """Object used during the generation of the TokenTypeDescriptor."""
    def __init__(self, Core=None):
        if Core is None:
            # Consistency is maintained via the properties defined below the constructor:
            #    self.class_name      = Setup.token_class_name
            #    self.class_name_safe = Setup.token_class_name_safe
            #    self.name_space      = Setup.token_class_name_space
            #    self.token_id_type   = Setup.token_id_type

            # Consistency maintained via 'set_file_name/get_file_name'
            #    self._file_name                = Setup.output_token_class_file
            #    self._file_name_implementation = Setup.output_token_class_file_implementation

            self.token_id_type_defined_f = False
            self.open_for_derivation_f = False
            self.token_contains_token_id_f = True
            self.token_id_type = "size_t"
            self.column_number_type = CodeUser("size_t", SourceRef())
            self.line_number_type = CodeUser("size_t", SourceRef())

            self.distinct_db = OrderedDict()  # See comment [MEMBER PACKAGING]
            self.union_db = OrderedDict()  # See comment [MEMBER PACKAGING]

            for name, default_value in token_type_code_fragment_db.iteritems():
                self.__dict__[name] = default_value

        else:
            # Consistency is maintained via the properties defined below the constructor.
            Setup.token_class_name = Core.class_name
            Setup.token_class_name_safe = Core.class_name_safe
            Setup.token_class_name_space = Core.name_space
            Setup.token_id_type = Core.token_id_type

            # Consistency maintained via 'set_file_name/get_file_name'
            #    self._file_name                = Setup.output_token_class_file
            #    self._file_name_implementation = Setup.output_token_class_file_implementation

            self.token_id_type_defined_f = Core.token_id_type_defined_f
            self.open_for_derivation_f = Core.open_for_derivation_f
            self.token_contains_token_id_f = Core.token_contains_token_id_f
            self.column_number_type = Core.column_number_type
            self.line_number_type = Core.line_number_type

            self.distinct_db = Core.distinct_db
            self.union_db = Core.union_db

            for name in token_type_code_fragment_db.keys():
                self.__dict__[name] = Core.__dict__[name]

    # Maintain consistency with the token class naming provided in 'Setup'.
    # => Use properties, so that the values are stored only in 'Setup'.
    @property
    def class_name(self):
        return Setup.token_class_name

    @class_name.setter
    def class_name(self, N):
        Setup.token_class_name = N

    @property
    def class_name_safe(self):
        return Setup.token_class_name_safe

    @class_name_safe.setter
    def class_name_safe(self, N):
        Setup.token_class_name_safe = N

    @property
    def name_space(self):
        return Setup.token_class_name_space

    @name_space.setter
    def name_space(self, N):
        Setup.token_class_name_space = N

    @property
    def token_id_type(self):
        return Setup.token_id_type

    @token_id_type.setter
    @typed(Value=(str, unicode))
    def token_id_type(self, Value):
        Setup.token_id_type = Value
        self.token_id_type_defined_f = True

    def set_file_name(self, FileName):
        ext = Lng.extension_db[E_Files.HEADER_IMPLEMTATION]
        Setup.output_token_class_file = FileName
        Setup.output_token_class_file_implementation = FileName + ext

    def __repr__(self):
        txt = ""
        if self.get_file_name() != "":
            txt += "file name: '%s'\n" % self.get_file_name()
        txt += "class:     '%s'\n" % self.class_name
        if self.open_for_derivation_f:
            txt += "           (with virtual destructor)\n"
        if not self.token_contains_token_id_f:
            txt += "           (token id not part of token object)\n"
        txt += "namespace: '%s'\n" % repr(self.name_space)[1:-1]
        txt += "type(token_id)      = %s\n" % self.token_id_type
        txt += "type(column_number) = %s\n" % self.column_number_type.get_text(
        )
        txt += "type(line_number)   = %s\n" % self.line_number_type.get_text()

        txt += "distinct members {\n"
        # '0' makes sure that it works on an empty sequence, too.
        L = self.distinct_members_type_name_length_max()
        for name, type_code in self.distinct_db.items():
            txt += "    %s%s %s\n" % (type_code.get_text(), " " *
                                      (L - len(type_code.get_text())), name)
        txt += "}\n"
        txt += "union members {\n"

        # '0' makes sure that it works on an empty sequence, too.
        L = self.union_members_type_name_length_max()
        for name, type_descr in self.union_db.items():
            if isinstance(type_descr, OrderedDict):
                txt += "    {\n"
                for sub_name, sub_type in type_descr.items():
                    txt += "        %s%s %s\n" % \
                           (sub_type.get_text(),
                            " " * (L - len(sub_type.get_text())-4),
                            sub_name)
                txt += "    }\n"
            else:
                txt += "    %s%s %s\n" % \
                       (type_descr.get_text(),
                        " " * (L - len(type_descr.get_text())),
                        name)
        txt += "}\n"

        # constructor / copy / destructor
        if not self.constructor.is_whitespace():
            txt += "constructor {\n"
            txt += Lng.SOURCE_REFERENCED(self.constructor)
            txt += "}"

        if self.copy is not None:
            txt += "copy {\n"
            txt += Lng.SOURCE_REFERENCED(self.copy)
            txt += "}"

        if not self.destructor.is_whitespace():
            txt += "destructor {\n"
            txt += Lng.SOURCE_REFERENCED(self.destructor)
            txt += "}"

        if not self.body.is_whitespace():
            txt += "body {\n"
            txt += Lng.SOURCE_REFERENCED(self.body)
            txt += "}"

        return txt

    def manually_written(self):
        return False
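
Example #2 drops the duplicated name members and instead exposes them as properties that read from and write to 'Setup', so the global configuration remains the single source of truth. The sketch below illustrates that pattern in isolation (illustrative only; '_Settings', 'SETTINGS' and 'Descriptor' are hypothetical names, not Quex code).

class _Settings(object):
    """Stand-in for the global 'Setup' object: the single source of truth."""
    token_class_name = "Token"

SETTINGS = _Settings()

class Descriptor(object):
    """Exposes 'class_name' as a property that stores only in SETTINGS."""
    @property
    def class_name(self):
        return SETTINGS.token_class_name

    @class_name.setter
    def class_name(self, value):
        SETTINGS.token_class_name = value

d = Descriptor()
d.class_name = "MyToken"                       # the write goes through to SETTINGS
assert SETTINGS.token_class_name == "MyToken"  # ... and is visible globally
assert d.class_name == "MyToken"               # reads always reflect SETTINGS

Because no value is cached on the descriptor instance, the descriptor and the settings object can never disagree, which is the consistency the comments in Example #2 refer to.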