Example #1
def rearrange_regs(regs_list):
    """Return a list of enumer.enumer_values_t objects to be passed to
       enum_txt_writer"""
    groups = uniqueify([x.rtype for x in regs_list])
    msgb("RGROUPS", str(groups))
    enumvals = []
    for g in groups:
        k = list(filter(lambda x: x.rtype == g, regs_list))
        k.sort(key=_key_reg_ordinal)
        first = '%s_FIRST' % (g)
        last = '%s_LAST' % (g)

        # first
        enumvals.append(
            enumer.enumer_value_t(k[0].name, display_str=k[0].display_str))
        enumvals.append(
            enumer.enumer_value_t(first, value=k[0].name,
                                  doxygen='//< PSEUDO'))

        # everything in the middle
        if len(k) > 1:
            enumvals.extend([
                enumer.enumer_value_t(x.name, display_str=x.display_str)
                for x in k[1:]
            ])
        # last
        enumvals.append(
            enumer.enumer_value_t(last, value=k[-1].name, doxygen='//< PSEUDO'))

    return enumvals
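
For orientation, a hedged, made-up illustration of the ordering this produces for a single group; the group and register names below are invented, and the GPR_FIRST/GPR_LAST entries are the pseudo sentinels appended above:

    # hypothetical group 'GPR' whose ordinal-sorted members are RAX, RBX, RCX
    #   RAX                        <- first real register
    #   GPR_FIRST = RAX            //< PSEUDO
    #   RBX
    #   RCX                        <- last real register
    #   GPR_LAST  = RCX            //< PSEUDO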
Example #2
def emit_ild_enum_dups(agi):
    evalues = []

    sorted_list = sorted(agi.map_info, key=lambda x: x.map_name)

    for mi in sorted_list:
        val = None
        if isinstance(mi.map_id, int):
            val = str(mi.map_id)

        e = enumer.enumer_value_t(mi.map_name.upper(), val)
        evalues.append(e)

    evalues.append('MAP_INVALID')

    enum = enum_txt_writer.enum_info_t(evalues,
                                       agi.common.options.xeddir,
                                       agi.common.options.gendir,
                                       'xed-ild',
                                       'xed_ild_map_enum_t',
                                       'XED_ILD_',
                                       cplusplus=False)

    enum.run_enumer()
    agi.add_file_name(enum.src_full_file_name)
    agi.add_file_name(enum.hdr_full_file_name, header=True)
    agi.all_enums['xed_ild_map_enum_t'] = evalues
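
As a rough, made-up illustration (the field names come from the loop above, the values do not): a map_info record with map_name 'map0' and an integer map_id of 0 becomes the token MAP0 with value '0', which enum_txt_writer presumably emits with the XED_ILD_ prefix as part of xed_ild_map_enum_t; the trailing bare string 'MAP_INVALID' is appended with no explicit value, so it simply follows the last map as a sentinel.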
Example #3
    def prepare_lines(self):
        """Convert the lines to the appropriate type for emitting the
        enumeration"""
        self.tuples = []
        for line in self.lines:
            if isinstance(line, enumer.enumer_value_t):
                self.tuples.append(line)
            elif type(line) == tuple:
                if len(line) == 3:
                    (token, value, comment) = line
                else:
                    genutil.die("Cannot handle line: %s" % (str(line)))
                token = self.prep_name(token)
                self.tuples.append(enumer.enumer_value_t(token, value, comment))
            else:
                token = self.prep_name(line)
                self.tuples.append(enumer.enumer_value_t(token))
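
A hedged sketch of the three input shapes this loop accepts; the concrete tokens and comments below are invented:

    # hypothetical mix of line shapes handled by prepare_lines()
    lines = [enumer.enumer_value_t('INVALID'),        # already an enumer_value_t
             ('LAST', None, 'pseudo entry'),          # 3-tuple: (token, value, comment)
             'SOMETHING']                             # bare token string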
Example #4
    def read_file(self):
        """Read in an existing enumeration file name, and build our
        internal enumer structure. Return a tuple with the consumed data."""
        stream_ifdef = ''
        lines = open(self.enum_fn, 'r').readlines()
        simple_tuples = []
        density = 'automatic'
        namespace = None
        proto_prefix = ''
        extra_header = []
        cplusplus = False
        for line in lines:
            nline = metaenum_t.comment_pattern.sub('', line).strip()
            if len(nline) == 0:
                continue
            wrds = nline.split()
            if wrds[0] == 'cplusplus':
                cplusplus = True
            elif wrds[0] == 'namespace':
                namespace = wrds[1]
            elif wrds[0] == 'hfn':
                hfn = wrds[1]
            elif wrds[0] == 'cfn':
                cfn = wrds[1]
            elif wrds[0] == 'density':
                density = wrds[1]
            elif wrds[0] == 'prefix':
                prefix = wrds[1]
            elif wrds[0] == 'typename':
                typename = wrds[1]
            elif wrds[0] == 'stream_ifdef':
                stream_ifdef = wrds[1]
            elif wrds[0] == 'proto_prefix':
                proto_prefix = wrds[1]
            elif wrds[0] == 'extra_header':
                extra_header.append(wrds[1])
            else:
                token = wrds[0]
                comment = None
                value = None
                if len(wrds) > 1:
                    if metaenum_t.doxygen_comment_pattern.match(wrds[1]):
                        comment = ' '.join(wrds[1:])
                    else:
                        value = wrds[1]
                        if len(wrds) > 2:
                            comment = ' '.join(wrds[2:])
                simple_tuples.append((token, value, comment))

        self.tuples = []
        for token, value, comment in simple_tuples:
            self.tuples.append(enumer.enumer_value_t(token, value, comment))

        self.cfn = cfn
        self.hfn = hfn
        self.density = density
        self.namespace = namespace
        self.type_name = typename
        self.prefix = prefix
        self.stream_ifdef = stream_ifdef
        self.proto_prefix = proto_prefix
        self.extra_header = extra_header
        self.cplusplus = cplusplus
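
For reference, a hedged sketch of the text file format this parser accepts; the directive keywords come straight from the branches above, while the file names, type name, tokens, and the '//<' comment marker are guesses (the real marker is whatever doxygen_comment_pattern matches):

    namespace XED
    cfn xed-example-enum.c
    hfn xed-example-enum.h
    prefix XED_EXAMPLE_
    typename xed_example_enum_t
    INVALID 0 //< token with an explicit value and a trailing comment
    SOMETHING //< token whose value is omitted
    LAST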