Example No. 1
    def _parse_state_bits(self, f):
        # Each non-comment line is "key value"; the key is compiled into a
        # word-boundary pattern so it can later be substituted into other lines.
        with open(f, 'r') as handle:
            lines = handle.readlines()
        d = []
        state_input_pattern = re.compile(r'(?P<key>[^\s]+)\s+(?P<value>.*)')
        while len(lines) > 0:
            line = lines.pop(0)
            line = patterns.comment_pattern.sub("", line)
            line = patterns.leading_whitespace_pattern.sub("", line)
            if line == '':
                continue
            line = slash_expand.expand_all_slashes(line)
            p = state_input_pattern.search(line)
            if p:
                s = r'\b' + p.group('key') + r'\b'
                pattern = re.compile(s)
                d.append((pattern, p.group('value')))
            else:
                die("Bad state line: %s" % line)
        return d
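The pairs returned above are (compiled word-boundary pattern, replacement value). Later examples call an _expand_state_bits_one_line helper whose body is not shown here; a minimal sketch of how such an expander could consume these pairs (the key and value strings below are made up for illustration) is:

import re

def expand_state_bits_one_line(line, state_bits):
    # Substitute every whole-word key with its replacement value.
    for pattern, value in state_bits:
        line = pattern.sub(value, line)
    return line

# Illustrative only: one state-bit entry, then a line that uses the key.
state_bits = [(re.compile(r'\bno_refining_prefix\b'), 'REP=0 OSZ=0')]
print(expand_state_bits_one_line('0x0F 0x58 no_refining_prefix MOD[mm]', state_bits))
# -> 0x0F 0x58 REP=0 OSZ=0 MOD[mm]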
Example No. 2
def parse_state_bits(lines):
    d = []
    state_input_pattern = re.compile(r'(?P<key>[^\s]+)\s+(?P<value>.*)')
    while len(lines) > 0:
        line = lines.pop(0)
        line = comment_pattern.sub("", line)
        line = leading_whitespace_pattern.sub("", line)
        if line == '':
            continue
        line = slash_expand.expand_all_slashes(line)
        p = state_input_pattern.search(line)
        if p:
            s = r'\b' + p.group('key') + r'\b'
            pattern = re.compile(s)
            d.append((pattern, p.group('value')))
        else:
            print("Bad state line: %s"  % line)
            exit()
    return d
Example No. 3
def read_decoder_instruction_file(lines):
    """Taking a slightly different tack with the ISA file because
    it is so large. Processing each line as we encounter it rather
    than buffering up the whole file. Also, just storing the parts
    I need. """
    continuation_pattern = re.compile(r'\\$')
    lines = process_continuations_without_file(lines)      # we must preserve FILE, so define a new function
    nts = {}
    nt = None
    iclass = None
    uname = None
    unamed = None
    ipattern = None
    started = False
    while len(lines) > 0:
        line = lines.pop(0)

        fn = file_pattern.match(line)
        if fn:
            filename = fn.group('file')

        line = comment_pattern.sub("", line)
        line = line.strip()
        if line == '':
            continue
        line = slash_expand.expand_all_slashes(line)

        if udelete_pattern.search(line):
            m = udelete_full_pattern.search(line)
            unamed = m.group('uname')
            logger.debug("REGISTER BAD UNAME: %s" %unamed)
            gs.deleted_unames[unamed] = True
            continue

        if delete_iclass_pattern.search(line):
            m = delete_iclass_full_pattern.search(line)
            iclass = m.group('iclass')
            gs.deleted_instructions[iclass] = True
            continue

        line = expand_state_bits_one_line(line)

        p = nt_pattern.match(line)
        if p:
            nt_name = p.group('ntname')
            if nt_name in nts:
                nt = nts[nt_name]
            else:
                nt = nonterminal_t(nt_name, "")
                nts[nt_name] = nt
            continue

        if left_curly_pattern.match(line):
            if started:
                die("Nested instructions")
            started = True
            iclass = None
            uname = None
            category = None
            extension = None
            cpl = None
            continue
        
        if right_curly_pattern.match(line):
            if not started:
                die("Mis-nested instructions")
            started = False
            iclass = None
            uname = None
            category = None
            extension = None
            cpl = None
            continue
        ic = iclass_pattern.match(line)
        if ic:
            iclass = ic.group('iclass')
            continue

        cp = cpl_pattern.match(line)
        if cp:
            cpl = cp.group('cpl')
            continue

        ca = category_pattern.match(line)
        if ca:
            category = ca.group('category')
            continue

        ex = extension_pattern.match(line)
        if ex:
            extension = ex.group('extension')
            continue

        un = uname_pattern.match(line)
        if un:
            uname = un.group('uname')
            continue
        
        ip = ipattern_pattern.match(line)
        if ip:
            ipattern = ip.group('ipattern')
            continue
        
        if no_operand_pattern.match(line):
            finalize_decode_conversion(iclass, '', ipattern, category, extension, filename, uname, cpl)
            continue

        op = operand_pattern.match(line)
        if op:
            operands = op.group('operands')
            finalize_decode_conversion(iclass, operands, ipattern, category, extension, filename, uname, cpl)
            continue
    return
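The comment above notes that process_continuations_without_file must preserve the FILE marker lines while joining backslash-continued lines, and the continuation_pattern compiled at the top hints at its job. Its real body is not shown; a minimal sketch of the joining step, offered only as an assumption (FILE marker lines pass through untouched because they do not end in a backslash), might look like:

import re

def process_continuations_without_file(lines):
    # Join lines ending in a backslash with the following line(s).
    continuation_pattern = re.compile(r'\\$')
    out = []
    accum = ''
    for line in lines:
        line = line.rstrip('\n')
        if continuation_pattern.search(line):
            accum += continuation_pattern.sub('', line)
        else:
            out.append(accum + line)
            accum = ''
    if accum:
        out.append(accum)
    return out

print(process_continuations_without_file(['PATTERN 0x0F 0x28 \\', '  MOD[mm] MOD=3', 'OPERANDS REG0=XMM_R():w']))
# -> ['PATTERN 0x0F 0x28   MOD[mm] MOD=3', 'OPERANDS REG0=XMM_R():w']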
Example No. 4
    def _process_input_lines(self, fn):
        """We'll still have multiple pattern/operands/iform lines after reading this.
        Stores each record in a list of dictionaries. Each dictionary has key-value pairs
        and the value is always a list"""
        with open(fn) as handle:
            lines = handle.readlines()
        lines = genutil.process_continuations(lines)

        started = False
        recs = []
        nt_name = "Unknown"
        i = 0

        for line in lines:
            i = i + 1
            if i > 500:
                sys.stderr.write(".")
                i = 0
            line = patterns.comment_pattern.sub("", line)
            line = line.strip()
            if line == '':
                continue
            line = slash_expand.expand_all_slashes(line)

            if patterns.udelete_pattern.search(line):
                m = patterns.udelete_full_pattern.search(line)
                unamed = m.group('uname')
                self.deleted_unames[unamed] = True
                continue

            if patterns.delete_iclass_pattern.search(line):
                m = patterns.delete_iclass_full_pattern.search(line)
                iclass = m.group('iclass')
                self.deleted_instructions[iclass] = True
                continue

            line = self._expand_state_bits_one_line(line)

            p = patterns.nt_pattern.match(line)
            if p:
                nt_name = p.group('ntname')
                continue

            if patterns.left_curly_pattern.match(line):
                if started:
                    die("Nested instructions")
                started = True
                d = collections.defaultdict(list)
                d['NTNAME'].append(nt_name)
                continue

            if patterns.right_curly_pattern.match(line):
                if not started:
                    die("Mis-nested instructions")
                started = False
                recs.append(d)
                continue

            if started:
                key, value = line.split(":", 1)
                key = key.strip()
                value = value.strip()
                if value.startswith(':'):
                    die("Double colon error {}".format(line))
                if key == 'IFORM':
                    # fill in missing iforms with empty strings
                    x = len(d['PATTERN']) - 1
                    y = len(d['IFORM'])
                    # if we have more patterns than iforms, add some
                    # blank iforms
                    while y < x:
                        d['IFORM'].append('')
                        y = y + 1

                d[key].append(value)

            else:
                die("Unexpected: [{0}]".format(line))
        sys.stderr.write("\n")
        return recs
Example No. 5
def GetCategoryMatchedICLASS(lines, category_regex="", add_file=False):
    """Taking a slightly different tack with the ISA file because
    it is so large. Processing each line as we encounter it rather
    than buffering up the whole file. Also, just storing the parts
    I need. """
    continuation_pattern = re.compile(r'\\$')
    match_category_pattern = re.compile(category_regex)
    lines = process_continuations(lines)
    nts = {}
    nt = None
    iclass = None
    uname = None
    unamed = None
    ipattern = None
    started = False

    is_matched_class = False
    has_ext = False
    category = ""
    filename = ""
    filename_line = ""

    categorys = {}

    i = 0
    while len(lines) > 0:
        i += 1
        line = lines.pop(0)

        if add_file:
            fn = file_pattern.search(line)
            if fn:
                filename = fn.group("filename")
                filename_line = line
                continue

        line = comment_pattern.sub("", line)
        #line = leading_whitespace_pattern.sub("",line)
        line = line.strip()
        if line == '':
            continue
        line = slash_expand.expand_all_slashes(line)

        if udelete_pattern.search(line):
            m = udelete_full_pattern.search(line)
            unamed = m.group('uname')
            print("LINE %d : REGISTER BAD UNAME: %s" % (i, unamed))
            continue

        if delete_iclass_pattern.search(line):
            m = delete_iclass_full_pattern.search(line)
            iclass = m.group('iclass')
            continue

        if left_curly_pattern.match(line):
            if started:
                util.die("LINE %d : Nested instructions" % i)
            tmp_lines = []
            started = True
            iclass = None
            uname = None
            is_matched_class = False
            has_ext = False
            tmp_lines.append(line)
            continue

        if started:
            tmp_lines.append(line)

        if right_curly_pattern.match(line):
            if not started:
                util.die("LINE %d : Mis-nested instructions" % i)
            # ======= IS_MATCHED ======
            if is_matched_class:
                if add_file:
                    categorys[category].append(filename_line)
                categorys[category].extend(tmp_lines)
            # ======= HAS_EXT ======
            if not has_ext:
                print("LINE %d : ICLASS %s has no category" % (i, iclass))

            started = False
            iclass = None
            uname = None
            continue
        ic = iclass_pattern.match(line)
        if ic:
            iclass = ic.group('iclass')
            continue

        ext = category_pattern.match(line)
        if ext:
            category = ext.group("category")
            has_ext = True
            if category not in categorys:
                categorys[category] = []
            if match_category_pattern.match(category):
                is_matched_class = True

        un = uname_pattern.match(line)
        if un:
            uname = un.group('uname')
            continue

        ip = ipattern_pattern.match(line)
        if ip:
            ipattern = ip.group('ipattern')
            continue

        if no_operand_pattern.match(line):
            continue

        op = operand_pattern.match(line)
        if op:
            operands = op.group('operands')
            continue
    return categorys
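GetCategoryMatchedICLASS returns a dict mapping each CATEGORY string to the instruction-block lines whose category matched the supplied regex; categories that never matched keep an empty list. A usage sketch, with a hypothetical input file name:

# Hypothetical path; in the real build these lines come from the
# preprocessed ISA file.
with open('all-dec-instructions.txt') as f:
    isa_lines = f.readlines()

by_category = GetCategoryMatchedICLASS(isa_lines, category_regex=r'AVX', add_file=True)
for category, chunk in sorted(by_category.items()):
    print("%-24s %d lines retained" % (category, len(chunk)))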
Example No. 6
def parse_decode_lines(lines):
    """ Read the flat decoder files (not the ISA file).
    
    Return a tuple:
        ( dict of nonterminals, dict of nonterminal lookup functions,
          dict of repeated nonterminals, dict of repeated lookup functions )
        
        This parses the so-called flat format with the vertical
        bar used for all the non-instruction tables.

        For decode the semantics are:
            preconditions | dec-actions
        However for encode, the semantics change to:
            enc-actions  | conditions

        And we must take some of the "enc-actions"  and add them to the preconditions.
        These include the actions associated with: MODE,SMODE,EOSZ,EASZ
    """
    nts = {}
    ntlufs = {}
    # Some nt/ntluf names have more than one definition, so we use
    # separate dicts to record the repeated definitions.
    repeat_nts = {}
    repeat_ntlufs = {}

    while len(lines) > 0:
        line = lines.pop(0)

        fn = file_pattern.match(line)
        if fn:
            filename = fn.group('file')
        #msge("LINEOUT:" + line)
        line = comment_pattern.sub("", line)
        line = leading_whitespace_pattern.sub("", line)
        line = line.rstrip()
        if line == '':
            continue
        line = slash_expand.expand_all_slashes(line)

        p = ntluf_pattern.match(line)
        if p:
            nt_name = p.group('ntname')
            ret_type = p.group('rettype')
            # create a new nonterminal to use
            nt = nonterminal_t(nt_name, filename, ret_type)
            if nt_name in ntlufs:
                # A non-None entry means this name has only been defined once so far.
                if ntlufs[nt_name]:
                    tmp = ntlufs[nt_name]
                    repeat_ntlufs[nt_name] = [tmp, nt]
                    ntlufs[nt_name] = None
                else:
                    repeat_ntlufs[nt_name].append(nt)
            else:
                ntlufs[nt_name] = nt
            continue

        p = nt_pattern.match(line)
        if p:
            nt_name = p.group('ntname')
            # create a new nonterminal to use
            nt = nonterminal_t(nt_name, filename)
            if nt_name in nts:
                # A non-None entry means this name has only been defined once so far.
                if nts[nt_name]:
                    tmp = nts[nt_name]
                    repeat_nts[nt_name] = [tmp, nt]
                    nts[nt_name] = None
                else:
                    repeat_nts[nt_name].append(nt)
            else:
                nts[nt_name] = nt
            continue

        p = decode_rule_pattern.match(line)
        if p:
            conds = p.group('cond').split()      # rhs from an encode perspective (decode operands)
            actions = p.group('action').split()  # lhs from an encode perspective (decode patterns)
            rule = parse_decode_rule(conds, actions, line, nt.name)
            if rule:
                nt.add(rule)
            if nt.multiple_otherwise_rules():
                die("Multiple otherwise rules in %s -- noninvertible" % nt_name)
            continue

        die("Unhandled line: %s" % line)

    return (nts, ntlufs, repeat_nts, repeat_ntlufs)
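The docstring describes the flat format as preconditions | dec-actions separated by a vertical bar. The actual nt_pattern and decode_rule_pattern regexes are defined elsewhere in the generator; a plausible minimal version, offered purely as an assumption for illustration, is:

import re

# Assumed shapes, inferred from the docstring above -- the generator's
# real nt_pattern / decode_rule_pattern may differ.
nt_pattern = re.compile(r'^(?P<ntname>\w+)\(\)::')
decode_rule_pattern = re.compile(r'^(?P<cond>[^|]+)\|(?P<action>.+)$')

m = decode_rule_pattern.match('MOD=3 REG=0 | OUTREG=XED_REG_EAX')
if m:
    print(m.group('cond').split(), '->', m.group('action').split())
# -> ['MOD=3', 'REG=0'] -> ['OUTREG=XED_REG_EAX']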
Example No. 7
def parse_encode_lines(lines, state_bits):
    """
    Returns a tuple of two dictionaries: (1) a dictionary of
    sequencer_t's and (2) a dictionary of nonterminal_t's
    """
    nts = {}     # nonterminal_t's
    ntlufs = {}  # nonterminal_t's used as lookup functions
    seqs = {}    # sequencer_t's

    # Some nt/ntluf/seq names have more than one definition, so we use
    # three dicts to record the repeated definitions.
    repeat_nts = {}
    repeat_ntlufs = {}
    repeat_seqs = {}
    i = 0
    while len(lines) > 0:
        line = lines.pop(0)

        fn = file_pattern.match(line)
        if fn:
            filename = fn.group('file')

        line = comment_pattern.sub("", line)
        line = leading_whitespace_pattern.sub("", line)
        if line == '':
            continue
        line = slash_expand.expand_all_slashes(line)

        c = curly_pattern.search(line)
        if c:
            line = re.sub("{", " { ", line)
            line = re.sub("}", " } ", line)

        sequence = sequence_pattern.match(line)
        if sequence:
            seq = sequencer_t(sequence.group('seqname'), filename)
            if seq.name in seqs:
                # A non-None entry means this name has only been defined once so far.
                if seqs[seq.name]:
                    tmp = seqs[seq.name]
                    repeat_seqs[seq.name] = [tmp, seq]
                    seqs[seq.name] = None
                else:
                    repeat_seqs[seq.name].append(seq)
            else:
                seqs[seq.name] = seq
            #msg("SEQ MATCH %s" % seq.name)
            nt = None
            continue

        p = ntluf_pattern.match(line)
        if p:
            nt_name = p.group('ntname')
            ret_type = p.group('rettype')
            # create a new nonterminal to use
            nt = nonterminal_t(nt_name, filename, ret_type)
            if nt_name in ntlufs:
                # A non-None entry means this name has only been defined once so far.
                if ntlufs[nt_name]:
                    tmp = ntlufs[nt_name]
                    repeat_ntlufs[nt_name] = [tmp, nt]
                    ntlufs[nt_name] = None
                else:
                    repeat_ntlufs[nt_name].append(nt)
            else:
                ntlufs[nt_name] = nt
            seq = None
            continue

        m = nt_pattern.match(line)
        if m:
            nt_name = m.group('ntname')
            nt = nonterminal_t(nt_name, filename)
            if nt_name in nts:
                # A non-None entry means this name has only been defined once so far.
                if nts[nt_name]:
                    tmp = nts[nt_name]
                    repeat_nts[nt_name] = [tmp, nt]
                    nts[nt_name] = None
                else:
                    repeat_nts[nt_name].append(nt)
            else:
                nts[nt_name] = nt
            seq = None
            continue
        a = arrow_pattern.match(line)
        if a:
            conds = a.group('cond').split()
            actns = a.group('action').split()
            #msg("ARROW" + str(conds) + "=>" + str(actions))
            conditions = conditions_t()
            for c in conds:
                conditions.and_cond(c)
            rule = rule_t(conditions, actns, nt_name)
            if seq:
                seq.add(rule)
            else:
                # we do not need the rules otherwise->error/nothing in the
                # new encoding structure (hash tables).
                # instead we are holding this info in a matching attribute
                if rule.conditions.and_conditions[0].is_otherwise():
                    if rule.actions[0].is_nothing():
                        nt.otherwise = [actions.gen_return_action('1')]
                    elif rule.actions[0].is_error():
                        nt.otherwise = [actions.gen_return_action('0')]
                    else:
                        nt.otherwise = [actions.action_t(x) for x in actns]
                        # in case we have a valid action for the otherwise
                        # rule we should finish it with returning 1,
                        # which is "not an error"
                        nt.otherwise.append(actions.gen_return_action('1'))
                else:
                    nt.add(rule)
        else:
            for nt in line.split():
                seq.add(nt)
    return (seqs, nts, ntlufs, repeat_seqs, repeat_nts, repeat_ntlufs)
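Examples 6 and 7 repeat the same bookkeeping for names defined more than once: keep the first definition in the primary dict, and once a second definition appears, move them all into the repeat_* dict and leave a None placeholder behind. That logic could be factored into one helper; a sketch (the key GPR32_R below is only an example name):

def record_definition(primary, repeats, name, obj):
    # Keep the first definition in `primary`. On a second definition,
    # move both into `repeats[name]` and leave None behind as a marker;
    # further definitions are appended to the same list.
    if name in primary:
        if primary[name] is not None:
            repeats[name] = [primary[name], obj]
            primary[name] = None
        else:
            repeats[name].append(obj)
    else:
        primary[name] = obj

nts, repeat_nts = {}, {}
record_definition(nts, repeat_nts, 'GPR32_R', 'first definition')
record_definition(nts, repeat_nts, 'GPR32_R', 'second definition')
print(nts['GPR32_R'], repeat_nts['GPR32_R'])
# -> None ['first definition', 'second definition']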
Example No. 8
    def _process_input_lines(self, fn):
        """We'll still have multiple pattern/operands/iform lines after reading this.
        Stores each record in a list of dictionaries. Each dictionary has key-value pairs
        and the value is always a list"""
        with open(fn) as handle:
            lines = handle.readlines()
        lines = genutil.process_continuations(lines)

        started = False
        recs = []
        nt_name = "Unknown"
        i = 0

        for line in lines:
            i = i + 1
            if i > 500:
                sys.stderr.write(".")
                i = 0
            line = patterns.comment_pattern.sub("", line)
            line = line.strip()
            if line == '':
                continue
            line = slash_expand.expand_all_slashes(line)

            if patterns.udelete_pattern.search(line):
                m = patterns.udelete_full_pattern.search(line)
                unamed = m.group('uname')
                self.deleted_unames[unamed] = True
                continue

            if patterns.delete_iclass_pattern.search(line):
                m = patterns.delete_iclass_full_pattern.search(line)
                iclass = m.group('iclass')
                self.deleted_instructions[iclass] = True
                continue

            line = self._expand_state_bits_one_line(line)

            p = patterns.nt_pattern.match(line)
            if p:
                nt_name = p.group('ntname')
                continue

            if patterns.left_curly_pattern.match(line):
                if started:
                    die("Nested instructions")
                started = True
                d = collections.defaultdict(list)
                d['NTNAME'].append(nt_name)
                continue

            if patterns.right_curly_pattern.match(line):
                if not started:
                    die("Mis-nested instructions")
                started = False
                recs.append(d)
                continue

            if started:
                key, value = line.split(":", 1)
                key = key.strip()
                value = value.strip()
                if value.startswith(':'):
                    die("Double colon error {}".format(line))
                if key == 'PATTERN':
                    # Since some patterns/operand sequences have
                    # iforms and others do not, we can avoid tripping
                    # ourselves up by always adding an iform when we
                    # see the PATTERN token. And if we do see an IFORM
                    # token, we can replace the last one in the list.
                    d['IFORM'].append('')
                if key == 'IFORM':
                    # Replace the last one in the list which was added
                    # when we encountered the PATTERN token.
                    d[key][-1] = value
                else:
                    # for normal tokens we just append them
                    d[key].append(value)
            else:
                die("Unexpected: [{0}]".format(line))
        sys.stderr.write("\n")
        return recs
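Because the parser above appends an empty IFORM placeholder for every PATTERN and overwrites it when an IFORM token follows, each record's PATTERN and IFORM lists stay the same length. Assuming, as in the ISA files, that every PATTERN line is paired with an OPERANDS line, the three lists can be zipped. A sketch of a hypothetical consumer (the field contents below are illustrative, not taken from a real ISA file):

import collections

def expand_record(rec):
    # One (iclass, pattern, operands, iform) tuple per PATTERN line; the
    # parser above keeps the IFORM list the same length as PATTERN.
    iclass = rec['ICLASS'][0] if rec['ICLASS'] else 'UNKNOWN'
    for pattern, operands, iform in zip(rec['PATTERN'], rec['OPERANDS'], rec['IFORM']):
        yield iclass, pattern, operands, iform

rec = collections.defaultdict(list)
rec['ICLASS'].append('ADD')
rec['PATTERN'].append('0x00 MOD[mm] MOD!=3 REG[rrr] RM[nnn]')
rec['OPERANDS'].append('MEM0:rw:b REG0=GPR8_R():r')
rec['IFORM'].append('')   # placeholder added automatically when PATTERN was seen
for row in expand_record(rec):
    print(row)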