Example #1
 def Expects(self, State):
     keys = self.StateTokenMap.keys()
     Tokens = kjSet.NewSet([])
     for (state1, token) in keys:
         if State == state1:
             kjSet.addMember(token, Tokens)
     return kjSet.get_elts(Tokens)
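
Expects collects every token that can legally appear in a given parser state: it scans the (state, token) keys of StateTokenMap and gathers the tokens whose state component matches. A minimal plain-Python sketch of the same idea, using a builtin set instead of the kjSet helpers (the names below are illustrative, not part of kjParser):

 # Sketch only: plain-Python equivalent of Expects; assumes the map is
 # keyed by (state, token) pairs, as in the kjParser code above.
 def expected_tokens(state_token_map, state):
     tokens = set()
     for (map_state, token) in state_token_map:
         if map_state == state:
             tokens.add(token)
     return list(tokens)

 # toy usage
 toy_map = {(1, "if"): None, (1, "id"): None, (2, "then"): None}
 print(expected_tokens(toy_map, 1))   # ['if', 'id'] in some order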
Example #2
  def SLRFixDFA(self):
     DFA = self.DFA
     NFA = self.SLRNFA
     # look through the states (except 0=success) of the DFA
     # initially don't add any new states, just record
     # actions to be done
     #   uses convention that 0 is successful final state

     # ToDo is a dictionary which maps 
      #     (State, Token) to an item to reduce
     ToDo = {}
     Error = None
     for State in range(1, len(DFA.States) ):
        # look for a final item for a rule in this state
        fromNFAindices = kjSet.get_elts(DFA.States[State][1])
        for NFAindex in fromNFAindices:
           item = NFA.States[NFAindex][1]
           # if the item is final remember to do the reductions...
           if self.SLRItemIsFinal(item):
              (ruleindex, position) = item
              Rule = self.Rules[ruleindex]
              Head = Rule.Nonterm
              Following = kjSet.Neighbors( self.Follow, Head )
              for Token in Following:
                 key = (State, Token)
                 if not ToDo.has_key(key):
                    ToDo[ key ] = item
                 else:
                    # it might be okay if the items are identical?
                    item2 = ToDo[key]
                    if item != item2:
                       print "reduce/reduce conflict on ",key
                       self.ItemDump(item)
                       self.ItemDump(item2)
                    Error = " apparent reduce/reduce conflict"
                 #endif
              #endfor
           #endif
        #endfor NFAindex
     #endfor State

     # for each (State,Token) pair which indicates a reduction
     # record the reduction UNLESS the map is already set for the pair
     for key in ToDo.keys():
        (State,Token) = key
        item = ToDo[key]
        (rulenum, dotpos) = item
        ExistingMap = DFA.map( State, Token )
        if ExistingMap[0] == NOMATCHFLAG:
           DFA.SetReduction( State, Token, rulenum )
        else:
           print "apparent shift/reduce conflict"
           print "reduction: ", key, ": "
           self.ItemDump(item)
           print "existing map ", ExistingMap
           Error = " apparent shift/reduce conflict"
     #endfor
     if Error and ABORTONERROR:
        raise NotSLRError, Error
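
SLRFixDFA installs the SLR reduce actions: for every DFA state that contains a completed item, it schedules a reduction on each token in the FOLLOW set of the rule's head, flagging a reduce/reduce conflict when two different completed items claim the same (state, token) pair and a shift/reduce conflict when the DFA already has a shift recorded there. The conflict bookkeeping can be sketched on its own with a plain dictionary action table (assumed layout, not the library's DFA API):

 # Sketch only: recording reductions in an action table keyed by
 # (state, token) and reporting SLR conflicts; conventions are assumed.
 def add_reductions(actions, state, follow_tokens, rule_index):
     conflicts = []
     for token in follow_tokens:
         key = (state, token)
         existing = actions.get(key)
         if existing is None:
             actions[key] = ("reduce", rule_index)
         elif existing == ("reduce", rule_index):
             pass                                   # same reduction twice: harmless
         elif existing[0] == "reduce":
             conflicts.append(("reduce/reduce", key, existing[1], rule_index))
         else:
             conflicts.append(("shift/reduce", key, existing, rule_index))
     return conflicts

 actions = {(3, "+"): ("shift", 7)}
 print(add_reductions(actions, 3, ["+", ")"], 2))
 # [('shift/reduce', (3, '+'), ('shift', 7), 2)]; actions gains (3, ')') -> ('reduce', 2)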
Example #5
 def DumpItemSet(self,State):
     DFA = self.DFA
     NFA = self.SLRNFA
     print
     print "STATE ", State, " *******"
     fromNFAindices = kjSet.get_elts(DFA.States[State][1])
     for NFAindex in fromNFAindices:
         item = NFA.States[NFAindex][1]
         print "  ", NFAindex, ": ",
         self.ItemDump(item)
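
DumpItemSet is a debugging helper: it prints every LR(0) item recorded for one DFA state, where an item is a (ruleindex, dotposition) pair indexing into self.Rules. A hedged sketch of rendering such an item as the usual dotted rule, with a hypothetical (head, body) tuple standing in for the library's Rule objects:

 # Sketch only: show item (rule_index, dot) as "Head -> before . after",
 # using a made-up rule representation for illustration.
 def format_item(rules, item):
     rule_index, dot = item
     head, body = rules[rule_index]
     marked = body[:dot] + ["."] + body[dot:]
     return "%s -> %s" % (head, " ".join(marked))

 toy_rules = [("E", ["E", "+", "T"]), ("E", ["T"])]
 print(format_item(toy_rules, (0, 2)))   # E -> E + . T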
Example #6
    def MakeTokenArchives(self):
        # make a list of all tokens and
        # initialize token > int dictionary
        keys = self.Gram.DFA.StateTokenMap.keys()
        tokenToInt = {}
        tokenSet = kjSet.NewSet([])
        for k in keys:
            kjSet.addMember(k[1], tokenSet)
        tokens = kjSet.get_elts(tokenSet)
        for i in range(0,len(tokens)):
            tokenToInt[ tokens[i] ] = i

        self.keys = keys
        self.tokens = tokens # global sub
        self.tokInt = tokenToInt # global sub
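
MakeTokenArchives deduplicates the tokens appearing in the DFA's (state, token) keys and assigns each one a small integer index so the table can later be stored compactly. The same archive can be built with a set comprehension and enumerate (a sketch, not the library code):

 # Sketch only: token -> int index built from (state, token) keys.
 def make_token_archive(state_token_map):
     tokens = sorted({token for (_state, token) in state_token_map}, key=repr)
     token_to_int = {token: i for i, token in enumerate(tokens)}
     return tokens, token_to_int

 toy_map = {(0, "id"): None, (0, "+"): None, (1, "id"): None}
 print(make_token_archive(toy_map)[1])   # {'+': 0, 'id': 1}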
Example #7
    def Eclosure(self, Epsilon, DoNullMaps=0):
        ''' return the epsilon closure of the FSM as a new FSM

            DoNullMaps, if set, will map unexpected tokens to
            the "empty" state (usually creating a really big fsm)
        '''
        Closure = CFSMachine( self.root_nonTerminal )

        # compute the Epsilon Graph between states
        EGraph = kjSet.NewDG([])
        for State in range(0,self.maxState+1):
            # every state is E-connected to self
            kjSet.AddArc( EGraph, State, State )
            # add possible transition on epsilon (ONLY ONE SUPPORTED!)
            key = (State, Epsilon)
            if self.StateTokenMap.has_key(key):
                keymap = self.StateTokenMap[key]
                if keymap[0][0] != MOVETOFLAG:
                    raise TypeError, "unexpected map type in StateTokenMap"
                for (Flag,ToState) in keymap:
                    kjSet.AddArc( EGraph, State, ToState )
        #endfor
        # transitively close EGraph
        kjSet.TransClose( EGraph )

        # Translate EGraph into a dictionary of lists
        EMap = {}
        for State in range(0,self.maxState+1):
            EMap[State] = kjSet.Neighbors( EGraph, State )

        # make each e-closure of each self.state a state of the closure FSM.
        # here closure states assumed transient -- reset elsewhere.
        # first do the initial state
        Closure.States[ Closure.initial_state ] = \
           [TRANSFLAG, kjSet.NewSet(EMap[self.initial_state]) ]
        # do all other states (save initial and successful final states)
        #for State in range(0,self.maxState+1):
        #   if State != self.initial_state \
        #    and State != self.successful_final_state:
        #      Closure.NewSetState(TRANSFLAG, kjSet.NewSet(EMap[State]) )
        ##endfor

        # compute set of all known tokens EXCEPT EPSILON
        Tokens = kjSet.NewSet( [] )
        for (State, Token) in self.StateTokenMap.keys():
            if Token != Epsilon:
                kjSet.addMember(Token, Tokens)
        # transform it into a list
        Tokens = kjSet.get_elts(Tokens)

        # for each state of the closure FSM (past final) add transitions
        # and add new states as needed until all states are processed
        # (uses convention that states are allocated sequentially)
        ThisClosureState = 1
        while ThisClosureState <= Closure.maxState:
            MemberStates = kjSet.get_elts(Closure.States[ThisClosureState][1])
            # for each possible Token, compute the union UTrans of all
            # e-closures for all transitions for all member states,
            # on the Token, make  UTrans a new state (if needed),
            # and transition ThisClosureState to UTrans on Token
            for Token in Tokens:
                UTrans = kjSet.NewSet( [] )
                for MState in MemberStates:
                    # if MState has a transition on Token, include
                    # EMap for the destination state
                    key = (MState, Token)
                    if self.StateTokenMap.has_key(key):
                        DStateTup = self.StateTokenMap[key]
                        if DStateTup[0][0] != MOVETOFLAG:
                            raise TypeError, "unknown map type"
                        for (DFlag, DState) in DStateTup:
                            for EDState in EMap[DState]:
                                kjSet.addMember(EDState, UTrans)
                    #endif
                #endfor MState
                # register UTrans as a new state if needed
                UTState = Closure.NewSetState(TRANSFLAG, UTrans)
                # record transition from
                # ThisClosureState to UTState on Token
                if DoNullMaps:
                    Closure.SetMap( ThisClosureState, Token, UTState)
                else:
                    if not kjSet.Empty(UTrans):
                        Closure.SetMap( ThisClosureState, Token, UTState)
            #endfor Token
            ThisClosureState = ThisClosureState +1
        #endwhile
        return Closure
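
Eclosure turns the FSM into its epsilon-closure machine: it builds the epsilon-edge graph, transitively closes it, seeds the new machine with the closure of the initial state, and then repeatedly unions the closures reachable from each member state on every non-epsilon token, registering each union as a (possibly new) subset state. The two core steps, epsilon closure and move-on-token, look like this with plain sets and a (state, token) -> set-of-states transition dictionary (illustrative names, not the CFSMachine API):

 # Sketch only: epsilon closure and a subset-construction move step.
 EPSILON = None   # stand-in for the library's Epsilon token

 def eclose(transitions, states):
     # all states reachable from `states` through epsilon edges
     closure, stack = set(states), list(states)
     while stack:
         s = stack.pop()
         for t in transitions.get((s, EPSILON), ()):
             if t not in closure:
                 closure.add(t)
                 stack.append(t)
     return closure

 def move(transitions, states, token):
     # union of epsilon closures of everything reachable on `token`
     dests = set()
     for s in states:
         dests |= transitions.get((s, token), set())
     return eclose(transitions, dests)

 toy = {(0, EPSILON): {1}, (1, "a"): {2}, (2, EPSILON): {0}}
 start = eclose(toy, {0})          # {0, 1}
 print(move(toy, start, "a"))      # {0, 1, 2}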