Example #1
    def hasSLLConflictTerminatingPrediction(cls, mode, configs):
        # Configs in rule stop states indicate reaching the end of the decision
        # rule (local context) or end of start rule (full context). If all
        # configs meet this condition, then none of the configurations is able
        # to match additional input so we terminate prediction.
        #
        if cls.allConfigsInRuleStopStates(configs):
            return True

        # pure SLL mode parsing
        if mode == PredictionMode.SLL:
            # Don't bother with combining configs from different semantic
            # contexts if we can fail over to full LL; costs more time
            # since we'll often fail over anyway.
            if configs.hasSemanticContext:
                # dup configs, tossing out semantic predicates
                dup = ATNConfigSet()
                for c in configs:
                    c = ATNConfig(config=c, semantic=SemanticContext.NONE)
                    dup.add(c)
                configs = dup
            # now we have combined contexts for configs with dissimilar preds

        # pure SLL or combined SLL+LL mode parsing
        altsets = cls.getConflictingAltSubsets(configs)
        return (cls.hasConflictingAltSet(altsets)
                and not cls.hasStateAssociatedWithOneAlt(configs))
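The check above is what makes ANTLR's two-stage parsing strategy safe: try cheap SLL prediction first and fall back to full LL only when SLL reports a conflict. Below is a minimal sketch of that strategy, assuming an already-built generated recognizer; parser, tokens, startRule and two_stage_parse are placeholder names, not part of the runtime.

    from antlr4.atn.PredictionMode import PredictionMode
    from antlr4.error.ErrorStrategy import BailErrorStrategy, DefaultErrorStrategy
    from antlr4.error.Errors import ParseCancellationException

    def two_stage_parse(parser, tokens):
        # Stage 1: SLL with a bail-out error strategy; fast, but may bail on
        # conflicts that full LL would resolve.
        parser._interp.predictionMode = PredictionMode.SLL
        parser._errHandler = BailErrorStrategy()
        try:
            return parser.startRule()      # placeholder for the grammar's entry rule
        except ParseCancellationException:
            # Stage 2: rewind and reparse with full LL prediction.
            tokens.seek(0)
            parser.reset()
            parser._errHandler = DefaultErrorStrategy()
            parser._interp.predictionMode = PredictionMode.LL
            return parser.startRule()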
Example #2
File: DFAState.py Project: clinzy/butools
    def __init__(self,
                 stateNumber: int = -1,
                 configs: ATNConfigSet = ATNConfigSet()):
        self.stateNumber = stateNumber
        self.configs = configs
        # {@code edges[symbol]} points to target of symbol. Shift up by 1 so (-1)
        #  {@link Token#EOF} maps to {@code edges[0]}.
        self.edges = None
        self.isAcceptState = False
        # if accept state, what ttype do we match or alt do we predict?
        #  This is set to {@link ATN#INVALID_ALT_NUMBER} when {@link #predicates}{@code !=null} or
        #  {@link #requiresFullContext}.
        self.prediction = 0
        self.lexerActionExecutor = None
        # Indicates that this state was created during SLL prediction that
        # discovered a conflict between the configurations in the state. Future
        # {@link ParserATNSimulator#execATN} invocations immediately jump to
        # full context prediction if this field is true.
        self.requiresFullContext = False
        # During SLL parsing, this is a list of predicates associated with the
        #  ATN configurations of the DFA state. When we have predicates,
        #  {@link #requiresFullContext} is {@code false} since full context prediction evaluates predicates
        #  on-the-fly. If this is not null, then {@link #prediction} is
        #  {@link ATN#INVALID_ALT_NUMBER}.
        #
        #  <p>We only use these for non-{@link #requiresFullContext} but conflicting states. That
        #  means we know from the context (it's $ or we don't dip into outer
        #  context) that it's an ambiguity not a conflict.</p>
        #
        #  <p>This list is computed by {@link ParserATNSimulator#predicateDFAState}.</p>
        self.predicates = None
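A minimal sketch of the edges[] convention documented above: the target for input symbol t lives at edges[t + 1], so Token.EOF (-1) lands in edges[0]. The state numbers and the predicted alternative are arbitrary illustration values.

    from antlr4.atn.ATNConfigSet import ATNConfigSet
    from antlr4.dfa.DFAState import DFAState
    from antlr4.Token import Token

    start = DFAState(stateNumber=0, configs=ATNConfigSet())
    eof_target = DFAState(stateNumber=1, configs=ATNConfigSet())
    eof_target.isAcceptState = True
    eof_target.prediction = 1                 # accept state predicting alternative 1
    start.edges = [None] * 2
    start.edges[Token.EOF + 1] = eof_target   # EOF edge stored at index 0
    print(start.edges[0] is eof_target)       # True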
Example #3
class ATNSimulator(object):

    # Must distinguish between missing edge and edge we know leads nowhere#/
    ERROR = DFAState(0x7FFFFFFF, ATNConfigSet())

    # The context cache maps all PredictionContext objects that are ==
    #  to a single cached copy. This cache is shared across all contexts
    #  in all ATNConfigs in all DFA states.  We rebuild each ATNConfigSet
    #  to use only cached nodes/graphs in addDFAState(). We don't want to
    #  fill this during closure() since there are lots of contexts that
    #  pop up but are not used ever again. It also greatly slows down closure().
    #
    #  <p>This cache makes a huge difference in memory and a little bit in speed.
    #  For the Java grammar on java.*, it dropped the memory requirements
    #  at the end from 25M to 16M. We don't store any of the full context
    #  graphs in the DFA because they are limited to local context only,
    #  but apparently there's a lot of repetition there as well. We optimize
    #  the config contexts before storing the config set in the DFA states
    #  by literally rebuilding them with cached subgraphs only.</p>
    #
    #  <p>I tried a cache for use during closure operations, that was
    #  whacked after each adaptivePredict(). It cost a little bit
    #  more time I think and doesn't save on the overall footprint
    #  so it's not worth the complexity.</p>
    #/
    def __init__(self, atn, sharedContextCache):
        self.atn = atn
        self.sharedContextCache = sharedContextCache

    def getCachedContext(self, context):
        if self.sharedContextCache is None:
            return context
        visited = dict()
        return getCachedPredictionContext(context, self.sharedContextCache,
                                          visited)
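A minimal sketch of the pass-through behaviour of getCachedContext when no shared cache is supplied. Here atn=None and the bare object() are placeholders only, chosen because neither is touched by this call; this is not how the simulator is normally constructed.

    from antlr4.atn.ATNSimulator import ATNSimulator

    sim = ATNSimulator(atn=None, sharedContextCache=None)   # placeholders; atn is unused here
    ctx = object()                                           # stands in for a PredictionContext graph
    assert sim.getCachedContext(ctx) is ctx                  # no cache -> context returned unchanged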
Example #4
    def setPrecedenceDfa(self, precedenceDfa):
        if self.precedenceDfa != precedenceDfa:
            self._states = dict()
            if precedenceDfa:
                precedenceState = DFAState(configs=ATNConfigSet())
                precedenceState.edges = []
                precedenceState.isAcceptState = False
                precedenceState.requiresFullContext = False
                self.s0 = precedenceState
            else:
                self.s0 = None
            self.precedenceDfa = precedenceDfa
Example #5
    def __init__(self, atnStartState, decision=0):
        # From which ATN state did we create this DFA?
        self.atnStartState = atnStartState
        self.decision = decision
        # A set of all DFA states. Use {@link Map} so we can get old state back
        #  ({@link Set} only allows you to see if it's there).
        self._states = dict()
        self.s0 = None
        # {@code true} if this DFA is for a precedence decision; otherwise,
        # {@code false}. This is the backing field for {@link #isPrecedenceDfa},
        # {@link #setPrecedenceDfa}.
        self.precedenceDfa = False

        if isinstance(atnStartState, StarLoopEntryState):
            if atnStartState.isPrecedenceDecision:
                self.precedenceDfa = True
                precedenceState = DFAState(configs=ATNConfigSet())
                precedenceState.edges = []
                precedenceState.isAcceptState = False
                precedenceState.requiresFullContext = False
                self.s0 = precedenceState
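A minimal sketch of the constructor's precedence check, which installs the same empty start state that setPrecedenceDfa in Example #4 builds by hand: a DFA rooted at a precedence-marked StarLoopEntryState (the loop entry of a left-recursive rule, flagged by the ATN deserializer) gets the special s0, while any other ATN state yields an ordinary DFA. BasicState here is used only to satisfy the constructor.

    from antlr4.atn.ATNState import BasicState, StarLoopEntryState
    from antlr4.dfa.DFA import DFA

    loop = StarLoopEntryState()
    loop.isPrecedenceDecision = True
    precedence_dfa = DFA(loop, decision=0)
    print(precedence_dfa.precedenceDfa)            # True
    print(precedence_dfa.s0.edges)                 # [] -- pre-installed precedence start state

    plain_dfa = DFA(BasicState(), decision=1)
    print(plain_dfa.precedenceDfa, plain_dfa.s0)   # False None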
Example #6
        newState.configs = configs
        dfa.states[newState] = newState
        return newState

    def getDFA(self, mode: int):
        return self.decisionToDFA[mode]

    # Get the text matched so far for the current token.
    def getText(self, input: InputStream):
        # index is first lookahead char, don't include.
        return input.getText(self.startIndex, input.index - 1)

    def consume(self, input: InputStream):
        curChar = input.LA(1)
        if curChar == ord('\n'):
            self.line += 1
            self.column = 0
        else:
            self.column += 1
        input.consume()

    def getTokenName(self, t: int):
        if t == -1:
            return "EOF"
        else:
            return "'" + chr(t) + "'"


LexerATNSimulator.ERROR = DFAState(0x7FFFFFFF, ATNConfigSet())

del Lexer
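A minimal sketch of the index convention that getText relies on: after consuming characters, input.index points at the first lookahead character, so the matched text is the inclusive slice from startIndex to index - 1. Only the plain InputStream API is used here.

    from antlr4 import InputStream

    stream = InputStream("ab\ncd")
    startIndex = stream.index                 # 0
    stream.consume()
    stream.consume()                          # 'a' and 'b' consumed; lookahead is now '\n'
    print(stream.index)                       # 2
    print(stream.getText(startIndex, stream.index - 1))   # 'ab'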