Example #1
File: support.py  Project: msander/lepl
    def _advance(self, delta=1):
        '''
        Move forwards in the stream.

        I've tried to optimise for the common (delta=1) case.

        The following conventions are followed:
        - `offset` is the offset from the initial input
        - `stream` is the stream starting at the current location
        - `next_stream` is the stream after current
        - `current` is the character at the current location
        - `previous` is the character just before the current location
        - `excess` is the amount by which we advanced past the end

        If `excess` is set, streams should not be used.
        '''
        assert delta >= 0
        self._offset += delta
        if self._excess:
            self._excess += delta
            self._previous = None
        elif delta == 1:
            self._stream = self._next_stream
            self._previous = self._current
            try:
                (self._current, self._next_stream) = s_next(self._next_stream)
            except StopIteration:
                self._current = None
                self._next_stream = None
                self._excess = 1
        elif delta:
            old_stream = self._stream
            try:
                (advanced, self._stream) = s_next(old_stream, delta)
                self._previous = advanced[-1:]
                try:
                    (self._current, self._next_stream) = s_next(self._stream)
                except StopIteration:
                    self._current = None
                    self._next_stream = None
                    self._excess = 1
            except StopIteration:
                self._stream = None
                self._next_stream = None
                self._current = None
                self._previous = None
                self._excess = delta - s_len(old_stream) + 1
        return True
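
The code above leans on the s_next contract: given a stream and a count, it returns a (matched items, remaining stream) pair and raises StopIteration when the stream is too short. Below is a minimal sketch of that contract on a plain string, to show how the excess arithmetic in the last branch comes out; it is an illustrative stand-in, not lepl's real stream machinery.

def s_next(stream, count=1):
    # Stand-in for lepl's s_next: next `count` items plus the rest of
    # the stream, or StopIteration if fewer than `count` items remain.
    if len(stream) < count:
        raise StopIteration()
    return (stream[:count], stream[count:])

(char, rest) = s_next('abc')        # char == 'a', rest == 'bc'
(chars, rest) = s_next('abc', 2)    # chars == 'ab', rest == 'c'
try:
    s_next('c', 2)                  # delta == 2 but only 1 item left ...
except StopIteration:
    pass                            # ... so _advance would set
                                    # excess = delta - s_len('c') + 1 == 2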
Example #2
File: memo.py  Project: cajus/python-lepl
    def _untagged_match(self, stream):
        '''
        Match the stream without trampolining.
        '''
        key = s_key(stream, self.__state)
        if key not in self.__depth:
            self.__depth[key] = 0
        depth = self.__depth[key]
        if self.curtail(depth, s_len(stream)):
            return
        if (key, depth) not in self.__table:
            # descriptor pairs a results cache with the wrapped matcher's
            # generator: [cached results, generator]
            self.__table[(key, depth)] = [[], self.matcher._match(stream)]
        descriptor = self.__table[(key, depth)]
        for i in count():
            assert depth == self.__depth[key]
            if i == len(descriptor[0]):
                # extend the cache by one result (under PEP 479, Python
                # 3.7+, the StopIteration raised here on exhaustion would
                # need to be caught and converted to a return)
                result = next(descriptor[1].generator)
                descriptor[0].append(result)
            yield descriptor[0][i]
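
Here descriptor[0] caches results already produced and descriptor[1] holds the wrapped matcher's generator, so repeated matches of the same (key, depth) replay the cached prefix before drawing anything new. A self-contained sketch of that replay pattern follows, with invented names (replayed, _table) and an explicit StopIteration catch as PEP 479 requires in Python 3.7+.

from itertools import count

_table = {}

def replayed(key, make_generator):
    # Share one underlying generator per key; every consumer replays
    # the cached prefix before new results are drawn.
    if key not in _table:
        _table[key] = [[], make_generator()]
    (cache, source) = _table[key]
    for i in count():
        if i == len(cache):
            try:
                cache.append(next(source))
            except StopIteration:
                return                  # source exhausted
        yield cache[i]

def numbers():
    print('running')                    # printed once, not once per consumer
    yield from (1, 2, 3)

a = replayed('k', numbers)
b = replayed('k', numbers)
print(next(a), next(b))                 # running, then: 1 1
print(list(a), list(b))                 # [2, 3] [2, 3]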
Example #3
File: lexer.py  Project: gcarothers/lepl
    def _match(self, in_stream):
        '''
        Implement matching - pass token stream to tokens.
        '''
        (max, clean_stream) = s_new_max(in_stream)
        try:
            length = s_len(in_stream)
        except TypeError:
            length = None
        factory = s_factory(in_stream)
        token_stream = factory.to_token(self._tokens(clean_stream, max),
                                        id=s_id(in_stream), factory=factory,
                                        max=s_max(in_stream),
                                        global_kargs=s_global_kargs(in_stream),
                                        delta=s_delta(in_stream), len=length,
                                        cache_level=s_cache_level(in_stream) + 1)
        in_stream = None  # drop the reference so the raw stream can be freed
        generator = self.matcher._match(token_stream)
        while True:
            # trampoline convention: the inner yield asks the trampoline to
            # evaluate the sub-generator; the outer yield passes on its result
            yield (yield generator)
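
The closing loop follows lepl's trampoline convention: the inner yield hands the sub-generator to the trampoline, which evaluates it and sends its next result back; the outer yield then passes that result to the caller. The driver below is a one-level illustration of that protocol, assuming only the convention itself (drive, relay and results are invented names; lepl's real trampoline is stack-based and also routes exceptions).

def drive(matcher):
    # Run a matcher written in the `yield (yield gen)` style: a yielded
    # generator is advanced and its value sent back in; anything else
    # is a result for the caller.
    while True:
        try:
            value = next(matcher)
        except StopIteration:
            return
        while hasattr(value, 'send'):   # a delegated generator
            try:
                inner = next(value)
            except StopIteration:
                return                  # delegate exhausted
            try:
                value = matcher.send(inner)
            except StopIteration:
                return
        yield value

def results():
    yield 'a'
    yield 'b'

def relay(generator):
    while True:
        yield (yield generator)         # same shape as _match above

print(list(drive(relay(results()))))    # ['a', 'b']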
Example #4
File: memo.py  Project: cajus/python-lepl
    def _match(self, stream):
        '''
        Attempt to match the stream.
        '''
        key = s_key(stream, self.__state)
        if key not in self.__depth:
            self.__depth[key] = 0
        depth = self.__depth[key]
        if self.curtail(depth, s_len(stream)):
            return
        if (key, depth) not in self.__table:
            self.__table[(key, depth)] = [[], self.matcher._match(stream)]
        descriptor = self.__table[(key, depth)]
        for i in count():
            assert depth == self.__depth[key]
            if i == len(descriptor[0]):
                try:
                    # count simultaneous evaluations of this key so that
                    # left-recursive re-entry can be curtailed
                    self.__depth[key] += 1
                    result = yield descriptor[1]
                finally:
                    self.__depth[key] -= 1
                descriptor[0].append(result)
            yield descriptor[0][i]
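
Unlike the untagged version in Example #2, this variant brackets the trampolined evaluation with a depth increment, so a left-recursive matcher re-entering the same key sees an ever larger depth and curtail can eventually refuse it. A plausible predicate in the spirit of Frost and Hafiz's curtailment of left recursion (illustrative only; the actual cutoff rule is supplied by the memoizer, not by this sketch):

def curtail(depth, remaining):
    # No derivation can consume more items than the stream still holds,
    # so re-entering deeper than that cannot lead to a successful match.
    return depth > remaining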