Example #1
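This test exercises syntax_highlight.Span: span1 uses an escape character so the escaped 'c' does not close the span, span2 uses capture_end=False so its closing character keeps the default style, and span3 uses terminate_tokens='h' so the span is cut short at the 'h'. The assertion lists the (start, end, styleid) runs yielded by begin_tokenizer.
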
    def test_span(self):
        doc = self._getdoc(' ab\\cc def ghi ')

        root = syntax_highlight.Tokenizer(tokens=[
            ('span1', syntax_highlight.Span('span', 'a', 'c',
                                            escape='\\')),
            ('span2', syntax_highlight.Span('span', 'd', 'f',
                                            capture_end=False)),
            ('span3', syntax_highlight.Span('span', 'g', 'i', terminate_tokens='h')),
        ])

        tokenizer = syntax_highlight.begin_tokenizer(
            doc, root, 0)

        styles = list(tokenizer)
        assert styles == [
            (0, 1, root.styleid_default),
            (1, 2, root.tokens.span1.styleid_span),
            (2, 5, root.tokens.span1.styleid_span),
            (5, 6, root.tokens.span1.styleid_span),
            (6, 7, root.styleid_default),
            (7, 8, root.tokens.span2.styleid_span),
            (8, 9, root.tokens.span2.styleid_span),
            (9, 11, root.styleid_default),
            (11, 12, root.tokens.span3.styleid_span),
        ]
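
The pattern these tests exercise is compact: begin_tokenizer(doc, root, pos) yields (start, end, styleid) runs that can be written straight into the document's style buffer. A minimal sketch, assuming a document object like the one the tests build with self._getdoc() and that the module is importable under the name used here:

    import syntax_highlight  # exact import path may differ per project

    root = syntax_highlight.Tokenizer(tokens=[
        ('keyword', syntax_highlight.Keywords('keyword', ['abc', 'ghi'])),
    ])

    # doc is assumed to be a document like the one _getdoc() returns.
    for start, end, styleid in syntax_highlight.begin_tokenizer(doc, root, 0):
        doc.styles.setints(start, end, styleid)
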
Example #2
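An essentially identical copy of the test in Example #1; only the line wrapping differs.
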
    def test_span(self):
        doc = self._getdoc(' ab\\cc def ghi ')

        root = syntax_highlight.Tokenizer(tokens=[
            ('span1', syntax_highlight.Span('span', 'a', 'c', escape='\\')),
            ('span2',
             syntax_highlight.Span('span', 'd', 'f', capture_end=False)),
            ('span3',
             syntax_highlight.Span('span', 'g', 'i', terminate_tokens='h')),
        ])

        tokenizer = syntax_highlight.begin_tokenizer(doc, root, 0)

        styles = list(tokenizer)
        assert styles == [
            (0, 1, root.styleid_default),
            (1, 2, root.tokens.span1.styleid_span),
            (2, 5, root.tokens.span1.styleid_span),
            (5, 6, root.tokens.span1.styleid_span),
            (6, 7, root.styleid_default),
            (7, 8, root.tokens.span2.styleid_span),
            (8, 9, root.tokens.span2.styleid_span),
            (9, 11, root.styleid_default),
            (11, 12, root.tokens.span3.styleid_span),
        ]
Example #3
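This test checks that tokenizing can resume from an arbitrary offset: begin_tokenizer is started at positions 2 and 6, and for the second run the already-known styles are written into the document with setints first, so both runs are expected to yield the same style ranges.
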
    def test_resume(self):
        doc = self._getdoc('    abc')

        root = syntax_highlight.Tokenizer(
            tokens=[('keyword',
                     syntax_highlight.Keywords('keyword', ['abc', 'ghi']))])

        tokeniter = syntax_highlight.begin_tokenizer(doc, root, 2)

        styles = list(tokeniter)
        assert styles == [(0, 4, root.styleid_default),
                          (4, 7, root.tokens.keyword.styleid_token)]

        tokeniter = syntax_highlight.begin_tokenizer(doc, root, 6)

        doc.styles.setints(4, 7, root.tokens.keyword.styleid_token)
        styles = list(tokeniter)
        assert styles == [(0, 4, root.styleid_default),
                          (4, 7, root.tokens.keyword.styleid_token)]
Example #4
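An essentially identical copy of the test in Example #3; only the line wrapping differs.
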
    def test_resume(self):
        doc = self._getdoc('    abc')

        root = syntax_highlight.Tokenizer(tokens=[
            ('keyword', syntax_highlight.Keywords('keyword', ['abc', 'ghi']))])

        tokeniter = syntax_highlight.begin_tokenizer(
            doc, root, 2)

        styles = list(tokeniter)
        assert styles == [
            (0, 4, root.styleid_default),
            (4, 7, root.tokens.keyword.styleid_token)]

        tokeniter = syntax_highlight.begin_tokenizer(
            doc, root, 6)

        doc.styles.setints(4, 7, root.tokens.keyword.styleid_token)
        styles = list(tokeniter)
        assert styles == [
            (0, 4, root.styleid_default),
            (4, 7, root.tokens.keyword.styleid_token)]
Example #5
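A basic Keywords test: the words 'abc' and 'ghi' receive the keyword style and the surrounding text keeps the default style.
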
    def test_token(self):
        doc = self._getdoc(' abc def ghi ')

        root = syntax_highlight.Tokenizer(tokens=[
            ('keyword', syntax_highlight.Keywords('keyword', ['abc', 'ghi']))
        ])

        tokenizer = syntax_highlight.begin_tokenizer(
            doc, root, 0)

        styles = list(tokenizer)
        assert styles == [
            (0, 1, root.styleid_default),
            (1, 4, root.tokens.keyword.styleid_token),
            (4, 9, root.styleid_default),
            (9, 12, root.tokens.keyword.styleid_token),
            (12, 13, root.styleid_default), ]
Example #6
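An essentially identical copy of the test in Example #5; only the line wrapping differs.
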
    def test_token(self):
        doc = self._getdoc(' abc def ghi ')

        root = syntax_highlight.Tokenizer(
            tokens=[('keyword',
                     syntax_highlight.Keywords('keyword', ['abc', 'ghi']))])

        tokenizer = syntax_highlight.begin_tokenizer(doc, root, 0)

        styles = list(tokenizer)
        assert styles == [
            (0, 1, root.styleid_default),
            (1, 4, root.tokens.keyword.styleid_token),
            (4, 9, root.styleid_default),
            (9, 12, root.tokens.keyword.styleid_token),
            (12, 13, root.styleid_default),
        ]
Example #7
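Not a test but an incremental-highlighting driver, apparently a method of an editor mode object: it creates or resumes a begin_tokenizer iterator over the range reported by _get_highlight_range(), writes the yielded style runs into the document with setints, stops after roughly batch runs per call, notifies listeners through style_updated(), and returns False once the range is fully highlighted so the calling idle loop can stop.
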
    def run_tokenizer(self, batch=HIGHLIGHTBATCH):
        range_start, range_end = self._get_highlight_range()

        if self._highlight_done < range_start:
            self._highlight_done = range_start

        if not self._highlight_iter:
            f = max(range_start, self._highlight_done - 1)
            self._highlight_iter = syntax_highlight.begin_tokenizer(
                self.document, self.tokenizer, f)

        updatefrom = self.document.endpos()
        updateto = range_start
        updated = False
        finished = False

        for n, (f, t, style) in enumerate(self._highlight_iter):
            f = max(range_start, f)
            t = min(range_end, t)
            if f < t:
                self.document.styles.setints(f, t, style)

            updated = True
            updatefrom = min(f, updatefrom)
            updateto = max(t, updateto)

            if batch and (n > batch):
                break

            if t >= range_end:
                finished = True
                break
        else:
            finished = True

        if self.document.endpos() == 0 or updated and (updatefrom != updateto):
            self.document.style_updated(updatefrom, updateto)
            self._highlight_done = updateto

        # returns False if finished to terminate idle loop.
        return not finished
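
A hedged sketch of how run_tokenizer() might be driven: it is written to be called repeatedly, and its return value is meant to end the loop once highlighting is done. schedule_idle below is a hypothetical stand-in for whatever idle-callback mechanism the surrounding editor provides, not part of the code shown above.

    def keep_highlighting(mode, schedule_idle):
        # mode is assumed to own run_tokenizer() above; schedule_idle is
        # hypothetical.  A True return means more work remains, so reschedule.
        def on_idle():
            if mode.run_tokenizer():
                schedule_idle(on_idle)
        schedule_idle(on_idle)
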
Example #8
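This test covers resuming with nested tokenizers: the SubKeyword token's on_start coroutine hands control to the child Tokenizer sub via sub.run(doc, pos), and sub terminates at 'x', returning to the parent. The style buffer is pre-filled with setints, and begin_tokenizer is then started at several offsets to check that each restart reproduces the correct style runs.
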
    def test_nest_resume(self):

        doc = self._getdoc('   abc   123 x abc')

        class SubKeyword(syntax_highlight.Keywords):

            def on_start(self, doc, match):
                pos, terminates = yield from super().on_start(doc, match)
                return (yield from sub.run(doc, pos)), False

        root = syntax_highlight.Tokenizer(tokens=[
            ('subkeyword', SubKeyword('keyword', ['abc']))])

        sub = syntax_highlight.Tokenizer(parent=root, terminates='x', tokens=[
            ('keyword', syntax_highlight.Keywords('keyword', ['123']))])

        doc.styles.setints(0, 3, root.styleid_default)
        doc.styles.setints(3, 6, root.tokens.subkeyword.styleid_token)
        doc.styles.setints(6, 9, sub.styleid_default)
        doc.styles.setints(9, 12, sub.tokens.keyword.styleid_token)

        # parse at blank
        tokeniter = syntax_highlight.begin_tokenizer(
            doc, root, 2)

        styles = list(tokeniter)
        assert styles == [
            (0, 3, root.styleid_default),
            (3, 6, root.tokens.subkeyword.styleid_token),
            (6, 9, sub.styleid_default),
            (9, 12, sub.tokens.keyword.styleid_token),
            (12, 13, sub.styleid_default),
            (13, 15, root.styleid_default),
            (15, 18, root.tokens.subkeyword.styleid_token)]

        # parse at first token
        tokeniter = syntax_highlight.begin_tokenizer(
            doc, root, 4)

        styles = list(tokeniter)
        assert styles == [
            (0, 3, root.styleid_default),
            (3, 6, root.tokens.subkeyword.styleid_token),
            (6, 9, sub.styleid_default),
            (9, 12, sub.tokens.keyword.styleid_token),
            (12, 13, sub.styleid_default),
            (13, 15, root.styleid_default),
            (15, 18, root.tokens.subkeyword.styleid_token)]

        # parse at blank of sub2
        tokeniter = syntax_highlight.begin_tokenizer(
            doc, root, 8)

        styles = list(tokeniter)
        assert styles == [
            (6, 9, sub.styleid_default),
            (9, 12, sub.tokens.keyword.styleid_token),
            (12, 13, sub.styleid_default),
            (13, 15, root.styleid_default),
            (15, 18, root.tokens.subkeyword.styleid_token)]

        # parse at blank of keyword2
        tokeniter = syntax_highlight.begin_tokenizer(
            doc, root, 11)

        styles = list(tokeniter)
        assert styles == [
            (6, 9, sub.styleid_default),
            (9, 12, sub.tokens.keyword.styleid_token),
            (12, 13, sub.styleid_default),
            (13, 15, root.styleid_default),
            (15, 18, root.tokens.subkeyword.styleid_token)]
Example #9
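An essentially identical copy of the test in Example #8; only the line wrapping differs.
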
    def test_nest_resume(self):

        doc = self._getdoc('   abc   123 x abc')

        class SubKeyword(syntax_highlight.Keywords):
            def on_start(self, doc, match):
                pos, terminates = yield from super().on_start(doc, match)
                return (yield from sub.run(doc, pos)), False

        root = syntax_highlight.Tokenizer(
            tokens=[('subkeyword', SubKeyword('keyword', ['abc']))])

        sub = syntax_highlight.Tokenizer(
            parent=root,
            terminates='x',
            tokens=[('keyword', syntax_highlight.Keywords('keyword',
                                                          ['123']))])

        doc.styles.setints(0, 3, root.styleid_default)
        doc.styles.setints(3, 6, root.tokens.subkeyword.styleid_token)
        doc.styles.setints(6, 9, sub.styleid_default)
        doc.styles.setints(9, 12, sub.tokens.keyword.styleid_token)

        # parse at blank
        tokeniter = syntax_highlight.begin_tokenizer(doc, root, 2)

        styles = list(tokeniter)
        assert styles == [(0, 3, root.styleid_default),
                          (3, 6, root.tokens.subkeyword.styleid_token),
                          (6, 9, sub.styleid_default),
                          (9, 12, sub.tokens.keyword.styleid_token),
                          (12, 13, sub.styleid_default),
                          (13, 15, root.styleid_default),
                          (15, 18, root.tokens.subkeyword.styleid_token)]

        # parse at first token
        tokeniter = syntax_highlight.begin_tokenizer(doc, root, 4)

        styles = list(tokeniter)
        assert styles == [(0, 3, root.styleid_default),
                          (3, 6, root.tokens.subkeyword.styleid_token),
                          (6, 9, sub.styleid_default),
                          (9, 12, sub.tokens.keyword.styleid_token),
                          (12, 13, sub.styleid_default),
                          (13, 15, root.styleid_default),
                          (15, 18, root.tokens.subkeyword.styleid_token)]

        # parse at blank of sub2
        tokeniter = syntax_highlight.begin_tokenizer(doc, root, 8)

        styles = list(tokeniter)
        assert styles == [(6, 9, sub.styleid_default),
                          (9, 12, sub.tokens.keyword.styleid_token),
                          (12, 13, sub.styleid_default),
                          (13, 15, root.styleid_default),
                          (15, 18, root.tokens.subkeyword.styleid_token)]

        # parse at blank of keyword2
        tokeniter = syntax_highlight.begin_tokenizer(doc, root, 11)

        styles = list(tokeniter)
        assert styles == [(6, 9, sub.styleid_default),
                          (9, 12, sub.tokens.keyword.styleid_token),
                          (12, 13, sub.styleid_default),
                          (13, 15, root.styleid_default),
                          (15, 18, root.tokens.subkeyword.styleid_token)]