    def test_comments_newline2(self):
        scanner = WordScanner('before# comment\nafter')
        self.assertEqual(
            scanner.get_token(),
            (WordScanner.TokenEnum.WORD, 'before'))
        self.assertEqual(
            scanner.get_token(),
            (WordScanner.TokenEnum.WORD, 'after'))
        self.assertEqual(
            scanner.get_token(),
            (WordScanner.TokenEnum.EOF, ''))
    def parse(
        text: str, lineno: int=1, col_offset: int=0
    ) -> "WordList":
        """
        Parse a list of words.

        Words are naturally separated by whitespace. Words can be quoted using
        double quotes. Words may also be separated with commas, although
        commas are entirely optional and discarded during parsing.

        Some basic examples:

            >>> WordList.parse("foo, bar")
            WordList(entries=[Text(text='foo'), Text(text='bar')])
            >>> WordList.parse("foo,bar")
            WordList(entries=[Text(text='foo'), Text(text='bar')])
            >>> WordList.parse("foo,,,,bar")
            WordList(entries=[Text(text='foo'), Text(text='bar')])
            >>> WordList.parse("foo,,,,bar,,")
            WordList(entries=[Text(text='foo'), Text(text='bar')])
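
        Commas are entirely optional; plain whitespace separates words just
        the same:

            >>> WordList.parse("foo bar")
            WordList(entries=[Text(text='foo'), Text(text='bar')])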

        Words can be quoted; this allows us to include all kinds of characters
        inside:

            >>> WordList.parse('"foo bar"')
            WordList(entries=[Text(text='foo bar')])
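
        Separators lose their special meaning inside quotes as well; the
        sketch below assumes the scanner keeps quoted text, separators
        included, verbatim:

            >>> WordList.parse('"foo, bar"')
            WordList(entries=[Text(text='foo, bar')])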

        One word of caution: since we use a single (and not particularly
        smart) scanner, the equals sign is recognized and rejected as
        incorrect input.

            >>> WordList.parse("=")
            WordList(entries=[Error(msg="Unexpected input: '='")])
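
        The error is recorded in place, so any surrounding words still parse;
        the expected output below is a sketch composed from the examples
        above:

            >>> WordList.parse("foo = bar")
            ... # doctest: +NORMALIZE_WHITESPACE
            WordList(entries=[Text(text='foo'),
                              Error(msg="Unexpected input: '='"),
                              Text(text='bar')])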

        """
        entries = []
        scanner = WordScanner(text)
        while True:
            token, lexeme = scanner.get_token()
            if token == scanner.TokenEnum.EOF:
                break
            elif token == scanner.TokenEnum.COMMA:
                continue
            elif token == scanner.TokenEnum.WORD:
                entries.append(Text(lineno, col_offset, lexeme))
            else:
                entries.append(
                    Error(lineno, col_offset,
                          "Unexpected input: {!r}".format(lexeme)))
        return WordList(lineno, col_offset, entries)
    def parse(
        text: str, lineno: int=1, col_offset: int=0
    ) -> "IncludeStmtList":
        """
        Parse a multi-line ``include`` field.

        This field is a simple list of :class:`IncludeStmt` with the added
        twist that empty lines (including lines containing just irrelevant
        white-space or comments) are silently ignored.

        Example:
            >>> IncludeStmtList.parse('''
            ...                       foo
            ...                       # comment
            ...                       bar''')
            ... # doctest: +NORMALIZE_WHITESPACE
            IncludeStmtList(entries=[IncludeStmt(pattern=ReFixed(text='foo'),
                                                 overrides=[]),
                                     IncludeStmt(pattern=ReFixed(text='bar'),
                                                 overrides=[])])
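
        Completely blank lines are skipped in exactly the same way:

            >>> IncludeStmtList.parse('''
            ...                       foo
            ...
            ...                       bar''')
            ... # doctest: +NORMALIZE_WHITESPACE
            IncludeStmtList(entries=[IncludeStmt(pattern=ReFixed(text='foo'),
                                                 overrides=[]),
                                     IncludeStmt(pattern=ReFixed(text='bar'),
                                                 overrides=[])])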
        """
        entries = []
        initial_lineno = lineno
        # NOTE: lineno is consciously shadowed below
        for lineno, line in enumerate(text.splitlines(), lineno):
            if WordScanner(line).get_token()[0] == WordScanner.TokenEnum.EOF:
                # XXX: hack to work around the fact that each line is scanned
                # separately so there is no way to naturally progress to the
                # next line yet.
                continue
            entries.append(IncludeStmt.parse(line, lineno, col_offset))
        return IncludeStmtList(initial_lineno, col_offset, entries)
    def parse(
        text: str, lineno: int=1, col_offset: int=0
    ) -> "Union[IncludeStmt, Error]":
        """
        Parse a single test plan include line.

        Using correct syntax will result in an IncludeStmt node with
        appropriate data in the ``pattern`` and ``overrides`` fields. Note
        that ``pattern`` may be a :class:`RePattern`, a :class:`ReFixed`, or a
        :class:`ReErr` (the latter is not a valid pattern and cannot be used).
        Overrides are a list of :class:`OverrideExpression`. The list may
        contain incorrect or duplicate values; checking for those is left to
        higher-level analysis.

        The whole overrides section is optional so a single pattern is a good
        include statement:

            >>> IncludeStmt.parse("usb.*")
            ... # doctest: +NORMALIZE_WHITESPACE
            IncludeStmt(pattern=RePattern(text='usb.*',
                                          re=re.compile('usb.*')),
                        overrides=[])

        Any number of key=value override pairs can be given, separated by
        commas:

            >>> IncludeStmt.parse("usb.* f1=o1")
            ... # doctest: +NORMALIZE_WHITESPACE
            IncludeStmt(pattern=RePattern(text='usb.*',
                                          re=re.compile('usb.*')),
                        overrides=[OverrideExpression(field=Text(text='f1'),
                                                      value=Text(text='o1'))])
            >>> IncludeStmt.parse("usb.* f1=o1, f2=o2")
            ... # doctest: +NORMALIZE_WHITESPACE
            IncludeStmt(pattern=RePattern(text='usb.*',
                                          re=re.compile('usb.*')),
                        overrides=[OverrideExpression(field=Text(text='f1'),
                                                      value=Text(text='o1')),
                                   OverrideExpression(field=Text(text='f2'),
                                                      value=Text(text='o2'))])
            >>> IncludeStmt.parse("usb.* f1=o1, f2=o2, f3=o3")
            ... # doctest: +NORMALIZE_WHITESPACE
            IncludeStmt(pattern=RePattern(text='usb.*',
                                          re=re.compile('usb.*')),
                        overrides=[OverrideExpression(field=Text(text='f1'),
                                                      value=Text(text='o1')),
                                   OverrideExpression(field=Text(text='f2'),
                                                      value=Text(text='o2')),
                                   OverrideExpression(field=Text(text='f3'),
                                                      value=Text(text='o3'))])

        Obviously, some things can fail. The following examples show various
        possible error states; in each case an Error node is returned instead
        of the whole statement.

            >>> IncludeStmt.parse("")
            Error(msg='expected pattern')
            >>> IncludeStmt.parse("pattern field")
            Error(msg="expected '='")
            >>> IncludeStmt.parse("pattern field=")
            Error(msg='expected override value')
            >>> IncludeStmt.parse("pattern field=override junk")
            Error(msg="expected ','")
            >>> IncludeStmt.parse("pattern field=override, ")
            Error(msg='expected override field')
        """
        scanner = WordScanner(text)
        # PATTERN ...
        token, lexeme = scanner.get_token()
        if token != scanner.TokenEnum.WORD:
            return Error(lineno, col_offset, _("expected pattern"))
        pattern = Re.parse(lexeme, lineno, col_offset)
        overrides = []
        for i in itertools.count():
            # PATTERN FIELD ...
            token, lexeme = scanner.get_token()
            if token == scanner.TokenEnum.EOF and i == 0:
                # The whole override section is optional so the sequence may
                # end with EOF on the first iteration of the loop.
                break
            elif token != scanner.TokenEnum.WORD:
                return Error(lineno, col_offset, _("expected override field"))
            field = Text(lineno, col_offset, lexeme)
            # PATTERN FIELD = ...
            token, lexeme = scanner.get_token()
            if token != scanner.TokenEnum.EQUALS:
                return Error(lineno, col_offset, _("expected '='"))
            # PATTERN FIELD = VALUE ...
            token, lexeme = scanner.get_token()
            if token != scanner.TokenEnum.WORD:
                return Error(lineno, col_offset, _("expected override value"))
            value = Text(lineno, col_offset, lexeme)
            expr = OverrideExpression(lineno, col_offset, field, value)
            overrides.append(expr)
            # is there any more?
            # PATTERN FIELD = VALUE , ...
            token, lexeme = scanner.get_token()
            if token == scanner.TokenEnum.COMMA:
                # (and again)
                continue
            elif token == scanner.TokenEnum.EOF:
                break
            else:
                return Error(lineno, col_offset, _("expected ','"))
        return IncludeStmt(lineno, col_offset, pattern, overrides)
    def parse(
        text: str, lineno: int=1, col_offset: int=0
    ) -> "Union[FieldOverride, Error]":
        """
        Parse a single test plan field override line.

        Using correct syntax will result in a FieldOverride node with
        appropriate data in the ``value`` and ``pattern`` fields. Note that
        ``pattern`` may be a :class:`RePattern`, a :class:`ReFixed`, or a
        :class:`ReErr` (the latter is not a valid pattern and cannot be used).

            >>> FieldOverride.parse("apply new-value to pattern")
            ... # doctest: +NORMALIZE_WHITESPACE
            FieldOverride(value=Text(text='new-value'),
                          pattern=ReFixed(text='pattern'))
            >>> FieldOverride.parse("apply blocker to .*")
            ... # doctest: +NORMALIZE_WHITESPACE
            FieldOverride(value=Text(text='blocker'),
                          pattern=RePattern(text='.*', re=re.compile('.*')))

        Using incorrect syntax will result in a single Error node being
        returned. The message (``msg``) field contains useful information on
        the cause of the problem, as depicted below:

            >>> FieldOverride.parse("")
            Error(msg="expected 'apply' near ''")
            >>> FieldOverride.parse("apply")
            Error(msg='expected override value')
            >>> FieldOverride.parse("apply value")
            Error(msg="expected 'to' near ''")
            >>> FieldOverride.parse("apply value to")
            Error(msg='expected override pattern')
            >>> FieldOverride.parse("apply value to pattern junk")
            Error(msg="unexpected garbage: 'junk'")

        Lastly, shell-style comments are supported, although they are
        discarded by the scanner:

            >>> FieldOverride.parse("apply value to pattern # comment")
            ... # doctest: +NORMALIZE_WHITESPACE
            FieldOverride(value=Text(text='value'),
                          pattern=ReFixed(text='pattern'))

        """
        # XXX: Until our home-grown scanner is ready, the col_offset values
        # below are all dummy. This is not strictly critical but should be
        # improved upon later.
        scanner = WordScanner(text)
        # 'APPLY' ...
        token, lexeme = scanner.get_token()
        if token != scanner.TokenEnum.WORD or lexeme != 'apply':
            return Error(lineno, col_offset,
                         _("expected {!a} near {!r}").format('apply', lexeme))
        # 'APPLY' VALUE ...
        token, lexeme = scanner.get_token()
        if token != scanner.TokenEnum.WORD:
            return Error(lineno, col_offset, _("expected override value"))
        value = Text(lineno, col_offset, lexeme)
        # 'APPLY' VALUE 'TO' ...
        token, lexeme = scanner.get_token()
        if token != scanner.TokenEnum.WORD or lexeme != 'to':
            return Error(lineno, col_offset,
                         _("expected {!a} near {!r}").format('to', lexeme))
        # 'APPLY' VALUE 'TO' PATTERN...
        token, lexeme = scanner.get_token()
        if token != scanner.TokenEnum.WORD:
            return Error(lineno, col_offset, _("expected override pattern"))
        pattern = Re.parse(lexeme, lineno, col_offset)
        # 'APPLY' VALUE 'TO' PATTERN <EOF>
        token, lexeme = scanner.get_token()
        if token != scanner.TokenEnum.EOF:
            return Error(lineno, col_offset,
                         _("unexpected garbage: {!r}").format(lexeme))
        return FieldOverride(lineno, col_offset, value, pattern)
    def test_comments_eof(self):
        scanner = WordScanner('# comment')
        self.assertEqual(
            scanner.get_token(),
            (WordScanner.TokenEnum.EOF, ''))
    def test_comments_newline1(self):
        self.assertEqual(
            WordScanner('# comment\n').get_token(),
            (WordScanner.TokenEnum.EOF, ''))
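    def test_quoted_word_sketch(self):
        # A hedged sketch, not part of the original suite: per the
        # WordList.parse docstring, double quotes should keep embedded
        # whitespace inside a single WORD token.
        scanner = WordScanner('"foo bar"')
        self.assertEqual(
            scanner.get_token(),
            (WordScanner.TokenEnum.WORD, 'foo bar'))
        self.assertEqual(
            scanner.get_token(),
            (WordScanner.TokenEnum.EOF, ''))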