Code example #1
    def testRealAnnotationDefinition(self):
        real_definition = """
        DefineNetworkTrafficAnnotation("foobar_fetcher", R"(
          semantics {
            sender: "Foobar Component"
            description: "Fetches Foobars for the user."
            trigger: "The user requests a new Foobar."
            data: "The type of Foobar the user requested."
            destination: GOOGLE_OWNED_SERVICE
          }
          policy {
            cookies_allowed: NO
            setting: "Privacy and Security > Enable Foobars"
            chrome_policy {
              FoobarsEnabled {
                FoobarsEnabled: false
              }
            }
          })");"""
        tokenizer = Tokenizer(real_definition,
                              'components/foobar/foobar_request_handler.cc',
                              42)
        self.assertEqual('DefineNetworkTrafficAnnotation',
                         tokenizer.advance('symbol'))
        self.assertEqual('(', tokenizer.advance('left_paren'))
        self.assertEqual('foobar_fetcher', tokenizer.advance('string_literal'))
        self.assertEqual(',', tokenizer.advance('comma'))
        self.assertTrue(bool(tokenizer.advance('string_literal')))
        self.assertEqual(')', tokenizer.advance('right_paren'))
Code example #2
File: extractor.py  Project: luojiguicai/chromium
    def _parse_string(self, tokenizer: Tokenizer) -> str:
        """Parse a string value.

    It could be a string literal by itself, or multiple string literals
    concatenated together. Add a newline to the string for each
    concatenation."""
        text = tokenizer.advance('string_literal')
        while True:
            # Perform concatenations.
            if tokenizer.maybe_advance('plus') is None:
                break
            text += '\n'
            text += tokenizer.advance('string_literal')
        return text
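To make the concatenation behavior concrete, here is a minimal usage sketch. It is not from the Chromium sources and assumes only the Tokenizer behavior exercised by the tests in this listing:

    tokenizer = Tokenizer('"Fetches Foobars " + "for the user."', 'foo.cc', 1)
    text = tokenizer.advance('string_literal')
    while tokenizer.maybe_advance('plus') is not None:
        # Each '+' concatenation contributes a newline separator.
        text += '\n' + tokenizer.advance('string_literal')
    assert text == 'Fetches Foobars \nfor the user.'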
Code example #3
    def testAdvanceHappyPath(self):
        tokenizer = Tokenizer('"hello", R"(world)", function_name())));',
                              'foo.txt', 33)
        self.assertEqual('hello', tokenizer.advance('string_literal'))
        self.assertEqual(',', tokenizer.advance('comma'))
        self.assertEqual('world', tokenizer.advance('string_literal'))
        self.assertEqual(',', tokenizer.advance('comma'))
        self.assertEqual('function_name', tokenizer.advance('symbol'))
        self.assertEqual('(', tokenizer.advance('left_paren'))
        self.assertEqual(')', tokenizer.advance('right_paren'))
        self.assertEqual(')', tokenizer.advance('right_paren'))
Code example #4
    def testMaybeAdvance(self):
        tokenizer = Tokenizer('"hello", world', 'foo.txt', 33)
        self.assertEqual(None, tokenizer.maybe_advance('symbol'))
        self.assertEqual('hello', tokenizer.maybe_advance('string_literal'))
        self.assertEqual(',', tokenizer.maybe_advance('comma'))
        self.assertEqual(None, tokenizer.maybe_advance('left_paren'))
        self.assertEqual('world', tokenizer.maybe_advance('symbol'))
        self.assertEqual(None, tokenizer.maybe_advance('right_paren'))
Code example #5
    def testConcatenatedStrings(self):
        tokenizer = Tokenizer('"hello " + "world" + "!"', 'foo.java', 22)
        self.assertEqual('hello ', tokenizer.advance('string_literal'))
        self.assertEqual('+', tokenizer.advance('plus'))
        self.assertEqual('world', tokenizer.advance('string_literal'))
        self.assertEqual('+', tokenizer.advance('plus'))
        self.assertEqual('!', tokenizer.advance('string_literal'))
Code example #6
File: extractor.py  Project: guiyinlangzi/chromium
    def _parse_body(self, body):
        """Tokenizes and parses the arguments given to the definition function."""
        # Don't bother parsing CreateMutableNetworkTrafficAnnotationTag(); we
        # don't care about its arguments anyway.
        if self.type_name == 'Mutable':
            return

        tokenizer = Tokenizer(body, self.file_path, self.line_number)

        # unique_id
        self.unique_id = tokenizer.advance('string_literal')
        tokenizer.advance('comma')

        # extra_id (Partial/BranchedCompleting)
        if self.type_name == 'Partial' or self.type_name == 'BranchedCompleting':
            self.extra_id = tokenizer.advance('string_literal')
            tokenizer.advance('comma')

        # partial_annotation (Completing/BranchedCompleting)
        if self.type_name == 'Completing' or self.type_name == 'BranchedCompleting':
            # Skip the |partial_annotation| argument. It can be a variable_name, or a
            # FunctionName(), so skip the parentheses if they're there.
            tokenizer.advance('symbol')
            if tokenizer.maybe_advance('left_paren'):
                tokenizer.advance('right_paren')
            tokenizer.advance('comma')

        # proto text
        self.text = tokenizer.advance('string_literal')

        # The function call should end here without any more arguments.
        assert tokenizer.advance('right_paren')
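For orientation, here is a hedged sketch of the token stream _parse_body walks for a 'Completing' definition. The argument text is illustrative, not taken from Chromium:

    body = '"foobar_fetcher", GetPartialAnnotation(), R"(semantics {...})")'
    tokenizer = Tokenizer(body, 'foo.cc', 1)
    unique_id = tokenizer.advance('string_literal')  # 'foobar_fetcher'
    tokenizer.advance('comma')
    tokenizer.advance('symbol')                # the |partial_annotation| callee
    if tokenizer.maybe_advance('left_paren'):  # skip its parentheses, if any
        tokenizer.advance('right_paren')
    tokenizer.advance('comma')
    text = tokenizer.advance('string_literal')  # the proto text
    assert tokenizer.advance('right_paren')     # no further arguments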
Code example #7
    def testAdvanceErrorPaths(self):
        tokenizer = Tokenizer('  hello , ', 'foo.txt', 33)
        tokenizer.advance('symbol')
        with self.assertRaisesRegex(SourceCodeParsingError,
                                    'Expected symbol.+at foo.txt:33'):
            # There are no more tokens.
            tokenizer.advance('symbol')

        tokenizer = Tokenizer('"hello"', 'foo.txt', 33)
        with self.assertRaisesRegex(SourceCodeParsingError,
                                    'Expected comma.+at foo.txt:33'):
            # The type doesn't match.
            tokenizer.advance('comma')

        tokenizer = Tokenizer('{', 'foo.txt', 33)
        with self.assertRaisesRegex(SourceCodeParsingError,
                                    'Expected string_literal.+at foo.txt:33'):
            # Not a valid token at all.
            tokenizer.advance('string_literal')
Code example #8
    def testAdvanceMultiline(self):
        tokenizer = Tokenizer('\n\tR"(the quick\nbrown\nfox)"', 'foo.txt', 33)
        self.assertEqual('the quick\nbrown\nfox',
                         tokenizer.advance('string_literal'))
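None of the snippets above include the Tokenizer itself. The following is a minimal sketch, not Chromium's actual extractor.py, implementing just enough to satisfy the tests in this listing: six token types, plain "..." and raw R"(...)" string literals, and a SourceCodeParsingError message naming the expected token type and the file:line passed to the constructor:

    import re

    class SourceCodeParsingError(Exception):
        pass

    class Tokenizer:
        # Patterns for the token types the tests exercise. A string_literal
        # is either a C++ raw literal R"(...)" (possibly spanning lines,
        # hence re.DOTALL) or a plain "..." literal.
        _PATTERNS = {
            'string_literal': re.compile(r'R"\((.*?)\)"|"([^"]*)"', re.DOTALL),
            'symbol': re.compile(r'[A-Za-z_][A-Za-z0-9_]*'),
            'comma': re.compile(r','),
            'plus': re.compile(r'\+'),
            'left_paren': re.compile(r'\('),
            'right_paren': re.compile(r'\)'),
        }

        def __init__(self, text, file_path, line_number):
            self.text = text
            self.file_path = file_path
            # Assumption: the real extractor likely tracks the current line;
            # this sketch only reports the starting line in error messages.
            self.line_number = line_number
            self._pos = 0

        def maybe_advance(self, token_type):
            """Return the next token if it matches token_type, else None."""
            while self._pos < len(self.text) and self.text[self._pos].isspace():
                self._pos += 1
            match = self._PATTERNS[token_type].match(self.text, self._pos)
            if match is None:
                return None
            self._pos = match.end()
            if token_type == 'string_literal':
                # Exactly one of the two alternatives captured the contents.
                raw, plain = match.groups()
                return raw if raw is not None else plain
            return match.group(0)

        def advance(self, token_type):
            """Return the next token, raising if it doesn't match token_type."""
            value = self.maybe_advance(token_type)
            if value is None:
                raise SourceCodeParsingError(
                    'Expected %s, but did not find it at %s:%d' %
                    (token_type, self.file_path, self.line_number))
            return value

Under these assumptions the tests above should pass unchanged; for instance, testMaybeAdvance gets None from the leading 'symbol' probe because '"hello"' only matches the string_literal pattern.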