Example #1
def TokenizeSourceAndRunEcmaPass(source):
  """Tokenize a source and run the EcmaMetaDataPass on it.

  Args:
    source: A source file, as a string or a file-like object that yields lines.

  Returns:
    The first token of the resulting token stream.
  """
  start_token = TokenizeSource(source)
  ecma_pass = ecmametadatapass.EcmaMetaDataPass()
  ecma_pass.Process(start_token)
  return start_token
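A quick way to exercise this helper is to pass it a small source string and walk the annotated stream. This is a minimal sketch, assuming the closure_linter token API this snippet is written against, where each token exposes type, string, next, and (after the pass) a metadata attribute:

start_token = TokenizeSourceAndRunEcmaPass('var x = 1;\n')

token = start_token
while token:
  # Print the token type, its raw text, and the EcmaMetaData the pass attached.
  print('%s %r %s' % (token.type, token.string, token.metadata))
  token = token.next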
Example #2
def Run(filename, error_handler, source=None):
    """Tokenize, run passes, and check the given file.

  Args:
    filename: The path of the file to check
    error_handler: The error handler to report errors to.
    source: A file-like object with the file source. If omitted, the file will
      be read from the filename path.
  """
    if not source:
        try:
            source = open(filename)
        except IOError:
            error_handler.HandleFile(filename, None)
            error_handler.HandleError(
                error.Error(errors.FILE_NOT_FOUND, 'File not found'))
            error_handler.FinishFile()
            return

    if _IsHtml(filename):
        source_file = htmlutil.GetScriptLines(source)
    else:
        source_file = source

    token, tokenizer_mode = _Tokenize(source_file)

    error_handler.HandleFile(filename, token)

    # If we did not end in the basic mode, this is a failed parse.
    if tokenizer_mode is not javascripttokenizer.JavaScriptModes.TEXT_MODE:
        error_handler.HandleError(
            error.Error(errors.FILE_IN_BLOCK,
                        'File ended in mode "%s".' % tokenizer_mode,
                        _GetLastNonWhiteSpaceToken(token)))

    # Run the ECMA metadata pass.
    ecma_pass = ecmametadatapass.EcmaMetaDataPass()
    error_token = RunMetaDataPass(token, ecma_pass, error_handler, filename)

    is_limited_doc_check = (_IsLimitedDocCheck(filename,
                                               flags.FLAGS.limited_doc_files))

    _RunChecker(token,
                error_handler,
                is_limited_doc_check,
                is_html=_IsHtml(filename),
                stop_token=error_token)

    error_handler.FinishFile()
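To drive Run end to end, any object implementing the three methods it calls (HandleFile, HandleError, FinishFile) will do. The handler below is a hypothetical stand-in, not one of closure_linter's shipped handlers, and the filename is invented:

class CollectingErrorHandler(object):
    """Hypothetical handler that records everything Run reports."""

    def __init__(self):
        self.errors = []

    def HandleFile(self, filename, first_token):
        self.filename = filename

    def HandleError(self, error):
        self.errors.append(error)

    def FinishFile(self):
        pass


handler = CollectingErrorHandler()
Run('myscript.js', handler)
for err in handler.errors:
    # error.Error objects carry a numeric code and a message string.
    print('%s: %s' % (err.code, err.message))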
Example #3
  def __init__(self, error_handler):
    """Initialize a JavaScriptStyleChecker object.

    Args:
      error_handler: Error handler to pass all errors to.
    """
    checkerbase.CheckerBase.__init__(
        self,
        error_handler=error_handler,
        lint_rules=javascriptlintrules.JavaScriptLintRules(),
        state_tracker=javascriptstatetracker.JavaScriptStateTracker(
            closurized_namespaces=flags.FLAGS.closurized_namespaces),
        metadata_pass=ecmametadatapass.EcmaMetaDataPass(),
        limited_doc_files=flags.FLAGS.limited_doc_files)
Example #4
    def _GetInitializedNamespacesInfo(self, token, closurized_namespaces,
                                      ignored_extra_namespaces):
        """Returns a namespaces info initialized with the given token stream."""
        namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
            closurized_namespaces=closurized_namespaces,
            ignored_extra_namespaces=ignored_extra_namespaces)
        state_tracker = javascriptstatetracker.JavaScriptStateTracker()

        ecma_pass = ecmametadatapass.EcmaMetaDataPass()
        ecma_pass.Process(token)

        alias_pass = aliaspass.AliasPass(closurized_namespaces)
        alias_pass.Process(token)

        while token:
            namespaces_info.ProcessToken(token, state_tracker)
            token = token.next

        return namespaces_info
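The ordering inside this helper is the important part: EcmaMetaDataPass must annotate the tokens before AliasPass runs, since the alias pass builds on that context information, and both must finish before ProcessToken walks the stream. A sketch of calling it from a sibling test method, with invented namespace values:

    def testNamespacesInfo(self):
        token = javascripttokenizer.JavaScriptTokenizer().TokenizeFile(
            ["goog.require('package.Thing');"])
        info = self._GetInitializedNamespacesInfo(
            token,
            closurized_namespaces=['package'],
            ignored_extra_namespaces=[])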
Example #5
    def __init__(self, error_handler):
        """Initialize a JavaScriptStyleChecker object.

        Args:
          error_handler: Error handler to pass all errors to.
        """
        self._namespaces_info = None
        if flags.FLAGS.closurized_namespaces:
            self._namespaces_info = (
                closurizednamespacesinfo.ClosurizedNamespacesInfo(
                    flags.FLAGS.closurized_namespaces,
                    flags.FLAGS.ignored_extra_namespaces))

        checkerbase.CheckerBase.__init__(
            self,
            error_handler=error_handler,
            lint_rules=javascriptlintrules.JavaScriptLintRules(
                self._namespaces_info),
            state_tracker=javascriptstatetracker.JavaScriptStateTracker(),
            metadata_pass=ecmametadatapass.EcmaMetaDataPass(),
            limited_doc_files=flags.FLAGS.limited_doc_files)
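This constructor reads gflags at construction time, so the flags must be set (normally by parsing argv) before the checker is built. A sketch with invented values; the flag names come from the snippet above, and the handler is the hypothetical one from the Example #2 sketch:

flags.FLAGS.closurized_namespaces = ['goog', 'myproject']
flags.FLAGS.ignored_extra_namespaces = []
flags.FLAGS.limited_doc_files = []

checker = JavaScriptStyleChecker(error_handler=CollectingErrorHandler())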
Example #6
class RequireProvideSorterTest(googletest.TestCase):
    """Tests for RequireProvideSorter."""

    _tokenizer = javascripttokenizer.JavaScriptTokenizer()
    _metadata_pass = ecmametadatapass.EcmaMetaDataPass()

    def testFixRequires_removeBlankLines(self):
        """Tests that blank lines are omitted in sorted goog.require statements."""
        input_lines = [
            'goog.provide(\'package.subpackage.Whatever\');', '',
            'goog.require(\'package.subpackage.ClassB\');', '',
            'goog.require(\'package.subpackage.ClassA\');'
        ]
        expected_lines = [
            'goog.provide(\'package.subpackage.Whatever\');', '',
            'goog.require(\'package.subpackage.ClassA\');',
            'goog.require(\'package.subpackage.ClassB\');'
        ]
        token = self._tokenizer.TokenizeFile(input_lines)
        self._metadata_pass.Reset()
        self._metadata_pass.Process(token)

        sorter = requireprovidesorter.RequireProvideSorter()
        sorter.FixRequires(token)

        self.assertEqual(expected_lines, self._GetLines(token))

    def _GetLines(self, token):
        """Returns an array of lines based on the specified token stream."""
        lines = []
        line = ''
        while token:
            line += token.string
            if token.IsLastInLine():
                lines.append(line)
                line = ''
            token = token.next
        return lines
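For completeness, test modules in this codebase are normally executed through a main hook like the one below; it is assumed here, since the snippet above omits it:

if __name__ == '__main__':
    googletest.main()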