def HandleToken(self, token, last_non_space_token):
        """Handles the given token and updates state.

    Tracks goog.require/goog.provide statements, implicit provides via
    top-level assignments, and namespaces used through @implements doc flags.

    Args:
      token: The token to handle.
      last_non_space_token: The last non-whitespace token seen before this
        one (forwarded unchanged to the superclass handler).
    """
        super(JavaScriptStateTracker,
              self).HandleToken(token, last_non_space_token)

        if token.IsType(Type.IDENTIFIER):
            if token.string == 'goog.require':
                # Record the string token naming the required namespace.
                class_token = tokenutil.Search(token, Type.STRING_TEXT)
                self.__goog_require_tokens.append(class_token)

            elif token.string == 'goog.provide':
                # Record the string token naming the provided namespace.
                class_token = tokenutil.Search(token, Type.STRING_TEXT)
                self.__goog_provide_tokens.append(class_token)

            elif self.__closurized_namespaces:
                # Any other identifier counts as a namespace usage.
                self.__AddUsedNamespace(token.string)
        # A top-level assignment whose identifier exactly equals its
        # closurized namespace implicitly provides that namespace.
        if token.IsType(Type.SIMPLE_LVALUE) and not self.InFunction():
            identifier = token.values['identifier']

            if self.__closurized_namespaces:
                namespace = self.GetClosurizedNamespace(identifier)
                if namespace and identifier == namespace:
                    self.__provided_namespaces.add(namespace)
        if (self.__closurized_namespaces and token.IsType(Type.DOC_FLAG)
                and token.attached_object.flag_type == 'implements'):
            # Interfaces should be goog.require'd.
            doc_start = tokenutil.Search(token, Type.DOC_START_BRACE)
            interface = tokenutil.Search(doc_start, Type.COMMENT)
            self.__AddUsedNamespace(interface.string)
    # Beispiel #2: scraped-example separator, not executable code.
    def IsExtraRequire(self, token):
        """Determines whether the given goog.require token is unnecessary.

    Args:
      token: A goog.require token.

    Returns:
      True if the given token corresponds to an unnecessary goog.require
      statement, otherwise False.
    """
        namespace = tokenutil.Search(token, Type.STRING_TEXT).string

        # Only namespaces rooted in a configured closurized namespace are
        # eligible for the extra-require check.
        if namespace.split('.')[0] not in self.__closurized_namespaces:
            return False

        # Some namespaces are explicitly exempted from this check.
        if namespace in self.__ignored_extra_namespaces:
            return False

        # Requiring the same namespace twice is always extra.
        if token in self.__duplicate_require_tokens:
            return True

        # TODO(user): There's probably a faster way to compute this.
        # The require is justified if the namespace is referenced anywhere,
        # either as a namespace or as a full identifier.
        return not any(
            namespace in (used_namespace, used_identifier)
            for used_namespace, used_identifier in self.__used_namespaces)
    # Beispiel #3: scraped-example separator, not executable code.
    def IsExtraProvide(self, token):
        """Determines whether the given goog.provide token is unnecessary.

    Args:
      token: A goog.provide token.

    Returns:
      True if the given token corresponds to an unnecessary goog.provide
      statement, otherwise False.
    """
        # Files wrapped in goog.scope are never flagged.
        if self._scopified_file:
            return False

        namespace = tokenutil.Search(token, TokenType.STRING_TEXT).string

        # Only namespaces rooted in a configured closurized namespace are
        # eligible for the extra-provide check.
        if namespace.split('.', 1)[0] not in self._closurized_namespaces:
            return False

        # Providing the same namespace twice is always extra.
        if token in self._duplicate_provide_tokens:
            return True

        # TODO(user): There's probably a faster way to compute this.
        # The provide is justified if anything in the file creates the
        # namespace, either as a namespace or as a full identifier.
        return not any(
            namespace in (created_namespace, created_identifier)
            for created_namespace, created_identifier
            in self._created_namespaces)
    def _GetTokensMap(self, tokens):
        """Builds a map from object name to the tokens tied to that object.

    For each goog.provide/goog.require token, walks backwards through the
    token stream collecting any immediately preceding comment-only lines,
    and forwards to the end of the statement's own line, so that the whole
    group travels together (e.g. when the statements are sorted).

    Args:
      tokens: A list of goog.provide or goog.require tokens.

    Returns:
      A dictionary mapping each provided/required object name to the list
      of tokens associated with its statement. For example:

      {
        'object.a': [JavaScriptToken, JavaScriptToken, ...],
        'object.b': [...]
      }

      The list includes comment lines above the statement and everything
      after the statement on the same line, e.g. both lines of:

      /** @suppress {extraRequire} */
      goog.require('object.a'); // Some comment.
    """
        result = {}
        for statement_token in tokens:
            name = tokenutil.Search(statement_token, Type.STRING_TEXT).string

            # Walk upwards while each previous line starts with a comment;
            # such comment lines are presumed to belong to this statement.
            start = statement_token
            candidate = tokenutil.GetFirstTokenInPreviousLine(start)
            while candidate and candidate.IsAnyType(Type.COMMENT_TYPES):
                start = candidate
                candidate = tokenutil.GetFirstTokenInPreviousLine(start)

            # Everything up to the end of the statement's line is included.
            end = tokenutil.GetLastTokenInSameLine(statement_token)

            result[name] = self._GetTokenList(start, end)
        return result
    def testIsExtraProvide_duplicate(self):
        """Tests that providing a namespace twice makes the second one extra."""
        input_lines = [
            "goog.provide('package.Foo');",
            "goog.provide('package.Foo');",
            'package.Foo = function() {};',
        ]
        token = testutil.TokenizeSource(input_lines)
        namespaces_info = self._GetInitializedNamespacesInfo(
            token, ['package'], [])

        # Skip past the first statement to the second goog.provide token.
        token = tokenutil.Search(token.next, TokenType.IDENTIFIER)

        self.assertTrue(namespaces_info.IsExtraProvide(token),
                        'Should be extra since it is already provided.')
    def _GetRequireOrProvideTokenStrings(self, tokens):
        """Gets a list of strings corresponding to the given list of tokens.

    The string will be the next string in the token stream after each token in
    tokens. This is used to find the object being provided/required by a given
    goog.provide or goog.require token.

    Args:
      tokens: A list of goog.provide or goog.require tokens.

    Returns:
      A list of object names that are being provided or required by the given
      list of tokens. For example:

      ['object.a', 'object.c', 'object.b']
    """
        # List comprehension replaces the manual append loop; order and
        # contents are identical.
        return [
            tokenutil.Search(token, Type.STRING_TEXT).string
            for token in tokens
        ]
    # Beispiel #7: scraped-example separator, not executable code.
    def IsExtraRequire(self, token):
        """Determines whether the given goog.require token is unnecessary.

    Args:
      token: A goog.require token.

    Returns:
      True if the given token corresponds to an unnecessary goog.require
      statement, otherwise False.
    """
        # Files wrapped in goog.scope are never flagged.
        if self._scopified_file:
            return False

        namespace = tokenutil.Search(token, TokenType.STRING_TEXT).string

        # Only namespaces rooted in a configured closurized namespace are
        # eligible for the extra-require check.
        if namespace.split('.', 1)[0] not in self._closurized_namespaces:
            return False

        # Some namespaces are explicitly exempted from this check.
        if namespace in self._ignored_extra_namespaces:
            return False

        # Requiring the same namespace twice is always extra.
        if token in self._duplicate_require_tokens:
            return True

        # An @suppress {extraRequire} keeps the require.
        if namespace in self._suppressed_requires:
            return False

        # If the namespace contains a component that is initial caps, then that
        # must be the last component of the namespace.
        parts = namespace.split('.')
        if len(parts) > 1 and parts[-2][0].isupper():
            return True

        # TODO(user): There's probably a faster way to compute this.
        # The require is justified if the namespace is referenced anywhere,
        # either as a namespace or as a full identifier.
        return not any(
            namespace in (used_namespace, used_identifier)
            for used_namespace, used_identifier in self._used_namespaces)
    # Beispiel #8: scraped-example separator, not executable code.
    def HandleError(self, error):
        """Attempts to fix the error by rewriting the token stream in place.

    Dispatches on error.code; each handled branch inserts, deletes or edits
    tokens and records the change via self._AddFix(). Error codes with no
    matching branch are silently left unfixed.

    Args:
      error: The error object. Reads error.code and error.token always, and
        error.position / error.fix_data for some codes.
    """
        code = error.code
        token = error.token

        # {Foo|null} / {null|Foo} --> {?Foo}: prefix the type with '?' and
        # strip the 'null|' / '|null' alternates from every type token.
        if code == errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL:
            iterator = token.attached_object.type_start_token
            if iterator.type == Type.DOC_START_BRACE or iterator.string.isspace(
            ):
                iterator = iterator.next

            # Preserve any leading whitespace when inserting the '?'.
            leading_space = len(iterator.string) - len(
                iterator.string.lstrip())
            iterator.string = '%s?%s' % (' ' * leading_space,
                                         iterator.string.lstrip())

            # Cover the no outer brace case where the end token is part of the type.
            while iterator and iterator != token.attached_object.type_end_token.next:
                iterator.string = iterator.string.replace('null|', '').replace(
                    '|null', '')
                iterator = iterator.next

            # Create a new flag object with updated type info.
            token.attached_object = javascriptstatetracker.JsDocFlag(token)
            self._AddFix(token)

        elif code in (errors.MISSING_SEMICOLON_AFTER_FUNCTION,
                      errors.MISSING_SEMICOLON):
            # Insert an explicit ';' and clear any implied-semicolon flags.
            semicolon_token = Token(';', Type.SEMICOLON, token.line,
                                    token.line_number)
            tokenutil.InsertTokenAfter(semicolon_token, token)
            token.metadata.is_implied_semicolon = False
            semicolon_token.metadata.is_implied_semicolon = False
            self._AddFix(token)

        elif code in (errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
                      errors.REDUNDANT_SEMICOLON,
                      errors.COMMA_AT_END_OF_LITERAL):
            # The offending token itself is simply removed.
            tokenutil.DeleteToken(token)
            self._AddFix(token)

        elif code == errors.INVALID_JSDOC_TAG:
            # Only the @returns -> @return misspelling is auto-fixable.
            if token.string == '@returns':
                token.string = '@return'
                self._AddFix(token)

        elif code == errors.FILE_MISSING_NEWLINE:
            # This error is fixed implicitly by the way we restore the file
            self._AddFix(token)

        elif code == errors.MISSING_SPACE:
            # error.position says where the space goes relative to the token.
            if error.position:
                if error.position.IsAtBeginning():
                    tokenutil.InsertSpaceTokenAfter(token.previous)
                elif error.position.IsAtEnd(token.string):
                    tokenutil.InsertSpaceTokenAfter(token)
                else:
                    token.string = error.position.Set(token.string, ' ')
                self._AddFix(token)

        elif code == errors.EXTRA_SPACE:
            # Remove the flagged span by setting it to the empty string.
            if error.position:
                token.string = error.position.Set(token.string, '')
                self._AddFix(token)

        elif code == errors.JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER:
            # Replace the invalid trailing character with a period.
            token.string = error.position.Set(token.string, '.')
            self._AddFix(token)

        elif code == errors.MISSING_LINE:
            if error.position.IsAtBeginning():
                tokenutil.InsertLineAfter(token.previous)
            else:
                tokenutil.InsertLineAfter(token)
            self._AddFix(token)

        elif code == errors.EXTRA_LINE:
            tokenutil.DeleteToken(token)
            self._AddFix(token)

        elif code == errors.WRONG_BLANK_LINE_COUNT:
            if not token.previous:
                # TODO(user): Add an insertBefore method to tokenutil.
                return

            # error.fix_data is a signed line-count delta: negative means
            # delete that many blank lines, positive means insert them.
            num_lines = error.fix_data
            should_delete = False

            if num_lines < 0:
                num_lines = num_lines * -1
                should_delete = True

            # NOTE: xrange — this file targets Python 2.
            for i in xrange(1, num_lines + 1):
                if should_delete:
                    # TODO(user): DeleteToken should update line numbers.
                    tokenutil.DeleteToken(token.previous)
                else:
                    tokenutil.InsertLineAfter(token.previous)
                self._AddFix(token)

        elif code == errors.UNNECESSARY_DOUBLE_QUOTED_STRING:
            # Replace the "..." delimiters with '...' delimiters; the string
            # body tokens between them are left untouched.
            end_quote = tokenutil.Search(token, Type.DOUBLE_QUOTE_STRING_END)
            if end_quote:
                single_quote_start = Token("'", Type.SINGLE_QUOTE_STRING_START,
                                           token.line, token.line_number)
                # NOTE(review): the end token is created with
                # Type.SINGLE_QUOTE_STRING_START and token.line_number;
                # SINGLE_QUOTE_STRING_END and end_quote.line_number look
                # intended — confirm against the token type definitions.
                single_quote_end = Token("'", Type.SINGLE_QUOTE_STRING_START,
                                         end_quote.line, token.line_number)

                tokenutil.InsertTokenAfter(single_quote_start, token)
                tokenutil.InsertTokenAfter(single_quote_end, end_quote)
                tokenutil.DeleteToken(token)
                tokenutil.DeleteToken(end_quote)
                self._AddFix([token, end_quote])

        elif code == errors.MISSING_BRACES_AROUND_TYPE:
            fixed_tokens = []
            start_token = token.attached_object.type_start_token

            # Insert the opening '{' if it is missing, splitting off any
            # leading whitespace so the brace lands in the right place.
            if start_token.type != Type.DOC_START_BRACE:
                leading_space = (len(start_token.string) -
                                 len(start_token.string.lstrip()))
                if leading_space:
                    start_token = tokenutil.SplitToken(start_token,
                                                       leading_space)
                    # Fix case where start and end token were the same.
                    if token.attached_object.type_end_token == start_token.previous:
                        token.attached_object.type_end_token = start_token

                new_token = Token("{", Type.DOC_START_BRACE, start_token.line,
                                  start_token.line_number)
                tokenutil.InsertTokenAfter(new_token, start_token.previous)
                token.attached_object.type_start_token = new_token
                fixed_tokens.append(new_token)

            end_token = token.attached_object.type_end_token
            if end_token.type != Type.DOC_END_BRACE:
                # If the start token was a brace, the end token will be a
                # FLAG_ENDING_TYPE token, if there wasn't a starting brace then
                # the end token is the last token of the actual type.
                last_type = end_token
                if not len(fixed_tokens):
                    last_type = end_token.previous

                while last_type.string.isspace():
                    last_type = last_type.previous

                # If there was no starting brace then a lone end brace wouldn't have
                # been type end token. Now that we've added any missing start brace,
                # see if the last effective type token was an end brace.
                if last_type.type != Type.DOC_END_BRACE:
                    trailing_space = (len(last_type.string) -
                                      len(last_type.string.rstrip()))
                    if trailing_space:
                        tokenutil.SplitToken(
                            last_type,
                            len(last_type.string) - trailing_space)

                    new_token = Token("}", Type.DOC_END_BRACE, last_type.line,
                                      last_type.line_number)
                    tokenutil.InsertTokenAfter(new_token, last_type)
                    token.attached_object.type_end_token = new_token
                    fixed_tokens.append(new_token)

            self._AddFix(fixed_tokens)

        elif code in (errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
                      errors.GOOG_PROVIDES_NOT_ALPHABETIZED):
            # Sort in place by rewriting only the string text of tokens that
            # are out of order; error.fix_data is the list of string tokens.
            tokens = error.fix_data
            strings = map(lambda x: x.string, tokens)
            sorted_strings = sorted(strings)

            index = 0
            changed_tokens = []
            for token in tokens:
                if token.string != sorted_strings[index]:
                    token.string = sorted_strings[index]
                    changed_tokens.append(token)
                index += 1

            self._AddFix(changed_tokens)

        elif code == errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC:
            # Drop the '{' and '}' surrounding @inheritDoc.
            if token.previous.string == '{' and token.next.string == '}':
                tokenutil.DeleteToken(token.previous)
                tokenutil.DeleteToken(token.next)
                self._AddFix([token])

        elif (code == errors.WRONG_INDENTATION
              and not FLAGS.disable_indentation_fixing):
            # error.position.start is the actual indent, .length the expected.
            token = tokenutil.GetFirstTokenInSameLine(token)
            actual = error.position.start
            expected = error.position.length

            if token.type in (Type.WHITESPACE, Type.PARAMETERS):
                token.string = token.string.lstrip() + (' ' * expected)
                self._AddFix([token])
            else:
                # We need to add indentation.
                new_token = Token(' ' * expected, Type.WHITESPACE, token.line,
                                  token.line_number)
                # Note that we'll never need to add indentation at the first line,
                # since it will always not be indented.  Therefore it's safe to assume
                # token.previous exists.
                tokenutil.InsertTokenAfter(new_token, token.previous)
                self._AddFix([token])

        elif code == errors.EXTRA_REQUIRE:
            fixed_tokens = []
            while token:
                if token.type == Type.IDENTIFIER:
                    if token.string not in ['goog.require', 'goog.provide']:
                        # Stop iterating over tokens once we're out of the requires and
                        # provides.
                        break
                    if token.string == 'goog.require':
                        # Text of form: goog.require('required'), skipping past open paren
                        # and open quote to the string text.
                        required = token.next.next.next.string
                        if required in error.fix_data:
                            fixed_tokens.append(token)
                            # Want to delete: goog.require + open paren + open single-quote +
                            # text + close single-quote + close paren + semi-colon = 7.
                            tokenutil.DeleteTokens(token, 7)
                token = token.next

            self._AddFix(fixed_tokens)
    # Beispiel #9: scraped-example separator, not executable code.
    def HandleToken(self, token, last_non_space_token):
        """Handles the given token and updates state.

    Tracks block/paren depth, doc comments and flags, function declarations
    (including their names, parameters and scope variables), and documented
    identifiers as the token stream is walked.

    Args:
      token: The token to handle.
      last_non_space_token: The last non-whitespace token before this one
        (not read directly here; presumably recorded by the caller as
        self._last_non_space_token — confirm).
    """
        self._is_block_close = False

        if not self._first_token:
            self._first_token = token

        # Track block depth.
        # (Note: 'type' shadows the builtin within this method.)
        type = token.type
        if type == Type.START_BLOCK:
            self._block_depth += 1

            # Subclasses need to handle block start very differently because
            # whether a block is a CODE or OBJECT_LITERAL block varies significantly
            # by language.
            self._block_types.append(self.GetBlockType(token))

            # When entering a function body, record its parameters.
            if self.InFunction():
                function = self._function_stack[-1]
                if self._block_depth == function.block_depth + 1:
                    function.parameters = self.GetParams()

        # Track block depth.
        elif type == Type.END_BLOCK:
            self._is_block_close = not self.InObjectLiteral()
            self._block_depth -= 1
            self._block_types.pop()

        # Track parentheses depth.
        elif type == Type.START_PAREN:
            self._paren_depth += 1

        # Track parentheses depth.
        elif type == Type.END_PAREN:
            self._paren_depth -= 1

        elif type == Type.COMMENT:
            self._last_comment = token.string

        elif type == Type.START_DOC_COMMENT:
            # A new doc comment resets any remembered line comment.
            self._last_comment = None
            self._doc_comment = DocComment(token)

        elif type == Type.END_DOC_COMMENT:
            self._doc_comment.end_token = token

        elif type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
            # Parse the flag and attach it to both the token and the comment.
            flag = self._doc_flag(token)
            token.attached_object = flag
            self._doc_comment.AddFlag(flag)

            if flag.flag_type == 'suppress':
                self._doc_comment.AddSuppression(token)

        elif type == Type.FUNCTION_DECLARATION:
            last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES,
                                               None, True)
            doc = None
            # Only functions outside of parens are eligible for documentation.
            if not self._paren_depth:
                doc = self._doc_comment

            name = ''
            # An assigned function: preceded by '=', '||', '&&', or a ':'
            # that is not inside an object literal.
            is_assigned = last_code and (
                last_code.IsOperator('=') or last_code.IsOperator('||')
                or last_code.IsOperator('&&') or
                (last_code.IsOperator(':') and not self.InObjectLiteral()))
            if is_assigned:
                # TODO(robbyw): This breaks for x[2] = ...
                # Must use loop to find full function name in the case of line-wrapped
                # declarations (bug 1220601) like:
                # my.function.foo.
                #   bar = function() ...
                identifier = tokenutil.Search(last_code, Type.SIMPLE_LVALUE,
                                              None, True)
                while identifier and identifier.type in (Type.IDENTIFIER,
                                                         Type.SIMPLE_LVALUE):
                    name = identifier.string + name
                    # Traverse behind us, skipping whitespace and comments.
                    while True:
                        identifier = identifier.previous
                        if not identifier or not identifier.type in Type.NON_CODE_TYPES:
                            break

            else:
                # Not an assignment: read the declared name forwards, piecing
                # together FUNCTION_NAME tokens (handles wrapped names).
                next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
                while next_token and next_token.IsType(Type.FUNCTION_NAME):
                    name += next_token.string
                    next_token = tokenutil.Search(next_token,
                                                  Type.FUNCTION_NAME, 2)

            function = Function(self._block_depth, is_assigned, doc, name)
            function.start_token = token

            self._function_stack.append(function)
            self._functions_by_name[name] = function

            # Add a delimiter in stack for scope variables to define start of
            # function. This helps in popping variables of this function when
            # function declaration ends.
            self._variables_in_scope.append('')

        elif type == Type.START_PARAMETERS:
            self._cumulative_params = ''

        elif type == Type.PARAMETERS:
            # Parameter text may span tokens; accumulate it.
            self._cumulative_params += token.string
            self._variables_in_scope.extend(self.GetParams())

        elif type == Type.KEYWORD and token.string == 'return':
            # Only a 'return <expr>' (not a bare 'return;') marks has_return.
            next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
            if not next_token.IsType(Type.SEMICOLON):
                function = self.GetFunction()
                if function:
                    function.has_return = True

        elif type == Type.KEYWORD and token.string == 'throw':
            function = self.GetFunction()
            if function:
                function.has_throw = True

        elif type == Type.KEYWORD and token.string == 'var':
            # Record the declared variable in the current scope.
            function = self.GetFunction()
            next_token = tokenutil.Search(
                token, [Type.IDENTIFIER, Type.SIMPLE_LVALUE])

            if next_token:
                if next_token.type == Type.SIMPLE_LVALUE:
                    self._variables_in_scope.append(
                        next_token.values['identifier'])
                else:
                    self._variables_in_scope.append(next_token.string)

        elif type == Type.SIMPLE_LVALUE:
            identifier = token.values['identifier']
            jsdoc = self.GetDocComment()
            if jsdoc:
                self._documented_identifiers.add(identifier)

            self._HandleIdentifier(identifier, True)

        elif type == Type.IDENTIFIER:
            self._HandleIdentifier(token.string, False)

            # Detect documented non-assignments.
            next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
            if next_token and next_token.IsType(Type.SEMICOLON):
                if (self._last_non_space_token
                        and self._last_non_space_token.IsType(
                            Type.END_DOC_COMMENT)):
                    self._documented_identifiers.add(token.string)
    # Beispiel #10: scraped-example separator, not executable code.
    def __init__(self, flag_token):
        """Creates the DocFlag object and attaches it to the given start token.

    Parses the flag's type (e.g. the {...} after @param), name, and
    description out of the token stream following the flag token, guided by
    the class-level HAS_TYPE / TYPE_ONLY / HAS_NAME / HAS_DESCRIPTION sets.

    Args:
      flag_token: The starting token of the flag.
    """
        # The flag token itself and its name without the leading '@'.
        self.flag_token = flag_token
        self.flag_type = flag_token.string.strip().lstrip('@')

        # Extract type, if applicable.
        self.type = None
        self.type_start_token = None
        self.type_end_token = None
        if self.flag_type in self.HAS_TYPE:
            # Look for a '{' before the flag's content ends.
            brace = tokenutil.SearchUntil(flag_token, [Type.DOC_START_BRACE],
                                          Type.FLAG_ENDING_TYPES)
            if brace:
                end_token, contents = _GetMatchingEndBraceAndContents(brace)
                self.type = contents
                self.type_start_token = brace
                self.type_end_token = end_token
            elif (self.flag_type in self.TYPE_ONLY
                  and flag_token.next.type not in Type.FLAG_ENDING_TYPES
                  and flag_token.line_number == flag_token.next.line_number):
                # b/10407058. If the flag is expected to be followed by a type then
                # search for type in same line only. If no token after flag in same
                # line then conclude that no type is specified.
                self.type_start_token = flag_token.next
                self.type_end_token, self.type = _GetEndTokenAndContents(
                    self.type_start_token)
                if self.type is not None:
                    self.type = self.type.strip()

        # Extract name, if applicable.
        self.name_token = None
        self.name = None
        if self.flag_type in self.HAS_NAME:
            # Handle bad case, name could be immediately after flag token.
            self.name_token = _GetNextPartialIdentifierToken(flag_token)

            # Handle good case, if found token is after type start, look for
            # a identifier (substring to cover cases like [cnt] b/4197272) after
            # type end, since types contain identifiers.
            if (self.type and self.name_token and tokenutil.Compare(
                    self.name_token, self.type_start_token) > 0):
                self.name_token = _GetNextPartialIdentifierToken(
                    self.type_end_token)

            if self.name_token:
                self.name = self.name_token.string

        # Extract description, if applicable.
        self.description_start_token = None
        self.description_end_token = None
        self.description = None
        if self.flag_type in self.HAS_DESCRIPTION:
            # The description starts after whichever of the type and name
            # comes last in the stream (or after the flag itself if neither
            # is present).
            search_start_token = flag_token
            if self.name_token and self.type_end_token:
                if tokenutil.Compare(self.type_end_token, self.name_token) > 0:
                    search_start_token = self.type_end_token
                else:
                    search_start_token = self.name_token
            elif self.name_token:
                search_start_token = self.name_token
            elif self.type:
                search_start_token = self.type_end_token

            interesting_token = tokenutil.Search(
                search_start_token,
                Type.FLAG_DESCRIPTION_TYPES | Type.FLAG_ENDING_TYPES)
            if interesting_token.type in Type.FLAG_DESCRIPTION_TYPES:
                self.description_start_token = interesting_token
                self.description_end_token, self.description = (
                    _GetEndTokenAndContents(interesting_token))
    # Beispiel #11: scraped-example separator, not executable code.
  def ProcessToken(self, token, state_tracker):
    """Processes the given token for dependency information.

    Args:
      token: The token to process.
      state_tracker: The JavaScript state tracker.
    """

    # Note that this method is in the critical path for the linter and has been
    # optimized for performance in the following ways:
    # - Tokens are checked by type first to minimize the number of function
    #   calls necessary to determine if action needs to be taken for the token.
    # - The most common tokens types are checked for first.
    # - The number of function calls has been minimized (thus the length of this
    #   function.

    if token.type == TokenType.IDENTIFIER:
      # TODO(user): Consider saving the whole identifier in metadata.
      whole_identifier_string = tokenutil.GetIdentifierForToken(token)
      if whole_identifier_string is None:
        # We only want to process the identifier one time. If the whole string
        # identifier is None, that means this token was part of a multi-token
        # identifier, but it was not the first token of the identifier.
        return

      # In the odd case that a goog.require is encountered inside a function,
      # just ignore it (e.g. dynamic loading in test runners).
      if token.string == 'goog.require' and not state_tracker.InFunction():
        self._require_tokens.append(token)
        namespace = tokenutil.GetStringAfterToken(token)
        if namespace in self._required_namespaces:
          self._duplicate_require_tokens.append(token)
        else:
          self._required_namespaces.append(namespace)

        # If there is a suppression for the require, add a usage for it so it
        # gets treated as a regular goog.require (i.e. still gets sorted).
        jsdoc = state_tracker.GetDocComment()
        if jsdoc and ('extraRequire' in jsdoc.suppressions):
          self._suppressed_requires.append(namespace)
          self._AddUsedNamespace(state_tracker, namespace, token.line_number)

      elif token.string == 'goog.provide':
        self._provide_tokens.append(token)
        namespace = tokenutil.GetStringAfterToken(token)
        if namespace in self._provided_namespaces:
          self._duplicate_provide_tokens.append(token)
        else:
          self._provided_namespaces.append(namespace)

        # If there is a suppression for the provide, add a creation for it so it
        # gets treated as a regular goog.provide (i.e. still gets sorted).
        jsdoc = state_tracker.GetDocComment()
        if jsdoc and ('extraProvide' in jsdoc.suppressions):
          self._AddCreatedNamespace(state_tracker, namespace, token.line_number)

      elif token.string == 'goog.scope':
        self._scopified_file = True

      elif token.string == 'goog.setTestOnly':

        # Since the message is optional, we don't want to scan to later lines.
        for t in tokenutil.GetAllTokensInSameLine(token):
          if t.type == TokenType.STRING_TEXT:
            message = t.string

            if re.match(r'^\w+(\.\w+)+$', message):
              # This looks like a namespace. If it's a Closurized namespace,
              # consider it created.
              base_namespace = message.split('.', 1)[0]
              if base_namespace in self._closurized_namespaces:
                self._AddCreatedNamespace(state_tracker, message,
                                          token.line_number)

            break
      else:
        jsdoc = state_tracker.GetDocComment()
        if token.metadata and token.metadata.aliased_symbol:
          whole_identifier_string = token.metadata.aliased_symbol
        if jsdoc and jsdoc.HasFlag('typedef'):
          self._AddCreatedNamespace(state_tracker, whole_identifier_string,
                                    token.line_number,
                                    namespace=self.GetClosurizedNamespace(
                                        whole_identifier_string))
        else:
          if not (token.metadata and token.metadata.is_alias_definition):
            self._AddUsedNamespace(state_tracker, whole_identifier_string,
                                   token.line_number)

    elif token.type == TokenType.SIMPLE_LVALUE:
      identifier = token.values['identifier']
      start_token = tokenutil.GetIdentifierStart(token)
      if start_token and start_token != token:
        # Multi-line identifier being assigned. Get the whole identifier.
        identifier = tokenutil.GetIdentifierForToken(start_token)
      else:
        start_token = token
      # If an alias is defined on the start_token, use it instead.
      if (start_token and
          start_token.metadata and
          start_token.metadata.aliased_symbol and
          not start_token.metadata.is_alias_definition):
        identifier = start_token.metadata.aliased_symbol

      if identifier:
        namespace = self.GetClosurizedNamespace(identifier)
        if state_tracker.InFunction():
          self._AddUsedNamespace(state_tracker, identifier, token.line_number)
        elif namespace and namespace != 'goog':
          self._AddCreatedNamespace(state_tracker, identifier,
                                    token.line_number, namespace=namespace)

    elif token.type == TokenType.DOC_FLAG:
      flag_type = token.attached_object.flag_type
      is_interface = state_tracker.GetDocComment().HasFlag('interface')
      if flag_type == 'implements' or (flag_type == 'extends' and is_interface):
        # Interfaces should be goog.require'd.
        doc_start = tokenutil.Search(token, TokenType.DOC_START_BRACE)
        interface = tokenutil.Search(doc_start, TokenType.COMMENT)
        self._AddUsedNamespace(state_tracker, interface.string,
                               token.line_number)
# Beispiel #12
# 0
    def ProcessToken(self, token, state_tracker):
        """Extracts dependency information from a single token.

    Args:
      token: The token to process.
      state_tracker: The JavaScript state tracker.
    """
        # This method is on the linter's critical path: tokens are dispatched
        # on their type first, and the most common token types are tested
        # first, so that the bulk of tokens fall through cheaply.
        if token.type == TokenType.IDENTIFIER:
            # TODO(user): Consider saving the whole identifier in metadata.
            identifier = self._GetWholeIdentifierString(token)
            if identifier is None:
                # Not the first token of a multi-token identifier; the full
                # identifier was already handled when its first token came by.
                return

            if token.string == 'goog.require' and not state_tracker.InFunction():
                # A goog.require encountered inside a function (e.g. dynamic
                # loading in test runners) is deliberately ignored; only
                # top-level requires reach this branch.
                self._require_tokens.append(token)
                required = tokenutil.Search(token, TokenType.STRING_TEXT).string
                if required in self._required_namespaces:
                    self._duplicate_require_tokens.append(token)
                else:
                    self._required_namespaces.append(required)

                # A suppressed require is recorded as a usage so that it is
                # still treated (and sorted) like a regular goog.require.
                jsdoc = state_tracker.GetDocComment()
                if jsdoc and 'extraRequire' in jsdoc.suppressions:
                    self._suppressed_requires.append(required)
                    self._AddUsedNamespace(state_tracker, required)

            elif token.string == 'goog.provide':
                self._provide_tokens.append(token)
                provided = tokenutil.Search(token, TokenType.STRING_TEXT).string
                if provided in self._provided_namespaces:
                    self._duplicate_provide_tokens.append(token)
                else:
                    self._provided_namespaces.append(provided)

                # A suppressed provide is recorded as a creation so that it is
                # still treated (and sorted) like a regular goog.provide.
                jsdoc = state_tracker.GetDocComment()
                if jsdoc and 'extraProvide' in jsdoc.suppressions:
                    self._AddCreatedNamespace(state_tracker, provided)

            elif token.string == 'goog.scope':
                self._scopified_file = True

            else:
                # Any other identifier: a @typedef creates a namespace,
                # everything else counts as a usage.
                jsdoc = state_tracker.GetDocComment()
                if jsdoc and jsdoc.HasFlag('typedef'):
                    self._AddCreatedNamespace(
                        state_tracker, identifier,
                        self.GetClosurizedNamespace(identifier))
                else:
                    self._AddUsedNamespace(state_tracker, identifier)

        elif token.type == TokenType.SIMPLE_LVALUE:
            lvalue = token.values['identifier']
            closurized = self.GetClosurizedNamespace(lvalue)
            if state_tracker.InFunction():
                # Assignments inside functions only use, never create.
                self._AddUsedNamespace(state_tracker, lvalue)
            elif closurized and closurized != 'goog':
                self._AddCreatedNamespace(state_tracker, lvalue, closurized)

        elif token.type == TokenType.DOC_FLAG:
            flag_type = token.attached_object.flag_type
            is_interface = state_tracker.GetDocComment().HasFlag('interface')
            if flag_type == 'implements' or (flag_type == 'extends'
                                             and is_interface):
                # Interfaces should be goog.require'd.
                brace = tokenutil.Search(token, TokenType.DOC_START_BRACE)
                interface = tokenutil.Search(brace, TokenType.COMMENT)
                self._AddUsedNamespace(state_tracker, interface.string)
# Beispiel #13
# 0
    def ProcessToken(self, token, state_tracker):
        """Gathers goog.provide/goog.require dependency data from one token.

    Args:
      token: The token to process.
      state_tracker: The JavaScript state tracker.
    """
        if token.type == Type.IDENTIFIER:
            # TODO(user): Consider saving the whole identifier in metadata.
            full_name = self.__GetWholeIdentifierString(token)
            if full_name is None:
                # Mid-identifier token of a multi-token identifier: the whole
                # identifier was processed when its first token was seen.
                return

            if (token.string == 'goog.require'
                    and not state_tracker.InFunction()):
                # Requires found inside functions (e.g. dynamic loading in
                # test runners) are intentionally ignored.
                self.__require_tokens.append(token)
                name = tokenutil.Search(token, Type.STRING_TEXT).string
                if name in self.__required_namespaces:
                    self.__duplicate_require_tokens.append(token)
                else:
                    self.__required_namespaces.append(name)

                # Treat a suppressed require as a usage so that it is still
                # sorted like any other goog.require.
                jsdoc = state_tracker.GetDocComment()
                if jsdoc and 'extraRequire' in jsdoc.suppressions:
                    self.__AddUsedNamespace(state_tracker, name)

            elif token.string == 'goog.provide':
                self.__provide_tokens.append(token)
                name = tokenutil.Search(token, Type.STRING_TEXT).string
                if name in self.__provided_namespaces:
                    self.__duplicate_provide_tokens.append(token)
                else:
                    self.__provided_namespaces.append(name)

                # Treat a suppressed provide as a creation so that it is still
                # sorted like any other goog.provide.
                jsdoc = state_tracker.GetDocComment()
                if jsdoc and 'extraProvide' in jsdoc.suppressions:
                    self.__AddCreatedNamespace(state_tracker, name)

            else:
                # Any other identifier: a @typedef creates a namespace,
                # everything else counts as a usage.
                jsdoc = state_tracker.GetDocComment()
                if jsdoc and jsdoc.HasFlag('typedef'):
                    self.__AddCreatedNamespace(state_tracker, full_name)
                else:
                    self.__AddUsedNamespace(state_tracker, full_name)

        elif token.type == Type.SIMPLE_LVALUE:
            assignee = token.values['identifier']
            owner = self.GetClosurizedNamespace(assignee)
            if state_tracker.InFunction():
                # Assignments inside functions only use, never create.
                self.__AddUsedNamespace(state_tracker, assignee)
            elif owner and owner != 'goog':
                self.__AddCreatedNamespace(state_tracker, assignee, owner)

        elif (token.type == Type.DOC_FLAG
              and token.attached_object.flag_type == 'implements'):
            # Interfaces should be goog.require'd.
            brace = tokenutil.Search(token, Type.DOC_START_BRACE)
            comment = tokenutil.Search(brace, Type.COMMENT)
            self.__AddUsedNamespace(state_tracker, comment.string)
    def CheckToken(self, token, state):
        """Checks a token, given the current parser_state, for warnings and errors.

    Args:
      token: The current token under consideration.
      state: parser_state object that indicates the current state in the page.
    """

        # For @param don't ignore record type.
        if (self.__ContainsRecordType(token)
                and not token.attached_object.flag_type == 'param'):
            # We should bail out and not emit any warnings for this annotation.
            # TODO(nicksantos): Support record types for real.
            state.GetDocComment().Invalidate()
            return

        # Call the base class's CheckToken function.
        super(JavaScriptLintRules, self).CheckToken(token, state)

        # Store some convenience variables
        namespaces_info = self._namespaces_info

        if error_check.ShouldCheck(Rule.UNUSED_LOCAL_VARIABLES):
            self._CheckUnusedLocalVariables(token, state)

        if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS):
            # Find all assignments to private members.
            if token.type == Type.SIMPLE_LVALUE:
                identifier = token.string
                if identifier.endswith('_') and not identifier.endswith('__'):
                    doc_comment = state.GetDocComment()
                    suppressed = (doc_comment
                                  and doc_comment.HasFlag('suppress')
                                  and doc_comment.GetFlag('suppress').type
                                  == 'underscore')
                    if not suppressed:
                        # Look for static members defined on a provided namespace.
                        if namespaces_info:
                            namespace = namespaces_info.GetClosurizedNamespace(
                                identifier)
                            provided_namespaces = (
                                namespaces_info.GetProvidedNamespaces())
                        else:
                            namespace = None
                            provided_namespaces = set()

                        # Skip cases of this.something_.somethingElse_.
                        # NOTE: raw string so that \. is a literal dot in the
                        # regex rather than an invalid string escape.
                        regex = re.compile(r'^this\.[a-zA-Z_]+$')
                        if (namespace in provided_namespaces
                                or regex.match(identifier)):
                            variable = identifier.split('.')[-1]
                            self._declared_private_member_tokens[
                                variable] = token
                            self._declared_private_members.add(variable)
                elif not identifier.endswith('__'):
                    # Consider setting public members of private members to be a usage.
                    for piece in identifier.split('.'):
                        if piece.endswith('_'):
                            self._used_private_members.add(piece)

            # Find all usages of private members.
            if token.type == Type.IDENTIFIER:
                for piece in token.string.split('.'):
                    if piece.endswith('_'):
                        self._used_private_members.add(piece)

        if token.type == Type.DOC_FLAG:
            flag = token.attached_object

            if flag.flag_type == 'param' and flag.name_token is not None:
                self._CheckForMissingSpaceBeforeToken(
                    token.attached_object.name_token)

                if flag.type is not None and flag.name is not None:
                    if error_check.ShouldCheck(Rule.VARIABLE_ARG_MARKER):
                        # Check for variable arguments marker in type.
                        if (flag.type.startswith('...')
                                and flag.name != 'var_args'):
                            self._HandleError(
                                errors.JSDOC_MISSING_VAR_ARGS_NAME,
                                'Variable length argument %s must be renamed '
                                'to var_args.' % flag.name, token)
                        elif (not flag.type.startswith('...')
                              and flag.name == 'var_args'):
                            self._HandleError(
                                errors.JSDOC_MISSING_VAR_ARGS_TYPE,
                                'Variable length argument %s type must start '
                                'with \'...\'.' % flag.name, token)

                    if error_check.ShouldCheck(Rule.OPTIONAL_TYPE_MARKER):
                        # Check for optional marker in type.
                        if (flag.type.endswith('=')
                                and not flag.name.startswith('opt_')):
                            self._HandleError(
                                errors.JSDOC_MISSING_OPTIONAL_PREFIX,
                                'Optional parameter name %s must be prefixed '
                                'with opt_.' % flag.name, token)
                        elif (not flag.type.endswith('=')
                              and flag.name.startswith('opt_')):
                            self._HandleError(
                                errors.JSDOC_MISSING_OPTIONAL_TYPE,
                                'Optional parameter %s type must end with =.' %
                                flag.name, token)

            if flag.flag_type in state.GetDocFlag().HAS_TYPE:
                # Check for both missing type token and empty type braces '{}'
                # Missing suppress types are reported separately and we allow enums
                # without types.
                if (flag.flag_type not in ('suppress', 'enum')
                        and (not flag.type or flag.type.isspace())):
                    self._HandleError(errors.MISSING_JSDOC_TAG_TYPE,
                                      'Missing type in %s tag' % token.string,
                                      token)

                elif (flag.name_token and flag.type_end_token
                      and tokenutil.Compare(flag.type_end_token,
                                            flag.name_token) > 0):
                    self._HandleError(
                        errors.OUT_OF_ORDER_JSDOC_TAG_TYPE,
                        'Type should be immediately after %s tag' %
                        token.string, token)

        elif token.type == Type.DOUBLE_QUOTE_STRING_START:
            # Scan the string contents; if no single quote appears anywhere,
            # the double quotes are unnecessary (the while/else fires only
            # when the loop completes without break).
            next_token = token.next
            while next_token.type == Type.STRING_TEXT:
                if javascripttokenizer.JavaScriptTokenizer.SINGLE_QUOTE.search(
                        next_token.string):
                    break
                next_token = next_token.next
            else:
                self._HandleError(
                    errors.UNNECESSARY_DOUBLE_QUOTED_STRING,
                    'Single-quoted string preferred over double-quoted string.',
                    token, Position.All(token.string))

        elif token.type == Type.END_DOC_COMMENT:
            doc_comment = state.GetDocComment()

            # When @externs appears in a @fileoverview comment, it should trigger
            # the same limited doc checks as a special filename like externs.js.
            if (doc_comment.HasFlag('fileoverview')
                    and doc_comment.HasFlag('externs')):
                self._SetLimitedDocChecks(True)

            if (error_check.ShouldCheck(Rule.BLANK_LINES_AT_TOP_LEVEL)
                    and not self._is_html and state.InTopLevel()
                    and not state.InNonScopeBlock()):

                # Check if we're in a fileoverview or constructor JsDoc.
                is_constructor = (doc_comment.HasFlag('constructor')
                                  or doc_comment.HasFlag('interface'))
                is_file_overview = doc_comment.HasFlag('fileoverview')

                # If the comment is not a file overview, and it does not immediately
                # precede some code, skip it.
                # NOTE: The tokenutil methods are not used here because of their
                # behavior at the top of a file.
                next_token = token.next
                if (not next_token
                        or (not is_file_overview
                            and next_token.type in Type.NON_CODE_TYPES)):
                    return

                # Don't require extra blank lines around suppression of extra
                # goog.require errors.
                if (doc_comment.SuppressionOnly()
                        and next_token.type == Type.IDENTIFIER and
                        next_token.string in ('goog.provide', 'goog.require')):
                    return

                # Find the start of this block (include comments above the block,
                # unless this is a file overview).
                block_start = doc_comment.start_token
                if not is_file_overview:
                    token = block_start.previous
                    while token and token.type in Type.COMMENT_TYPES:
                        block_start = token
                        token = token.previous

                # Count the number of blank lines before this block.
                blank_lines = 0
                token = block_start.previous
                while token and token.type in (Type.WHITESPACE,
                                               Type.BLANK_LINE):
                    if token.type == Type.BLANK_LINE:
                        # A blank line.
                        blank_lines += 1
                    elif (token.type == Type.WHITESPACE
                          and not token.line.strip()):
                        # A line with only whitespace on it.
                        blank_lines += 1
                    token = token.previous

                # Log errors.
                error_message = False
                expected_blank_lines = 0

                # Only need blank line before file overview if it is not the beginning
                # of the file, e.g. copyright is first.
                if is_file_overview and blank_lines == 0 and block_start.previous:
                    error_message = 'Should have a blank line before a file overview.'
                    expected_blank_lines = 1
                elif is_constructor and blank_lines != 3:
                    error_message = (
                        'Should have 3 blank lines before a constructor/interface.'
                    )
                    expected_blank_lines = 3
                elif not is_file_overview and not is_constructor and blank_lines != 2:
                    error_message = 'Should have 2 blank lines between top-level blocks.'
                    expected_blank_lines = 2

                if error_message:
                    self._HandleError(errors.WRONG_BLANK_LINE_COUNT,
                                      error_message, block_start,
                                      Position.AtBeginning(),
                                      expected_blank_lines - blank_lines)

        elif token.type == Type.END_BLOCK:
            if state.InFunction() and state.IsFunctionClose():
                is_immediately_called = (token.next and token.next.type
                                         == Type.START_PAREN)

                function = state.GetFunction()
                if not self._limited_doc_checks:
                    if (function.has_return and function.doc
                            and not is_immediately_called
                            and not function.doc.HasFlag('return')
                            and not function.doc.InheritsDocumentation()
                            and not function.doc.HasFlag('constructor')):
                        # Check for proper documentation of return value.
                        self._HandleError(
                            errors.MISSING_RETURN_DOCUMENTATION,
                            'Missing @return JsDoc in function with non-trivial return',
                            function.doc.end_token, Position.AtBeginning())
                    elif (not function.has_return and not function.has_throw
                          and function.doc and function.doc.HasFlag('return')
                          and not state.InInterfaceMethod()):
                        # A @return on a function that never returns is only
                        # acceptable when the declared type allows "nothing".
                        return_flag = function.doc.GetFlag('return')
                        if (return_flag.type is None
                                or ('undefined' not in return_flag.type
                                    and 'void' not in return_flag.type
                                    and '*' not in return_flag.type)):
                            self._HandleError(
                                errors.UNNECESSARY_RETURN_DOCUMENTATION,
                                'Found @return JsDoc on function that returns nothing',
                                return_flag.flag_token, Position.AtBeginning())

                # b/4073735. Method in object literal definition of prototype can
                # safely reference 'this'.
                prototype_object_literal = False
                block_start = None
                previous_code = None
                previous_previous_code = None

                # Search for cases where prototype is defined as object literal.
                #       previous_previous_code
                #       |       previous_code
                #       |       | block_start
                #       |       | |
                # a.b.prototype = {
                #   c : function() {
                #     this.d = 1;
                #   }
                # }

                # If in object literal, find first token of block so to find previous
                # tokens to check above condition.
                if state.InObjectLiteral():
                    block_start = state.GetCurrentBlockStart()

                # If an object literal then get previous token (code type). For above
                # case it should be '='.
                if block_start:
                    previous_code = tokenutil.SearchExcept(
                        block_start, Type.NON_CODE_TYPES, None, True)

                # If previous token to block is '=' then get its previous token.
                if previous_code and previous_code.IsOperator('='):
                    previous_previous_code = tokenutil.SearchExcept(
                        previous_code, Type.NON_CODE_TYPES, None, True)

                # If variable/token before '=' ends with '.prototype' then its above
                # case of prototype defined with object literal.
                prototype_object_literal = (
                    previous_previous_code
                    and previous_previous_code.string.endswith('.prototype'))

                if (function.has_this and function.doc
                        and not function.doc.HasFlag('this')
                        and not function.is_constructor
                        and not function.is_interface
                        and '.prototype.' not in function.name
                        and not prototype_object_literal):
                    self._HandleError(
                        errors.MISSING_JSDOC_TAG_THIS,
                        'Missing @this JsDoc in function referencing "this". ('
                        'this usually means you are trying to reference "this" in '
                        'a static function, or you have forgotten to mark a '
                        'constructor with @constructor)',
                        function.doc.end_token, Position.AtBeginning())

        elif token.type == Type.IDENTIFIER:
            if token.string == 'goog.inherits' and not state.InFunction():
                if state.GetLastNonSpaceToken(
                ).line_number == token.line_number:
                    self._HandleError(
                        errors.MISSING_LINE,
                        'Missing newline between constructor and goog.inherits',
                        token, Position.AtBeginning())

                extra_space = state.GetLastNonSpaceToken().next
                while extra_space != token:
                    if extra_space.type == Type.BLANK_LINE:
                        self._HandleError(
                            errors.EXTRA_LINE,
                            'Extra line between constructor and goog.inherits',
                            extra_space)
                    extra_space = extra_space.next

                # TODO(robbyw): Test the last function was a constructor.
                # TODO(robbyw): Test correct @extends and @implements documentation.

            elif (token.string == 'goog.provide' and not state.InFunction()
                  and namespaces_info is not None):
                namespace = tokenutil.Search(token, Type.STRING_TEXT).string

                # Report extra goog.provide statement.
                if namespaces_info.IsExtraProvide(token):
                    msg = 'Unnecessary goog.provide: ' + namespace

                    # Hint to user if this is a Test namespace.
                    if namespace.endswith('Test'):
                        msg += (' *Test namespaces must be mentioned in the '
                                'goog.setTestOnly() call')

                    self._HandleError(errors.EXTRA_GOOG_PROVIDE,
                                      msg,
                                      token,
                                      position=Position.AtBeginning())

                if namespaces_info.IsLastProvide(token):
                    # Report missing provide statements after the last existing provide.
                    missing_provides = namespaces_info.GetMissingProvides()
                    if missing_provides:
                        self._ReportMissingProvides(
                            missing_provides,
                            tokenutil.GetLastTokenInSameLine(token).next,
                            False)

                    # If there are no require statements, missing requires should be
                    # reported after the last provide.
                    if not namespaces_info.GetRequiredNamespaces():
                        missing_requires = namespaces_info.GetMissingRequires()
                        if missing_requires:
                            self._ReportMissingRequires(
                                missing_requires,
                                tokenutil.GetLastTokenInSameLine(token).next,
                                True)

            elif (token.string == 'goog.require' and not state.InFunction()
                  and namespaces_info is not None):
                namespace = tokenutil.Search(token, Type.STRING_TEXT).string

                # If there are no provide statements, missing provides should be
                # reported before the first require.
                if (namespaces_info.IsFirstRequire(token)
                        and not namespaces_info.GetProvidedNamespaces()):
                    missing_provides = namespaces_info.GetMissingProvides()
                    if missing_provides:
                        self._ReportMissingProvides(
                            missing_provides,
                            tokenutil.GetFirstTokenInSameLine(token), True)

                # Report extra goog.require statement.
                if namespaces_info.IsExtraRequire(token):
                    self._HandleError(errors.EXTRA_GOOG_REQUIRE,
                                      'Unnecessary goog.require: ' + namespace,
                                      token,
                                      position=Position.AtBeginning())

                # Report missing goog.require statements.
                if namespaces_info.IsLastRequire(token):
                    missing_requires = namespaces_info.GetMissingRequires()
                    if missing_requires:
                        self._ReportMissingRequires(
                            missing_requires,
                            tokenutil.GetLastTokenInSameLine(token).next,
                            False)

        elif token.type == Type.OPERATOR:
            last_in_line = token.IsLastInLine()
            # If the token is unary and appears to be used in a unary context
            # it's ok.  Otherwise, if it's at the end of the line or immediately
            # before a comment, it's ok.
            # Don't report an error before a start bracket - it will be reported
            # by that token's space checks.
            if (not token.metadata.IsUnaryOperator() and not last_in_line
                    and not token.next.IsComment()
                    and not token.next.IsOperator(',')
                    and token.next.type not in (
                        Type.WHITESPACE, Type.END_PAREN, Type.END_BRACKET,
                        Type.SEMICOLON, Type.START_BRACKET)):
                self._HandleError(errors.MISSING_SPACE,
                                  'Missing space after "%s"' % token.string,
                                  token, Position.AtEnd(token.string))

        elif token.type == Type.WHITESPACE:
            first_in_line = token.IsFirstInLine()
            last_in_line = token.IsLastInLine()
            # Check whitespace length if it's not the first token of the line and
            # if it's not immediately before a comment.
            if not last_in_line and not first_in_line and not token.next.IsComment(
            ):
                # Ensure there is no space after opening parentheses.
                if (token.previous.type
                        in (Type.START_PAREN, Type.START_BRACKET,
                            Type.FUNCTION_NAME)
                        or token.next.type == Type.START_PARAMETERS):
                    self._HandleError(
                        errors.EXTRA_SPACE,
                        'Extra space after "%s"' % token.previous.string,
                        token, Position.All(token.string))

        elif token.type == Type.SEMICOLON:
            previous_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES,
                                                    None, True)
            if (previous_token.type == Type.KEYWORD
                    and previous_token.string not in ('break', 'continue',
                                                      'return')):
                self._HandleError(
                    errors.REDUNDANT_SEMICOLON,
                    ('Semicolon after \'%s\' without any statement.'
                     ' Looks like an error.' % previous_token.string), token,
                    Position.AtEnd(token.string))
Beispiel #15
0
    def __init__(self, flag_token):
        """Creates the DocFlag object and attaches it to the given start token.

    Args:
      flag_token: The starting token of the flag.
    """
        self.flag_token = flag_token
        # The flag type is the tag name with the leading '@' stripped.
        self.flag_type = flag_token.string.strip().lstrip('@')

        # Type extraction, only for flags that may carry a type.
        self.type = None
        self.type_start_token = None
        self.type_end_token = None
        if self.flag_type in self.HAS_TYPE:
            open_brace = tokenutil.SearchUntil(flag_token,
                                               [Type.DOC_START_BRACE],
                                               Type.FLAG_ENDING_TYPES)
            if open_brace:
                # Braced type: the type is everything between the braces.
                close_brace, type_text = _GetMatchingEndBraceAndContents(
                    open_brace)
                self.type = type_text
                self.type_start_token = open_brace
                self.type_end_token = close_brace
            elif (self.flag_type in self.TYPE_ONLY and
                  flag_token.next.type not in Type.FLAG_ENDING_TYPES):
                # Unbraced type on a type-only flag: consume tokens until the
                # flag ends.
                self.type_start_token = flag_token.next
                self.type_end_token, self.type = _GetEndTokenAndContents(
                    self.type_start_token)
                if self.type is not None:
                    self.type = self.type.strip()

        # Name extraction, only for flags that may carry a name.
        self.name_token = None
        self.name = None
        if self.flag_type in self.HAS_NAME:
            # Bad-case guess: the name may sit immediately after the flag.
            self.name_token = _GetNextIdentifierToken(flag_token)

            # Good case: if that guess landed inside the type (types contain
            # identifiers), search again after the type ends.
            if (self.type and self.name_token and
                    tokenutil.Compare(self.name_token,
                                      self.type_start_token) > 0):
                self.name_token = _GetNextIdentifierToken(self.type_end_token)

            if self.name_token:
                self.name = self.name_token.string

        # Description extraction, only for flags that may carry one.
        self.description_start_token = None
        self.description_end_token = None
        self.description = None
        if self.flag_type in self.HAS_DESCRIPTION:
            # Begin the search after whichever of name/type appears last.
            search_start = flag_token
            if self.name_token and self.type_end_token:
                if tokenutil.Compare(self.type_end_token,
                                     self.name_token) > 0:
                    search_start = self.type_end_token
                else:
                    search_start = self.name_token
            elif self.name_token:
                search_start = self.name_token
            elif self.type:
                search_start = self.type_end_token

            found = tokenutil.Search(
                search_start,
                Type.FLAG_DESCRIPTION_TYPES | Type.FLAG_ENDING_TYPES)
            if found.type in Type.FLAG_DESCRIPTION_TYPES:
                self.description_start_token = found
                self.description_end_token, self.description = (
                    _GetEndTokenAndContents(found))
  def HandleError(self, error):
    """Attempts to fix the given error in place.

    Dispatches on error.code and rewrites the token stream around
    error.token. Each successful repair is recorded via self._AddFix so the
    caller knows the file must be rewritten. Errors with no fix logic (or
    whose preconditions are not met) are silently left alone.

    Args:
      error: The error object describing the problem and its token.
    """
    code = error.code
    token = error.token

    if code == errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL:
      iterator = token.attached_object.type_start_token
      if iterator.type == Type.DOC_START_BRACE or iterator.string.isspace():
        iterator = iterator.next

      # Prefix the type with '?' while preserving its leading whitespace.
      leading_space = len(iterator.string) - len(iterator.string.lstrip())
      iterator.string = '%s?%s' % (' ' * leading_space,
                                   iterator.string.lstrip())

      # Cover the no outer brace case where the end token is part of the type.
      while iterator and iterator != token.attached_object.type_end_token.next:
        iterator.string = iterator.string.replace(
            'null|', '').replace('|null', '')
        iterator = iterator.next

      # Create a new flag object with updated type info.
      token.attached_object = javascriptstatetracker.JsDocFlag(token)
      self._AddFix(token)

    elif code == errors.JSDOC_MISSING_OPTIONAL_TYPE:
      iterator = token.attached_object.type_end_token
      if iterator.type == Type.DOC_END_BRACE or iterator.string.isspace():
        iterator = iterator.previous

      # Suffix the type with '=' while preserving its trailing whitespace.
      ending_space = len(iterator.string) - len(iterator.string.rstrip())
      iterator.string = '%s=%s' % (iterator.string.rstrip(),
                                   ' ' * ending_space)

      # Create a new flag object with updated type info.
      token.attached_object = javascriptstatetracker.JsDocFlag(token)
      self._AddFix(token)

    elif code in (errors.MISSING_SEMICOLON_AFTER_FUNCTION,
                  errors.MISSING_SEMICOLON):
      semicolon_token = Token(';', Type.SEMICOLON, token.line,
                              token.line_number)
      tokenutil.InsertTokenAfter(semicolon_token, token)
      token.metadata.is_implied_semicolon = False
      semicolon_token.metadata.is_implied_semicolon = False
      self._AddFix(token)

    elif code in (errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
                  errors.REDUNDANT_SEMICOLON,
                  errors.COMMA_AT_END_OF_LITERAL):
      tokenutil.DeleteToken(token)
      self._AddFix(token)

    elif code == errors.INVALID_JSDOC_TAG:
      if token.string == '@returns':
        token.string = '@return'
        self._AddFix(token)

    elif code == errors.FILE_MISSING_NEWLINE:
      # This error is fixed implicitly by the way we restore the file
      self._AddFix(token)

    elif code == errors.MISSING_SPACE:
      if error.position:
        if error.position.IsAtBeginning():
          tokenutil.InsertSpaceTokenAfter(token.previous)
        elif error.position.IsAtEnd(token.string):
          tokenutil.InsertSpaceTokenAfter(token)
        else:
          token.string = error.position.Set(token.string, ' ')
        self._AddFix(token)

    elif code == errors.EXTRA_SPACE:
      if error.position:
        token.string = error.position.Set(token.string, '')
        self._AddFix(token)

    elif code == errors.JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER:
      token.string = error.position.Set(token.string, '.')
      self._AddFix(token)

    elif code == errors.MISSING_LINE:
      if error.position.IsAtBeginning():
        tokenutil.InsertBlankLineAfter(token.previous)
      else:
        tokenutil.InsertBlankLineAfter(token)
      self._AddFix(token)

    elif code == errors.EXTRA_LINE:
      tokenutil.DeleteToken(token)
      self._AddFix(token)

    elif code == errors.WRONG_BLANK_LINE_COUNT:
      if not token.previous:
        # TODO(user): Add an insertBefore method to tokenutil.
        return

      # fix_data is signed: a negative count means lines must be deleted.
      num_lines = error.fix_data
      should_delete = False

      if num_lines < 0:
        num_lines *= -1
        should_delete = True

      for i in xrange(1, num_lines + 1):
        if should_delete:
          # TODO(user): DeleteToken should update line numbers.
          tokenutil.DeleteToken(token.previous)
        else:
          tokenutil.InsertBlankLineAfter(token.previous)
        self._AddFix(token)

    elif code == errors.UNNECESSARY_DOUBLE_QUOTED_STRING:
      end_quote = tokenutil.Search(token, Type.DOUBLE_QUOTE_STRING_END)
      if end_quote:
        single_quote_start = Token(
            "'", Type.SINGLE_QUOTE_STRING_START, token.line, token.line_number)
        # The closing quote must be a string-end token; labeling it as a
        # string-start would corrupt downstream token-type checks.
        single_quote_end = Token(
            "'", Type.SINGLE_QUOTE_STRING_END, end_quote.line,
            token.line_number)

        tokenutil.InsertTokenAfter(single_quote_start, token)
        tokenutil.InsertTokenAfter(single_quote_end, end_quote)
        tokenutil.DeleteToken(token)
        tokenutil.DeleteToken(end_quote)
        self._AddFix([token, end_quote])

    elif code == errors.MISSING_BRACES_AROUND_TYPE:
      fixed_tokens = []
      start_token = token.attached_object.type_start_token

      if start_token.type != Type.DOC_START_BRACE:
        leading_space = (
            len(start_token.string) - len(start_token.string.lstrip()))
        if leading_space:
          start_token = tokenutil.SplitToken(start_token, leading_space)
          # Fix case where start and end token were the same.
          if token.attached_object.type_end_token == start_token.previous:
            token.attached_object.type_end_token = start_token

        new_token = Token('{', Type.DOC_START_BRACE, start_token.line,
                          start_token.line_number)
        tokenutil.InsertTokenAfter(new_token, start_token.previous)
        token.attached_object.type_start_token = new_token
        fixed_tokens.append(new_token)

      end_token = token.attached_object.type_end_token
      if end_token.type != Type.DOC_END_BRACE:
        # If the start token was a brace, the end token will be a
        # FLAG_ENDING_TYPE token, if there wasn't a starting brace then
        # the end token is the last token of the actual type.
        last_type = end_token
        if not fixed_tokens:
          last_type = end_token.previous

        while last_type.string.isspace():
          last_type = last_type.previous

        # If there was no starting brace then a lone end brace wouldn't have
        # been type end token. Now that we've added any missing start brace,
        # see if the last effective type token was an end brace.
        if last_type.type != Type.DOC_END_BRACE:
          trailing_space = (len(last_type.string) -
                            len(last_type.string.rstrip()))
          if trailing_space:
            tokenutil.SplitToken(last_type,
                                 len(last_type.string) - trailing_space)

          new_token = Token('}', Type.DOC_END_BRACE, last_type.line,
                            last_type.line_number)
          tokenutil.InsertTokenAfter(new_token, last_type)
          token.attached_object.type_end_token = new_token
          fixed_tokens.append(new_token)

      self._AddFix(fixed_tokens)

    elif code == errors.GOOG_REQUIRES_NOT_ALPHABETIZED:
      require_start_token = error.fix_data
      sorter = requireprovidesorter.RequireProvideSorter()
      sorter.FixRequires(require_start_token)

      self._AddFix(require_start_token)

    elif code == errors.GOOG_PROVIDES_NOT_ALPHABETIZED:
      provide_start_token = error.fix_data
      sorter = requireprovidesorter.RequireProvideSorter()
      sorter.FixProvides(provide_start_token)

      self._AddFix(provide_start_token)

    elif code == errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC:
      if token.previous.string == '{' and token.next.string == '}':
        tokenutil.DeleteToken(token.previous)
        tokenutil.DeleteToken(token.next)
        self._AddFix([token])

    elif code == errors.INVALID_AUTHOR_TAG_DESCRIPTION:
      match = INVERTED_AUTHOR_SPEC.match(token.string)
      if match:
        # Swap "name (email)" order to "email (name)".
        token.string = '%s%s%s(%s)%s' % (match.group('leading_whitespace'),
                                         match.group('email'),
                                         match.group('whitespace_after_name'),
                                         match.group('name'),
                                         match.group('trailing_characters'))
        self._AddFix(token)

    elif (code == errors.WRONG_INDENTATION and
          not FLAGS.disable_indentation_fixing):
      token = tokenutil.GetFirstTokenInSameLine(token)
      actual = error.position.start
      expected = error.position.length

      if token.type in (Type.WHITESPACE, Type.PARAMETERS) and actual != 0:
        token.string = token.string.lstrip() + (' ' * expected)
        self._AddFix([token])
      else:
        # We need to add indentation.
        new_token = Token(' ' * expected, Type.WHITESPACE,
                          token.line, token.line_number)
        # Note that we'll never need to add indentation at the first line,
        # since it will always not be indented.  Therefore it's safe to assume
        # token.previous exists.
        tokenutil.InsertTokenAfter(new_token, token.previous)
        self._AddFix([token])

    elif code in [errors.MALFORMED_END_OF_SCOPE_COMMENT,
                  errors.MISSING_END_OF_SCOPE_COMMENT]:
      # Only fix cases where }); is found with no trailing content on the line
      # other than a comment. Value of 'token' is set to } for this error.
      if (token.type == Type.END_BLOCK and
          token.next.type == Type.END_PAREN and
          token.next.next.type == Type.SEMICOLON):
        current_token = token.next.next.next
        removed_tokens = []
        while current_token and current_token.line_number == token.line_number:
          if current_token.IsAnyType(Type.WHITESPACE,
                                     Type.START_SINGLE_LINE_COMMENT,
                                     Type.COMMENT):
            removed_tokens.append(current_token)
            current_token = current_token.next
          else:
            # Non-comment trailing content: bail out without fixing.
            return

        if removed_tokens:
          tokenutil.DeleteTokens(removed_tokens[0], len(removed_tokens))

        whitespace_token = Token('  ', Type.WHITESPACE, token.line,
                                 token.line_number)
        start_comment_token = Token('//', Type.START_SINGLE_LINE_COMMENT,
                                    token.line, token.line_number)
        comment_token = Token(' goog.scope', Type.COMMENT, token.line,
                              token.line_number)
        insertion_tokens = [whitespace_token, start_comment_token,
                            comment_token]

        tokenutil.InsertTokensAfter(insertion_tokens, token.next.next)
        self._AddFix(removed_tokens + insertion_tokens)

    elif code in [errors.EXTRA_GOOG_PROVIDE, errors.EXTRA_GOOG_REQUIRE]:
      tokens_in_line = tokenutil.GetAllTokensInSameLine(token)
      tokenutil.DeleteTokens(tokens_in_line[0], len(tokens_in_line))
      self._AddFix(tokens_in_line)

    elif code in [errors.MISSING_GOOG_PROVIDE, errors.MISSING_GOOG_REQUIRE]:
      is_provide = code == errors.MISSING_GOOG_PROVIDE
      is_require = code == errors.MISSING_GOOG_REQUIRE

      missing_namespaces = error.fix_data[0]
      need_blank_line = error.fix_data[1]

      if need_blank_line is None:
        # TODO(user): This happens when there are no existing
        # goog.provide or goog.require statements to position new statements
        # relative to. Consider handling this case with a heuristic.
        return

      insert_location = token.previous

      # If inserting a missing require with no existing requires, insert a
      # blank line first.
      if need_blank_line and is_require:
        tokenutil.InsertBlankLineAfter(insert_location)
        insert_location = insert_location.next

      for missing_namespace in missing_namespaces:
        new_tokens = self._GetNewRequireOrProvideTokens(
            is_provide, missing_namespace, insert_location.line_number + 1)
        tokenutil.InsertLineAfter(insert_location, new_tokens)
        insert_location = new_tokens[-1]
        self._AddFix(new_tokens)

      # If inserting a missing provide with no existing provides, insert a
      # blank line after.
      if need_blank_line and is_provide:
        tokenutil.InsertBlankLineAfter(insert_location)
    def CheckToken(self, token, state):
        """Checks a token, given the current parser_state, for warnings and errors.

    Args:
      token: The current token under consideration
      state: parser_state object that indicates the current state in the page
    """
        if self.__ContainsRecordType(token):
            # We should bail out and not emit any warnings for this annotation.
            # TODO(nicksantos): Support record types for real.
            state.GetDocComment().Invalidate()
            return

        # Call the base class's CheckToken function.
        super(JavaScriptLintRules, self).CheckToken(token, state)

        # Store some convenience variables
        namespaces_info = self._namespaces_info

        if token.type == Type.DOC_FLAG:
            flag = token.attached_object

            if flag.flag_type == 'param' and flag.name_token is not None:
                self._CheckForMissingSpaceBeforeToken(
                    token.attached_object.name_token)

                if (error_check.ShouldCheck(Rule.OPTIONAL_TYPE_MARKER)
                        and flag.type is not None and flag.name is not None):
                    # Check for optional marker in type.
                    if (flag.type.endswith('=')
                            and not flag.name.startswith('opt_')):
                        self._HandleError(
                            errors.JSDOC_MISSING_OPTIONAL_PREFIX,
                            'Optional parameter name %s must be prefixed '
                            'with opt_.' % flag.name, token)
                    elif (not flag.type.endswith('=')
                          and flag.name.startswith('opt_')):
                        self._HandleError(
                            errors.JSDOC_MISSING_OPTIONAL_TYPE,
                            'Optional parameter %s type must end with =.' %
                            flag.name, token)

            if flag.flag_type in state.GetDocFlag().HAS_TYPE:
                # Check for both missing type token and empty type braces '{}'
                # Missing suppress types are reported separately and we allow enums
                # without types.
                if (flag.flag_type not in ('suppress', 'enum')
                        and (not flag.type or flag.type.isspace())):
                    self._HandleError(errors.MISSING_JSDOC_TAG_TYPE,
                                      'Missing type in %s tag' % token.string,
                                      token)

                elif flag.name_token and flag.type_end_token and tokenutil.Compare(
                        flag.type_end_token, flag.name_token) > 0:
                    self._HandleError(
                        errors.OUT_OF_ORDER_JSDOC_TAG_TYPE,
                        'Type should be immediately after %s tag' %
                        token.string, token)

        elif token.type == Type.DOUBLE_QUOTE_STRING_START:
            next_token = token.next
            while next_token.type == Type.STRING_TEXT:
                if javascripttokenizer.JavaScriptTokenizer.SINGLE_QUOTE.search(
                        next_token.string):
                    break
                next_token = next_token.next
            else:
                self._HandleError(
                    errors.UNNECESSARY_DOUBLE_QUOTED_STRING,
                    'Single-quoted string preferred over double-quoted string.',
                    token, Position.All(token.string))

        elif token.type == Type.END_DOC_COMMENT:
            doc_comment = state.GetDocComment()

            # When @externs appears in a @fileoverview comment, it should trigger
            # the same limited doc checks as a special filename like externs.js.
            if doc_comment.HasFlag('fileoverview') and doc_comment.HasFlag(
                    'externs'):
                self._SetLimitedDocChecks(True)

            if (error_check.ShouldCheck(Rule.BLANK_LINES_AT_TOP_LEVEL)
                    and not self._is_html and state.InTopLevel()
                    and not state.InBlock()):

                # Check if we're in a fileoverview or constructor JsDoc.
                is_constructor = (doc_comment.HasFlag('constructor')
                                  or doc_comment.HasFlag('interface'))
                is_file_overview = doc_comment.HasFlag('fileoverview')

                # If the comment is not a file overview, and it does not immediately
                # precede some code, skip it.
                # NOTE: The tokenutil methods are not used here because of their
                # behavior at the top of a file.
                next_token = token.next
                if (not next_token
                        or (not is_file_overview
                            and next_token.type in Type.NON_CODE_TYPES)):
                    return

                # Don't require extra blank lines around suppression of extra
                # goog.require errors.
                if (doc_comment.SuppressionOnly()
                        and next_token.type == Type.IDENTIFIER and
                        next_token.string in ['goog.provide', 'goog.require']):
                    return

                # Find the start of this block (include comments above the block, unless
                # this is a file overview).
                block_start = doc_comment.start_token
                if not is_file_overview:
                    token = block_start.previous
                    while token and token.type in Type.COMMENT_TYPES:
                        block_start = token
                        token = token.previous

                # Count the number of blank lines before this block.
                blank_lines = 0
                token = block_start.previous
                while token and token.type in [
                        Type.WHITESPACE, Type.BLANK_LINE
                ]:
                    if token.type == Type.BLANK_LINE:
                        # A blank line.
                        blank_lines += 1
                    elif token.type == Type.WHITESPACE and not token.line.strip(
                    ):
                        # A line with only whitespace on it.
                        blank_lines += 1
                    token = token.previous

                # Log errors.
                error_message = False
                expected_blank_lines = 0

                if is_file_overview and blank_lines == 0:
                    error_message = 'Should have a blank line before a file overview.'
                    expected_blank_lines = 1
                elif is_constructor and blank_lines != 3:
                    error_message = (
                        'Should have 3 blank lines before a constructor/interface.'
                    )
                    expected_blank_lines = 3
                elif not is_file_overview and not is_constructor and blank_lines != 2:
                    error_message = 'Should have 2 blank lines between top-level blocks.'
                    expected_blank_lines = 2

                if error_message:
                    self._HandleError(errors.WRONG_BLANK_LINE_COUNT,
                                      error_message, block_start,
                                      Position.AtBeginning(),
                                      expected_blank_lines - blank_lines)

        elif token.type == Type.END_BLOCK:
            if state.InFunction() and state.IsFunctionClose():
                is_immediately_called = (token.next and token.next.type
                                         == Type.START_PAREN)

                function = state.GetFunction()
                if not self._limited_doc_checks:
                    if (function.has_return and function.doc
                            and not is_immediately_called
                            and not function.doc.HasFlag('return')
                            and not function.doc.InheritsDocumentation()
                            and not function.doc.HasFlag('constructor')):
                        # Check for proper documentation of return value.
                        self._HandleError(
                            errors.MISSING_RETURN_DOCUMENTATION,
                            'Missing @return JsDoc in function with non-trivial return',
                            function.doc.end_token, Position.AtBeginning())
                    elif (not function.has_return and not function.has_throw
                          and function.doc and function.doc.HasFlag('return')
                          and not state.InInterfaceMethod()):
                        return_flag = function.doc.GetFlag('return')
                        if (return_flag.type is None
                                or ('undefined' not in return_flag.type
                                    and 'void' not in return_flag.type
                                    and '*' not in return_flag.type)):
                            self._HandleError(
                                errors.UNNECESSARY_RETURN_DOCUMENTATION,
                                'Found @return JsDoc on function that returns nothing',
                                return_flag.flag_token, Position.AtBeginning())

            if state.InFunction() and state.IsFunctionClose():
                is_immediately_called = (token.next and token.next.type
                                         == Type.START_PAREN)
                if (function.has_this and function.doc
                        and not function.doc.HasFlag('this')
                        and not function.is_constructor
                        and not function.is_interface
                        and '.prototype.' not in function.name):
                    self._HandleError(
                        errors.MISSING_JSDOC_TAG_THIS,
                        'Missing @this JsDoc in function referencing "this". ('
                        'this usually means you are trying to reference "this" in '
                        'a static function, or you have forgotten to mark a '
                        'constructor with @constructor)',
                        function.doc.end_token, Position.AtBeginning())

        elif token.type == Type.IDENTIFIER:
            if token.string == 'goog.inherits' and not state.InFunction():
                if state.GetLastNonSpaceToken(
                ).line_number == token.line_number:
                    self._HandleError(
                        errors.MISSING_LINE,
                        'Missing newline between constructor and goog.inherits',
                        token, Position.AtBeginning())

                extra_space = state.GetLastNonSpaceToken().next
                while extra_space != token:
                    if extra_space.type == Type.BLANK_LINE:
                        self._HandleError(
                            errors.EXTRA_LINE,
                            'Extra line between constructor and goog.inherits',
                            extra_space)
                    extra_space = extra_space.next

                # TODO(robbyw): Test the last function was a constructor.
                # TODO(robbyw): Test correct @extends and @implements documentation.

            elif (token.string == 'goog.provide' and not state.InFunction()
                  and namespaces_info is not None):
                namespace = tokenutil.Search(token, Type.STRING_TEXT).string

                # Report extra goog.provide statement.
                if namespaces_info.IsExtraProvide(token):
                    self._HandleError(errors.EXTRA_GOOG_PROVIDE,
                                      'Unnecessary goog.provide: ' + namespace,
                                      token,
                                      position=Position.AtBeginning())

                if namespaces_info.IsLastProvide(token):
                    # Report missing provide statements after the last existing provide.
                    missing_provides = namespaces_info.GetMissingProvides()
                    if missing_provides:
                        self._ReportMissingProvides(
                            missing_provides,
                            tokenutil.GetLastTokenInSameLine(token).next,
                            False)

                    # If there are no require statements, missing requires should be
                    # reported after the last provide.
                    if not namespaces_info.GetRequiredNamespaces():
                        missing_requires = namespaces_info.GetMissingRequires()
                        if missing_requires:
                            self._ReportMissingRequires(
                                missing_requires,
                                tokenutil.GetLastTokenInSameLine(token).next,
                                True)

            elif (token.string == 'goog.require' and not state.InFunction()
                  and namespaces_info is not None):
                namespace = tokenutil.Search(token, Type.STRING_TEXT).string

                # If there are no provide statements, missing provides should be
                # reported before the first require.
                if (namespaces_info.IsFirstRequire(token)
                        and not namespaces_info.GetProvidedNamespaces()):
                    missing_provides = namespaces_info.GetMissingProvides()
                    if missing_provides:
                        self._ReportMissingProvides(
                            missing_provides,
                            tokenutil.GetFirstTokenInSameLine(token), True)

                # Report extra goog.require statement.
                if namespaces_info.IsExtraRequire(token):
                    self._HandleError(errors.EXTRA_GOOG_REQUIRE,
                                      'Unnecessary goog.require: ' + namespace,
                                      token,
                                      position=Position.AtBeginning())

                # Report missing goog.require statements.
                if namespaces_info.IsLastRequire(token):
                    missing_requires = namespaces_info.GetMissingRequires()
                    if missing_requires:
                        self._ReportMissingRequires(
                            missing_requires,
                            tokenutil.GetLastTokenInSameLine(token).next,
                            False)

        elif token.type == Type.OPERATOR:
            last_in_line = token.IsLastInLine()
            # If the token is unary and appears to be used in a unary context
            # it's ok.  Otherwise, if it's at the end of the line or immediately
            # before a comment, it's ok.
            # Don't report an error before a start bracket - it will be reported
            # by that token's space checks.
            if (not token.metadata.IsUnaryOperator() and not last_in_line
                    and not token.next.IsComment()
                    and not token.next.IsOperator(',') and not token.next.type
                    in (Type.WHITESPACE, Type.END_PAREN, Type.END_BRACKET,
                        Type.SEMICOLON, Type.START_BRACKET)):
                self._HandleError(errors.MISSING_SPACE,
                                  'Missing space after "%s"' % token.string,
                                  token, Position.AtEnd(token.string))
        elif token.type == Type.WHITESPACE:
            first_in_line = token.IsFirstInLine()
            last_in_line = token.IsLastInLine()
            # Check whitespace length if it's not the first token of the line and
            # if it's not immediately before a comment.
            if not last_in_line and not first_in_line and not token.next.IsComment(
            ):
                # Ensure there is no space after opening parentheses.
                if (token.previous.type
                        in (Type.START_PAREN, Type.START_BRACKET,
                            Type.FUNCTION_NAME)
                        or token.next.type == Type.START_PARAMETERS):
                    self._HandleError(
                        errors.EXTRA_SPACE,
                        'Extra space after "%s"' % token.previous.string,
                        token, Position.All(token.string))