Example 1
  def testGetNextCodeToken(self):

    tokens = testutil.TokenizeSource("""
start1. // comment
    /* another comment */
    end1
""")

    def _GetTokenStartingWith(token_starts_with):
      for t in tokens:
        if t.string.startswith(token_starts_with):
          return t

    self.assertEquals(
        'end1',
        tokenutil.GetNextCodeToken(_GetTokenStartingWith('start1')).string)

    self.assertEquals(
        None,
        tokenutil.GetNextCodeToken(_GetTokenStartingWith('end1')))
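
For orientation, a minimal sketch of the behavior the assertions above exercise, assuming tokens are linked through a .next attribute and that a caller-supplied is_code predicate distinguishes code from comments and whitespace (the name and signature below are illustrative, not the library's actual implementation):

def get_next_code_token_sketch(token, is_code):
  # Walk the token's forward links until a code token is found; return None
  # when the stream ends, which matches the second assertion above.
  current = token.next if token else None
  while current and not is_code(current):
    current = current.next
  return current
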
Example 2
  def _CheckOperator(self, token):
    """Checks an operator for spacing and line style.

    Args:
      token: The operator token.
    """
    last_code = token.metadata.last_code

    if not self._ExpectSpaceBeforeOperator(token):
      if (token.previous and token.previous.type == Type.WHITESPACE and
          last_code and last_code.type in (Type.NORMAL, Type.IDENTIFIER) and
          last_code.line_number == token.line_number):
        self._HandleError(
            errors.EXTRA_SPACE, 'Extra space before "%s"' % token.string,
            token.previous, position=Position.All(token.previous.string))

    elif (token.previous and
          not token.previous.IsComment() and
          not tokenutil.IsDot(token) and
          token.previous.type in Type.EXPRESSION_ENDER_TYPES):
      self._HandleError(errors.MISSING_SPACE,
                        'Missing space before "%s"' % token.string, token,
                        position=Position.AtBeginning())

    # Check wrapping of operators.
    next_code = tokenutil.GetNextCodeToken(token)

    is_dot = tokenutil.IsDot(token)
    wrapped_before = last_code and last_code.line_number != token.line_number
    wrapped_after = next_code and next_code.line_number != token.line_number

    if FLAGS.dot_on_next_line and is_dot and wrapped_after:
      self._HandleError(
          errors.LINE_ENDS_WITH_DOT,
          '"." must go on the following line',
          token)
    if (not is_dot and wrapped_before and
        not token.metadata.IsUnaryOperator()):
      self._HandleError(
          errors.LINE_STARTS_WITH_OPERATOR,
          'Binary operator must go on previous line "%s"' % token.string,
          token)
Example 3
  def CheckToken(self, token, state):
    """Checks a token, given the current parser_state, for warnings and errors.

    Args:
      token: The current token under consideration
      state: parser_state object that indicates the current state in the page
    """
    # Store some convenience variables
    first_in_line = token.IsFirstInLine()
    last_in_line = token.IsLastInLine()
    last_non_space_token = state.GetLastNonSpaceToken()

    token_type = token.type

    # Process the line change.
    if not self._is_html and error_check.ShouldCheck(Rule.INDENTATION):
      # TODO(robbyw): Support checking indentation in HTML files.
      indentation_errors = self._indentation.CheckToken(token, state)
      for indentation_error in indentation_errors:
        self._HandleError(*indentation_error)

    if last_in_line:
      self._CheckLineLength(token, state)

    if token_type == Type.PARAMETERS:
      # Find missing spaces in parameter lists.
      if self.MISSING_PARAMETER_SPACE.search(token.string):
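        # Rebuild the parameter list with a single space after each comma;
        # the error fixer substitutes this fix_data for the token's string.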
        fix_data = ', '.join([s.strip() for s in token.string.split(',')])
        self._HandleError(errors.MISSING_SPACE, 'Missing space after ","',
                          token, position=None, fix_data=fix_data.strip())

      # Find extra spaces at the beginning of parameter lists.  Make sure
      # we aren't at the beginning of a continuing multi-line list.
      if not first_in_line:
        space_count = len(token.string) - len(token.string.lstrip())
        if space_count:
          self._HandleError(errors.EXTRA_SPACE, 'Extra space after "("',
                            token, position=Position(0, space_count))

    elif (token_type == Type.START_BLOCK and
          token.metadata.context.type == Context.BLOCK):
      self._CheckForMissingSpaceBeforeToken(token)

    elif token_type == Type.END_BLOCK:
      last_code = token.metadata.last_code

      if FLAGS.check_trailing_comma:
        if last_code.IsOperator(','):
          self._HandleError(
              errors.COMMA_AT_END_OF_LITERAL,
              'Illegal comma at end of object literal', last_code,
              position=Position.All(last_code.string))

      if state.InFunction() and state.IsFunctionClose():
        if state.InTopLevelFunction():
          # A semicolon should not be included at the end of a function
          # declaration.
          if not state.InAssignedFunction():
            if not last_in_line and token.next.type == Type.SEMICOLON:
              self._HandleError(
                  errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
                  'Illegal semicolon after function declaration',
                  token.next, position=Position.All(token.next.string))

        # A semicolon should be included at the end of a function expression
        # that is not immediately called or used by a dot operator.
        if (state.InAssignedFunction() and token.next
            and token.next.type != Type.SEMICOLON):
          next_token = tokenutil.GetNextCodeToken(token)
          is_immediately_used = next_token and (
              next_token.type == Type.START_PAREN or
              tokenutil.IsDot(next_token))
          if not is_immediately_used:
            self._HandleError(
                errors.MISSING_SEMICOLON_AFTER_FUNCTION,
                'Missing semicolon after function assigned to a variable',
                token, position=Position.AtEnd(token.string))

        if state.InInterfaceMethod() and last_code.type != Type.START_BLOCK:
          self._HandleError(errors.INTERFACE_METHOD_CANNOT_HAVE_CODE,
                            'Interface methods cannot contain code', last_code)

      elif (state.IsBlockClose() and
            token.next and token.next.type == Type.SEMICOLON):
        if (last_code.metadata.context.parent.type != Context.OBJECT_LITERAL
            and last_code.metadata.context.type != Context.OBJECT_LITERAL):
          self._HandleError(
              errors.REDUNDANT_SEMICOLON,
              'No semicolon is required to end a code block',
              token.next, position=Position.All(token.next.string))

    elif token_type == Type.SEMICOLON:
      if token.previous and token.previous.type == Type.WHITESPACE:
        self._HandleError(
            errors.EXTRA_SPACE, 'Extra space before ";"',
            token.previous, position=Position.All(token.previous.string))

      if token.next and token.next.line_number == token.line_number:
        if token.metadata.context.type != Context.FOR_GROUP_BLOCK:
          # TODO(robbyw): Error about no multi-statement lines.
          pass

        elif token.next.type not in (
            Type.WHITESPACE, Type.SEMICOLON, Type.END_PAREN):
          self._HandleError(
              errors.MISSING_SPACE,
              'Missing space after ";" in for statement',
              token.next,
              position=Position.AtBeginning())

      last_code = token.metadata.last_code
      if last_code and last_code.type == Type.SEMICOLON:
        # Allow a single double semicolon in for loops for cases like:
        # for (;;) { }.
        # NOTE(user): This is not a perfect check, and will not throw an error
        # for cases like: for (var i = 0;; i < n; i++) {}, but then your code
        # probably won't work either.
        for_token = tokenutil.CustomSearch(
            last_code,
            lambda token: token.type == Type.KEYWORD and token.string == 'for',
            end_func=lambda token: token.type == Type.SEMICOLON,
            distance=None,
            reverse=True)

        if not for_token:
          self._HandleError(errors.REDUNDANT_SEMICOLON, 'Redundant semicolon',
                            token, position=Position.All(token.string))

    elif token_type == Type.START_PAREN:
      # Ensure there is a space between a keyword and a following opening
      # parenthesis, unless the keyword is being invoked like a member
      # function (i.e. preceded by a dot).
      if (token.previous and token.previous.type == Type.KEYWORD and
          (not token.previous.metadata or
           not token.previous.metadata.last_code or
           not token.previous.metadata.last_code.string or
           token.previous.metadata.last_code.string[-1:] != '.')):
        self._HandleError(errors.MISSING_SPACE, 'Missing space before "("',
                          token, position=Position.AtBeginning())
      elif token.previous and token.previous.type == Type.WHITESPACE:
        before_space = token.previous.previous
        # Ensure that there is no extra space before a function invocation,
        # even if the function being invoked happens to be a keyword.
        if (before_space and before_space.line_number == token.line_number and
            before_space.type == Type.IDENTIFIER or
            (before_space.type == Type.KEYWORD and before_space.metadata and
             before_space.metadata.last_code and
             before_space.metadata.last_code.string and
             before_space.metadata.last_code.string[-1:] == '.')):
          self._HandleError(
              errors.EXTRA_SPACE, 'Extra space before "("',
              token.previous, position=Position.All(token.previous.string))

    elif token_type == Type.START_BRACKET:
      self._HandleStartBracket(token, last_non_space_token)
    elif token_type in (Type.END_PAREN, Type.END_BRACKET):
      # Ensure there is no space before closing parentheses, except when
      # it's in a for statement with an omitted section, or when it's at the
      # beginning of a line.
      last_code = token.metadata.last_code
      if FLAGS.check_trailing_comma and last_code.IsOperator(','):
        if token_type in [Type.END_BRACKET, Type.END_PAREN]:
          self._HandleError(
              errors.COMMA_AT_END_OF_LITERAL,
              'Illegal comma at end of array literal', last_code,
              position=Position.All(last_code.string))

      if (token.previous and token.previous.type == Type.WHITESPACE and
          not token.previous.IsFirstInLine() and
          not (last_non_space_token and last_non_space_token.line_number ==
               token.line_number and
               last_non_space_token.type == Type.SEMICOLON)):
        self._HandleError(
            errors.EXTRA_SPACE, 'Extra space before "%s"' %
            token.string, token.previous,
            position=Position.All(token.previous.string))

    elif token_type == Type.WHITESPACE:
      if self.ILLEGAL_TAB.search(token.string):
        if token.IsFirstInLine():
          if token.next:
            self._HandleError(
                errors.ILLEGAL_TAB,
                'Illegal tab in whitespace before "%s"' % token.next.string,
                token, position=Position.All(token.string))
          else:
            self._HandleError(
                errors.ILLEGAL_TAB,
                'Illegal tab in whitespace',
                token, position=Position.All(token.string))
        else:
          self._HandleError(
              errors.ILLEGAL_TAB,
              'Illegal tab in whitespace after "%s"' % token.previous.string,
              token, position=Position.All(token.string))

      # Check whitespace length if it's not the first token of the line and
      # if it's not immediately before a comment.
      if last_in_line:
        # Check for extra whitespace at the end of a line.
        self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',
                          token, position=Position.All(token.string))
      elif not first_in_line and not token.next.IsComment():
        if token.length > 1:
          self._HandleError(
              errors.EXTRA_SPACE, 'Extra space after "%s"' %
              token.previous.string, token,
              position=Position(1, len(token.string) - 1))

    elif token_type == Type.OPERATOR:
      self._CheckOperator(token)
    elif token_type == Type.DOC_FLAG:
      flag = token.attached_object

      if flag.flag_type == 'bug':
        # TODO(robbyw): Check for exactly 1 space on the left.
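        # Take only the first whitespace-delimited word after the tag; it
        # must be all digits to be accepted as a bug number.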
        string = token.next.string.lstrip()
        string = string.split(' ', 1)[0]

        if not string.isdigit():
          self._HandleError(errors.NO_BUG_NUMBER_AFTER_BUG_TAG,
                            '@bug should be followed by a bug number', token)

      elif flag.flag_type == 'suppress':
        if flag.type is None:
          # A syntactically invalid suppress tag will get tokenized as a normal
          # flag, indicating an error.
          self._HandleError(
              errors.INCORRECT_SUPPRESS_SYNTAX,
              'Invalid suppress syntax: should be @suppress {errortype}. '
              'Spaces matter.', token)
        else:
          for suppress_type in flag.jstype.IterIdentifiers():
            if suppress_type not in state.GetDocFlag().SUPPRESS_TYPES:
              self._HandleError(
                  errors.INVALID_SUPPRESS_TYPE,
                  'Invalid suppression type: %s' % suppress_type, token)

      elif (error_check.ShouldCheck(Rule.WELL_FORMED_AUTHOR) and
            flag.flag_type == 'author'):
        # TODO(user): In non-strict mode, check whatever part of the author
        # tag exists, even though the full form checked below isn't required.
        string = token.next.string
        result = self.AUTHOR_SPEC.match(string)
        if not result:
          self._HandleError(errors.INVALID_AUTHOR_TAG_DESCRIPTION,
                            'Author tag line should be of the form: '
                            '@author [email protected] (Your Name)',
                            token.next)
        else:
          # Check spacing between email address and name. Do this before
          # checking earlier spacing so positions are easier to calculate for
          # autofixing.
          num_spaces = len(result.group(2))
          if num_spaces < 1:
            self._HandleError(errors.MISSING_SPACE,
                              'Missing space after email address',
                              token.next, position=Position(result.start(2), 0))
          elif num_spaces > 1:
            self._HandleError(
                errors.EXTRA_SPACE, 'Extra space after email address',
                token.next,
                position=Position(result.start(2) + 1, num_spaces - 1))

          # Check for extra spaces before the email address. There can't be
          # too few; without at least one space the @author tag wouldn't have
          # matched.
          num_spaces = len(result.group(1))
          if num_spaces > 1:
            self._HandleError(errors.EXTRA_SPACE,
                              'Extra space before email address',
                              token.next, position=Position(1, num_spaces - 1))

      elif (flag.flag_type in state.GetDocFlag().HAS_DESCRIPTION and
            not self._limited_doc_checks):
        if flag.flag_type == 'param':
          if flag.name is None:
            self._HandleError(errors.MISSING_JSDOC_PARAM_NAME,
                              'Missing name in @param tag', token)

        if not flag.description or flag.description is None:
          flag_name = token.type
          if 'name' in token.values:
            flag_name = '@' + token.values['name']

          if flag_name not in self.JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED:
            self._HandleError(
                errors.MISSING_JSDOC_TAG_DESCRIPTION,
                'Missing description in %s tag' % flag_name, token)
        else:
          self._CheckForMissingSpaceBeforeToken(flag.description_start_token)

      if flag.HasType():
        if flag.type_start_token is not None:
          self._CheckForMissingSpaceBeforeToken(
              token.attached_object.type_start_token)

        if flag.jstype and not flag.jstype.IsEmpty():
          self._CheckJsDocType(token, flag.jstype)

          if error_check.ShouldCheck(Rule.BRACES_AROUND_TYPE) and (
              flag.type_start_token.type != Type.DOC_START_BRACE or
              flag.type_end_token.type != Type.DOC_END_BRACE):
            self._HandleError(
                errors.MISSING_BRACES_AROUND_TYPE,
                'Type must always be surrounded by curly braces.', token)

    if token_type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
      if (token.values['name'] not in state.GetDocFlag().LEGAL_DOC and
          token.values['name'] not in FLAGS.custom_jsdoc_tags):
        self._HandleError(
            errors.INVALID_JSDOC_TAG,
            'Invalid JsDoc tag: %s' % token.values['name'], token)

      if (error_check.ShouldCheck(Rule.NO_BRACES_AROUND_INHERIT_DOC) and
          token.values['name'] == 'inheritDoc' and
          token_type == Type.DOC_INLINE_FLAG):
        self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC,
                          'Unnecessary braces around @inheritDoc',
                          token)

    elif token_type == Type.SIMPLE_LVALUE:
      identifier = token.values['identifier']

      if ((not state.InFunction() or state.InConstructor()) and
          state.InTopLevel() and not state.InObjectLiteralDescendant()):
        jsdoc = state.GetDocComment()
        if not state.HasDocComment(identifier):
          # Only test for documentation on identifiers with .s in them to
          # avoid checking things like simple variables. We don't require
          # documenting assignments to .prototype itself (bug 1880803).
          if (not state.InConstructor() and
              identifier.find('.') != -1 and not
              identifier.endswith('.prototype') and not
              self._limited_doc_checks):
            comment = state.GetLastComment()
            if not (comment and comment.lower().count('jsdoc inherited')):
              self._HandleError(
                  errors.MISSING_MEMBER_DOCUMENTATION,
                  "No docs found for member '%s'" % identifier,
                  token)
        elif jsdoc and (not state.InConstructor() or
                        identifier.startswith('this.')):
          # We are at the top level and the function/member is documented.
          if identifier.endswith('_') and not identifier.endswith('__'):
            # Can have a private class which inherits documentation from a
            # public superclass.
            #
            # @inheritDoc is deprecated in favor of using @override; both are
            # checked in the same way below.
            if (jsdoc.HasFlag('override') and not jsdoc.HasFlag('constructor')
                and ('accessControls' not in jsdoc.suppressions)):
              self._HandleError(
                  errors.INVALID_OVERRIDE_PRIVATE,
                  '%s should not override a private member.' % identifier,
                  jsdoc.GetFlag('override').flag_token)
            if (jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor')
                and ('accessControls' not in jsdoc.suppressions)):
              self._HandleError(
                  errors.INVALID_INHERIT_DOC_PRIVATE,
                  '%s should not inherit from a private member.' % identifier,
                  jsdoc.GetFlag('inheritDoc').flag_token)
            if (not jsdoc.HasFlag('private') and
                ('underscore' not in jsdoc.suppressions) and not
                ((jsdoc.HasFlag('inheritDoc') or jsdoc.HasFlag('override')) and
                 ('accessControls' in jsdoc.suppressions))):
              self._HandleError(
                  errors.MISSING_PRIVATE,
                  'Member "%s" must have @private JsDoc.' %
                  identifier, token)
            if jsdoc.HasFlag('private') and 'underscore' in jsdoc.suppressions:
              self._HandleError(
                  errors.UNNECESSARY_SUPPRESS,
                  '@suppress {underscore} is not necessary with @private',
                  jsdoc.suppressions['underscore'])
          elif (jsdoc.HasFlag('private') and
                not self.InExplicitlyTypedLanguage()):
            # In some ECMA implementations it is convention to hide public
            # fields from documentation using the @private tag.
            self._HandleError(
                errors.EXTRA_PRIVATE,
                'Member "%s" must not have @private JsDoc' %
                identifier, token)

          # These flags are only legal on localizable message definitions;
          # such variables always begin with the prefix MSG_.
          if not identifier.startswith('MSG_') and '.MSG_' not in identifier:
            for f in ('desc', 'hidden', 'meaning'):
              if jsdoc.HasFlag(f):
                self._HandleError(
                    errors.INVALID_USE_OF_DESC_TAG,
                    'Member "%s" does not start with MSG_ and thus '
                    'should not have @%s JsDoc' % (identifier, f),
                    token)

      # Check for illegally assigning live objects as prototype property values.
      index = identifier.find('.prototype.')
      # Ignore anything with additional .s after the prototype.
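      # index + 11 skips past '.prototype.' (11 characters), so the search
      # only finds dots that appear after that suffix.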
      if index != -1 and identifier.find('.', index + 11) == -1:
        equal_operator = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
        next_code = tokenutil.SearchExcept(equal_operator, Type.NON_CODE_TYPES)
        if next_code and (
            next_code.type in (Type.START_BRACKET, Type.START_BLOCK) or
            next_code.IsOperator('new')):
          self._HandleError(
              errors.ILLEGAL_PROTOTYPE_MEMBER_VALUE,
              'Member %s cannot have a non-primitive value' % identifier,
              token)

    elif token_type == Type.END_PARAMETERS:
      # Find extra space at the end of parameter lists.  We check the token
      # prior to the current one when it is a closing paren.
      if (token.previous and token.previous.type == Type.PARAMETERS
          and self.ENDS_WITH_SPACE.search(token.previous.string)):
        self._HandleError(errors.EXTRA_SPACE, 'Extra space before ")"',
                          token.previous)

      jsdoc = state.GetDocComment()
      if state.GetFunction().is_interface:
        if token.previous and token.previous.type == Type.PARAMETERS:
          self._HandleError(
              errors.INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS,
              'Interface constructor cannot have parameters',
              token.previous)
      elif (state.InTopLevel() and jsdoc and not jsdoc.HasFlag('see')
            and not jsdoc.InheritsDocumentation()
            and not state.InObjectLiteralDescendant() and not
            jsdoc.IsInvalidated()):
        distance, edit = jsdoc.CompareParameters(state.GetParams())
        if distance:
          params_iter = iter(state.GetParams())
          docs_iter = iter(jsdoc.ordered_params)

          for op in edit:
            if op == 'I':
              # Insertion.
              # Parsing doc comments is the same for all languages
              # but some languages care about parameters that don't have
              # doc comments and some languages don't care.
              # Languages that don't allow variables to be typed, such as
              # JavaScript, care, but languages such as ActionScript or Java
              # that allow variables to be typed don't care.
              if not self._limited_doc_checks:
                self.HandleMissingParameterDoc(token, params_iter.next())

            elif op == 'D':
              # Deletion
              self._HandleError(errors.EXTRA_PARAMETER_DOCUMENTATION,
                                'Found docs for non-existing parameter: "%s"' %
                                docs_iter.next(), token)
            elif op == 'S':
              # Substitution
              if not self._limited_doc_checks:
                self._HandleError(
                    errors.WRONG_PARAMETER_DOCUMENTATION,
                    'Parameter mismatch: got "%s", expected "%s"' %
                    (params_iter.next(), docs_iter.next()), token)

            else:
              # Equality - just advance the iterators
              params_iter.next()
              docs_iter.next()

    elif token_type == Type.STRING_TEXT:
      # If this is the first token after the start of the string, but it's at
      # the end of a line, we know we have a multi-line string.
      if token.previous.type in (
          Type.SINGLE_QUOTE_STRING_START,
          Type.DOUBLE_QUOTE_STRING_START) and last_in_line:
        self._HandleError(errors.MULTI_LINE_STRING,
                          'Multi-line strings are not allowed', token)

    # This check is orthogonal to the ones above, and repeats some types, so
    # it is a plain if and not an elif.
    if token.type in Type.COMMENT_TYPES:
      if self.ILLEGAL_TAB.search(token.string):
        self._HandleError(errors.ILLEGAL_TAB,
                          'Illegal tab in comment "%s"' % token.string, token)

      trimmed = token.string.rstrip()
      if last_in_line and token.string != trimmed:
        # Check for extra whitespace at the end of a line.
        self._HandleError(
            errors.EXTRA_SPACE, 'Extra space at end of line', token,
            position=Position(len(trimmed), len(token.string) - len(trimmed)))

    # This check is also orthogonal since it is based on metadata.
    if token.metadata.is_implied_semicolon:
      self._HandleError(errors.MISSING_SEMICOLON,
                        'Missing semicolon at end of line', token)
Example 4
  def HandleError(self, error):
    """Attempts to fix the error.

    Args:
      error: The error object
    """
    code = error.code
    token = error.token

    if self._fix_error_codes and code not in self._fix_error_codes:
      return

    if code == errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL:
      self._FixJsDocPipeNull(token.attached_object.jstype)

    elif code == errors.JSDOC_MISSING_OPTIONAL_TYPE:
      iterator = token.attached_object.type_end_token
      if iterator.type == Type.DOC_END_BRACE or iterator.string.isspace():
        iterator = iterator.previous

      ending_space = len(iterator.string) - len(iterator.string.rstrip())
      iterator.string = '%s=%s' % (iterator.string.rstrip(),
                                   ' ' * ending_space)

      # Create a new flag object with updated type info.
      token.attached_object = javascriptstatetracker.JsDocFlag(token)
      self._AddFix(token)

    elif code == errors.JSDOC_MISSING_VAR_ARGS_TYPE:
      iterator = token.attached_object.type_start_token
      if iterator.type == Type.DOC_START_BRACE or iterator.string.isspace():
        iterator = iterator.next

      starting_space = len(iterator.string) - len(iterator.string.lstrip())
      iterator.string = '%s...%s' % (' ' * starting_space,
                                     iterator.string.lstrip())

      # Create a new flag object with updated type info.
      token.attached_object = javascriptstatetracker.JsDocFlag(token)
      self._AddFix(token)

    elif code in (errors.MISSING_SEMICOLON_AFTER_FUNCTION,
                  errors.MISSING_SEMICOLON):
      semicolon_token = Token(';', Type.SEMICOLON, token.line,
                              token.line_number)
      tokenutil.InsertTokenAfter(semicolon_token, token)
      token.metadata.is_implied_semicolon = False
      semicolon_token.metadata.is_implied_semicolon = False
      self._AddFix(token)

    elif code in (errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
                  errors.REDUNDANT_SEMICOLON,
                  errors.COMMA_AT_END_OF_LITERAL):
      self._DeleteToken(token)
      self._AddFix(token)

    elif code == errors.INVALID_JSDOC_TAG:
      if token.string == '@returns':
        token.string = '@return'
        self._AddFix(token)

    elif code == errors.FILE_MISSING_NEWLINE:
      # This error is fixed implicitly by the way we restore the file
      self._AddFix(token)

    elif code == errors.MISSING_SPACE:
      if error.fix_data:
        token.string = error.fix_data
        self._AddFix(token)
      elif error.position:
        if error.position.IsAtBeginning():
          tokenutil.InsertSpaceTokenAfter(token.previous)
        elif error.position.IsAtEnd(token.string):
          tokenutil.InsertSpaceTokenAfter(token)
        else:
          token.string = error.position.Set(token.string, ' ')
        self._AddFix(token)

    elif code == errors.EXTRA_SPACE:
      if error.position:
        token.string = error.position.Set(token.string, '')
        self._AddFix(token)

    elif code == errors.MISSING_LINE:
      if error.position.IsAtBeginning():
        tokenutil.InsertBlankLineAfter(token.previous)
      else:
        tokenutil.InsertBlankLineAfter(token)
      self._AddFix(token)

    elif code == errors.EXTRA_LINE:
      self._DeleteToken(token)
      self._AddFix(token)

    elif code == errors.WRONG_BLANK_LINE_COUNT:
      if not token.previous:
        # TODO(user): Add an insertBefore method to tokenutil.
        return

      num_lines = error.fix_data
      should_delete = False
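      # fix_data holds the blank-line delta: a negative value means blank
      # lines must be removed, a positive value means they must be inserted.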

      if num_lines < 0:
        num_lines *= -1
        should_delete = True

      for unused_i in xrange(1, num_lines + 1):
        if should_delete:
          # TODO(user): DeleteToken should update line numbers.
          self._DeleteToken(token.previous)
        else:
          tokenutil.InsertBlankLineAfter(token.previous)
        self._AddFix(token)

    elif code == errors.UNNECESSARY_DOUBLE_QUOTED_STRING:
      end_quote = tokenutil.Search(token, Type.DOUBLE_QUOTE_STRING_END)
      if end_quote:
        single_quote_start = Token(
            "'", Type.SINGLE_QUOTE_STRING_START, token.line, token.line_number)
        single_quote_end = Token(
            "'", Type.SINGLE_QUOTE_STRING_START, end_quote.line,
            token.line_number)

        tokenutil.InsertTokenAfter(single_quote_start, token)
        tokenutil.InsertTokenAfter(single_quote_end, end_quote)
        self._DeleteToken(token)
        self._DeleteToken(end_quote)
        self._AddFix([token, end_quote])

    elif code == errors.MISSING_BRACES_AROUND_TYPE:
      fixed_tokens = []
      start_token = token.attached_object.type_start_token

      if start_token.type != Type.DOC_START_BRACE:
        leading_space = (
            len(start_token.string) - len(start_token.string.lstrip()))
        if leading_space:
          start_token = tokenutil.SplitToken(start_token, leading_space)
          # Fix case where start and end token were the same.
          if token.attached_object.type_end_token == start_token.previous:
            token.attached_object.type_end_token = start_token

        new_token = Token('{', Type.DOC_START_BRACE, start_token.line,
                          start_token.line_number)
        tokenutil.InsertTokenAfter(new_token, start_token.previous)
        token.attached_object.type_start_token = new_token
        fixed_tokens.append(new_token)

      end_token = token.attached_object.type_end_token
      if end_token.type != Type.DOC_END_BRACE:
        # If the start token was a brace, the end token will be a
        # FLAG_ENDING_TYPE token; if there wasn't a starting brace, then the
        # end token is the last token of the actual type.
        last_type = end_token
        if not fixed_tokens:
          last_type = end_token.previous

        while last_type.string.isspace():
          last_type = last_type.previous

        # If there was no starting brace then a lone end brace wouldn't have
        # been the type end token. Now that we've added any missing start
        # brace, see if the last effective type token was an end brace.
        if last_type.type != Type.DOC_END_BRACE:
          trailing_space = (len(last_type.string) -
                            len(last_type.string.rstrip()))
          if trailing_space:
            tokenutil.SplitToken(last_type,
                                 len(last_type.string) - trailing_space)

          new_token = Token('}', Type.DOC_END_BRACE, last_type.line,
                            last_type.line_number)
          tokenutil.InsertTokenAfter(new_token, last_type)
          token.attached_object.type_end_token = new_token
          fixed_tokens.append(new_token)

      self._AddFix(fixed_tokens)

    elif code == errors.LINE_STARTS_WITH_OPERATOR:
      # Remove whitespace following the operator so the line starts clean.
      self._StripSpace(token, before=False)

      # Remove the operator.
      tokenutil.DeleteToken(token)
      self._AddFix(token)

      insertion_point = tokenutil.GetPreviousCodeToken(token)

      # Insert a space between the previous token and the new operator.
      space = Token(' ', Type.WHITESPACE, insertion_point.line,
                    insertion_point.line_number)
      tokenutil.InsertTokenAfter(space, insertion_point)

      # Insert the operator on the end of the previous line.
      new_token = Token(token.string, token.type, insertion_point.line,
                        insertion_point.line_number)
      tokenutil.InsertTokenAfter(new_token, space)
      self._AddFix(new_token)

    elif code == errors.LINE_ENDS_WITH_DOT:
      # Remove whitespace preceding the operator to remove trailing whitespace.
      self._StripSpace(token, before=True)

      # Remove the dot.
      tokenutil.DeleteToken(token)
      self._AddFix(token)

      insertion_point = tokenutil.GetNextCodeToken(token)

      # Insert the dot at the beginning of the next line of code.
      new_token = Token(token.string, token.type, insertion_point.line,
                        insertion_point.line_number)
      tokenutil.InsertTokenBefore(new_token, insertion_point)
      self._AddFix(new_token)

    elif code == errors.GOOG_REQUIRES_NOT_ALPHABETIZED:
      require_start_token = error.fix_data
      sorter = requireprovidesorter.RequireProvideSorter()
      sorter.FixRequires(require_start_token)

      self._AddFix(require_start_token)

    elif code == errors.GOOG_PROVIDES_NOT_ALPHABETIZED:
      provide_start_token = error.fix_data
      sorter = requireprovidesorter.RequireProvideSorter()
      sorter.FixProvides(provide_start_token)

      self._AddFix(provide_start_token)

    elif code == errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC:
      if token.previous.string == '{' and token.next.string == '}':
        self._DeleteToken(token.previous)
        self._DeleteToken(token.next)
        self._AddFix([token])

    elif code == errors.INVALID_AUTHOR_TAG_DESCRIPTION:
      match = INVERTED_AUTHOR_SPEC.match(token.string)
      if match:
        token.string = '%s%s%s(%s)%s' % (match.group('leading_whitespace'),
                                         match.group('email'),
                                         match.group('whitespace_after_name'),
                                         match.group('name'),
                                         match.group('trailing_characters'))
        self._AddFix(token)

    elif (code == errors.WRONG_INDENTATION and
          not FLAGS.disable_indentation_fixing):
      token = tokenutil.GetFirstTokenInSameLine(token)
      actual = error.position.start
      expected = error.position.length

      # Handle cases where the first token is a parameter but has leading
      # spaces.
      if (len(token.string.lstrip()) == len(token.string) - actual and
          token.string.lstrip()):
        token.string = token.string.lstrip()
        actual = 0

      if token.type in (Type.WHITESPACE, Type.PARAMETERS) and actual != 0:
        token.string = token.string.lstrip() + (' ' * expected)
        self._AddFix([token])
      else:
        # We need to add indentation.
        new_token = Token(' ' * expected, Type.WHITESPACE,
                          token.line, token.line_number)
        # Note that we'll never need to add indentation at the first line,
        # since it is never indented.  Therefore it's safe to assume
        # token.previous exists.
        tokenutil.InsertTokenAfter(new_token, token.previous)
        self._AddFix([token])

    elif code in [errors.MALFORMED_END_OF_SCOPE_COMMENT,
                  errors.MISSING_END_OF_SCOPE_COMMENT]:
      # Only fix cases where }); is found with no trailing content on the line
      # other than a comment. Value of 'token' is set to } for this error.
      if (token.type == Type.END_BLOCK and
          token.next.type == Type.END_PAREN and
          token.next.next.type == Type.SEMICOLON):
        current_token = token.next.next.next
        removed_tokens = []
        while current_token and current_token.line_number == token.line_number:
          if current_token.IsAnyType(Type.WHITESPACE,
                                     Type.START_SINGLE_LINE_COMMENT,
                                     Type.COMMENT):
            removed_tokens.append(current_token)
            current_token = current_token.next
          else:
            return

        if removed_tokens:
          self._DeleteTokens(removed_tokens[0], len(removed_tokens))

        whitespace_token = Token('  ', Type.WHITESPACE, token.line,
                                 token.line_number)
        start_comment_token = Token('//', Type.START_SINGLE_LINE_COMMENT,
                                    token.line, token.line_number)
        comment_token = Token(' goog.scope', Type.COMMENT, token.line,
                              token.line_number)
        insertion_tokens = [whitespace_token, start_comment_token,
                            comment_token]

        tokenutil.InsertTokensAfter(insertion_tokens, token.next.next)
        self._AddFix(removed_tokens + insertion_tokens)

    elif code in [errors.EXTRA_GOOG_PROVIDE, errors.EXTRA_GOOG_REQUIRE]:
      tokens_in_line = tokenutil.GetAllTokensInSameLine(token)
      num_delete_tokens = len(tokens_in_line)
      # If the line being deleted is both preceded and followed by blank
      # lines, delete one of the blank lines as well.
      if (tokens_in_line[0].previous and tokens_in_line[-1].next
          and tokens_in_line[0].previous.type == Type.BLANK_LINE
          and tokens_in_line[-1].next.type == Type.BLANK_LINE):
        num_delete_tokens += 1
      self._DeleteTokens(tokens_in_line[0], num_delete_tokens)
      self._AddFix(tokens_in_line)

    elif code in [errors.MISSING_GOOG_PROVIDE, errors.MISSING_GOOG_REQUIRE]:
      missing_namespaces = error.fix_data[0]
      need_blank_line = error.fix_data[1] or (not token.previous)

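      # Insert an empty placeholder token so the new goog.provide/goog.require
      # lines can be added before 'token'; the placeholder is deleted again at
      # the end of this block.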
      insert_location = Token('', Type.NORMAL, '', token.line_number - 1)
      dummy_first_token = insert_location
      tokenutil.InsertTokenBefore(insert_location, token)

      # If inserting a blank line, make sure one does not already exist
      # before the insertion point, to avoid extra blank lines.
      if (need_blank_line and insert_location.previous
          and insert_location.previous.type != Type.BLANK_LINE):
        tokenutil.InsertBlankLineAfter(insert_location)
        insert_location = insert_location.next

      for missing_namespace in missing_namespaces:
        new_tokens = self._GetNewRequireOrProvideTokens(
            code == errors.MISSING_GOOG_PROVIDE,
            missing_namespace, insert_location.line_number + 1)
        tokenutil.InsertLineAfter(insert_location, new_tokens)
        insert_location = new_tokens[-1]
        self._AddFix(new_tokens)

      # If inserting a blank line, make sure one does not already exist
      # after the insertion point, to avoid extra blank lines.
      if (need_blank_line and insert_location.next
          and insert_location.next.type != Type.BLANK_LINE):
        tokenutil.InsertBlankLineAfter(insert_location)

      tokenutil.DeleteToken(dummy_first_token)
Example 5
  def CheckToken(self, token, state):
    """Checks a token for indentation errors.

    Args:
      token: The current token under consideration
      state: Additional information about the current tree state

    Returns:
      A list of error arrays [error code, error string, error token, position],
      one per indentation problem found. The list is empty when the
      indentation is correct.
    """

    token_type = token.type
    indentation_errors = []
    stack = self._stack
    is_first = self._IsFirstNonWhitespaceTokenInLine(token)

    # Add tokens that could decrease indentation before checking.
    if token_type == Type.END_PAREN:
      self._PopTo(Type.START_PAREN)

    elif token_type == Type.END_PARAMETERS:
      self._PopTo(Type.START_PARAMETERS)

    elif token_type == Type.END_BRACKET:
      self._PopTo(Type.START_BRACKET)

    elif token_type == Type.END_BLOCK:
      start_token = self._PopTo(Type.START_BLOCK)
      # Check for required goog.scope comment.
      if start_token:
        goog_scope = tokenutil.GoogScopeOrNoneFromStartBlock(start_token.token)
        if goog_scope is not None:
          if not token.line.endswith(';  // goog.scope\n'):
            if (token.line.find('//') > -1 and
                token.line.find('goog.scope') >
                token.line.find('//')):
              indentation_errors.append([
                  errors.MALFORMED_END_OF_SCOPE_COMMENT,
                  ('Malformed end of goog.scope comment. Please use the '
                   'exact following syntax to close the scope:\n'
                   '});  // goog.scope'),
                  token,
                  Position(token.start_index, token.length)])
            else:
              indentation_errors.append([
                  errors.MISSING_END_OF_SCOPE_COMMENT,
                  ('Missing comment for end of goog.scope which opened at line '
                   '%d. End the scope with:\n'
                   '});  // goog.scope' %
                   (start_token.line_number)),
                  token,
                  Position(token.start_index, token.length)])

    elif token_type == Type.KEYWORD and token.string in ('case', 'default'):
      self._Add(self._PopTo(Type.START_BLOCK))

    elif token_type == Type.SEMICOLON:
      self._PopTransient()

    if (is_first and
        token_type not in (Type.COMMENT, Type.DOC_PREFIX, Type.STRING_TEXT)):
      if flags.FLAGS.debug_indentation:
        print 'Line #%d: stack %r' % (token.line_number, stack)

      # Ignore lines that start in JsDoc since we don't check them properly yet.
      # TODO(robbyw): Support checking JsDoc indentation.
      # Ignore lines that start as multi-line strings since indentation is N/A.
      # Ignore lines that start with operators since we report that already.
      # Ignore lines with tabs since we report that already.
      expected = self._GetAllowableIndentations()
      actual = self._GetActualIndentation(token)

      # Special case comments describing else, case, and default.  Allow them
      # to outdent to the parent block.
      if token_type in Type.COMMENT_TYPES:
        next_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
        if next_code and next_code.type == Type.END_BLOCK:
          next_code = tokenutil.SearchExcept(next_code, Type.NON_CODE_TYPES)
        if next_code and next_code.string in ('else', 'case', 'default'):
          # TODO(robbyw): This almost certainly introduces false negatives.
          expected |= self._AddToEach(expected, -2)

      if actual >= 0 and actual not in expected:
        expected = sorted(expected)
        indentation_errors.append([
            errors.WRONG_INDENTATION,
            'Wrong indentation: expected any of {%s} but got %d' % (
                ', '.join('%d' % x for x in expected if x < 80), actual),
            token,
            Position(actual, expected[0])])
        self._start_index_offset[token.line_number] = expected[0] - actual

    # Add tokens that could increase indentation.
    if token_type == Type.START_BRACKET:
      self._Add(TokenInfo(
          token=token,
          is_block=token.metadata.context.type == Context.ARRAY_LITERAL))

    elif token_type == Type.START_BLOCK or token.metadata.is_implied_block:
      self._Add(TokenInfo(token=token, is_block=True))

    elif token_type in (Type.START_PAREN, Type.START_PARAMETERS):
      self._Add(TokenInfo(token=token, is_block=False))

    elif token_type == Type.KEYWORD and token.string == 'return':
      self._Add(TokenInfo(token))

    elif not token.IsLastInLine() and (
        token.IsAssignment() or token.IsOperator('?')):
      self._Add(TokenInfo(token=token))

    # Handle implied block closes.
    if token.metadata.is_implied_block_close:
      self._PopToImpliedBlock()

    # Add some tokens only if they appear at the end of the line.
    is_last = self._IsLastCodeInLine(token)
    if is_last:
      next_code_token = tokenutil.GetNextCodeToken(token)
      # Increase required indentation if this is an overlong wrapped statement
      # ending in an operator.
      if token_type == Type.OPERATOR:
        if token.string == ':':
          if stack and stack[-1].token.string == '?':
            # When a ternary : is on a different line than its '?', it doesn't
            # add indentation.
            if token.line_number == stack[-1].token.line_number:
              self._Add(TokenInfo(token))
          elif token.metadata.context.type == Context.CASE_BLOCK:
            # Pop transient tokens from say, line continuations, e.g.,
            # case x.
            #     y:
            # Want to pop the transient 4 space continuation indent.
            self._PopTransient()
            # Starting the body of the case statement, which is a type of
            # block.
            self._Add(TokenInfo(token=token, is_block=True))
          elif token.metadata.context.type == Context.LITERAL_ELEMENT:
            # When in an object literal, acts as operator indicating line
            # continuations.
            self._Add(TokenInfo(token))
          else:
            # ':' might also be a statement label, no effect on indentation in
            # this case.
            pass

        elif token.string != ',':
          self._Add(TokenInfo(token))
        else:
          # The token is a comma.
          if token.metadata.context.type == Context.VAR:
            self._Add(TokenInfo(token))
          elif token.metadata.context.type != Context.PARAMETERS:
            self._PopTransient()
      # Increase required indentation if this is the end of a statement that's
      # continued with an operator on the next line (e.g. the '.').
      elif (next_code_token and next_code_token.type == Type.OPERATOR and
            not next_code_token.metadata.IsUnaryOperator()):
        self._Add(TokenInfo(token))
      elif token_type == Type.PARAMETERS and token.string.endswith(','):
        # Parameter lists.
        self._Add(TokenInfo(token))
      elif token.IsKeyword('var'):
        self._Add(TokenInfo(token))
      elif token.metadata.is_implied_semicolon:
        self._PopTransient()
    elif token.IsAssignment():
      self._Add(TokenInfo(token))

    return indentation_errors