Example #1
    def _FixProvidesOrRequires(self, tokens):
        """Sorts goog.provide or goog.require statements.

        Args:
          tokens: A list of goog.provide or goog.require tokens in the order
                  they appear in the token stream, i.e. the first token in this
                  list must be the first goog.provide or goog.require token.
        """
        strings = self._GetRequireOrProvideTokenStrings(tokens)
        sorted_strings = sorted(strings)

        # A map from required/provided object name to tokens that make up the line
        # it was on, including any comments immediately before it or after it on the
        # same line.
        tokens_map = self._GetTokensMap(tokens)

        # Iterate over the map removing all tokens.
        for name in tokens_map:
            tokens_to_delete = tokens_map[name]
            for i in tokens_to_delete:
                tokenutil.DeleteToken(i)

        # Re-add all tokens in the map in alphabetical order.
        insert_after = tokens[0].previous
        for string in sorted_strings:
            for i in tokens_map[string]:
                tokenutil.InsertTokenAfter(i, insert_after)
                insert_after = i
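
A note on the data structure: both revisions of this sorter (this one and the
one in Example #2 below) lean on tokenutil's doubly linked token list. The
sketch below is a minimal illustration of the two list operations used above,
not closure-linter's actual implementation; only the previous/next fields and
the two function names are taken from the calls in the code.

class SketchToken(object):
    """Illustrative stand-in for a linter token in a doubly linked list."""

    def __init__(self, string):
        self.string = string
        self.previous = None
        self.next = None


def DeleteToken(token):
    # Unlink the token from its neighbors. The deleted token keeps its own
    # previous/next pointers, which is why the code above can continue
    # traversing from a token it has just deleted.
    if token.previous:
        token.previous.next = token.next
    if token.next:
        token.next.previous = token.previous


def InsertTokenAfter(new_token, token):
    # Splice new_token in immediately after token.
    new_token.previous = token
    new_token.next = token.next
    if token.next:
        token.next.previous = new_token
    token.next = new_token
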
Example #2
    def _FixProvidesOrRequires(self, tokens):
        """Sorts goog.provide or goog.require statements.

        Args:
          tokens: A list of goog.provide or goog.require tokens in the order
                  they appear in the token stream, i.e. the first token in this
                  list must be the first goog.provide or goog.require token.
        """
        strings = self._GetRequireOrProvideTokenStrings(tokens)
        sorted_strings = sorted(strings)

        # Make a separate pass to remove any blank lines between goog.require/
        # goog.provide tokens.
        first_token = tokens[0]
        last_token = tokens[-1]
        i = last_token
        while i != first_token:
            if i.type is Type.BLANK_LINE:
                tokenutil.DeleteToken(i)
            i = i.previous

        # A map from required/provided object name to tokens that make up the line
        # it was on, including any comments immediately before it or after it on the
        # same line.
        tokens_map = self._GetTokensMap(tokens)

        # Iterate over the map removing all tokens.
        for name in tokens_map:
            tokens_to_delete = tokens_map[name]
            for i in tokens_to_delete:
                tokenutil.DeleteToken(i)

        # Save the first token of the rest of the file. Sorted tokens will be
        # inserted before it.
        rest_of_file = tokens_map[strings[-1]][-1].next

        # Re-add all tokens in the map in alphabetical order.
        insert_after = tokens[0].previous
        for string in sorted_strings:
            for i in tokens_map[string]:
                if rest_of_file:
                    tokenutil.InsertTokenBefore(i, rest_of_file)
                else:
                    tokenutil.InsertTokenAfter(i, insert_after)
                    insert_after = i
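
This revision differs from Example #1 in two ways: it first strips blank lines
between the statements, and it anchors re-insertion on rest_of_file so the
sorted block lands exactly where the original block ended. The effect,
distilled to plain strings (illustrative only; the real code moves whole token
runs and keeps same-line comments attached to their statement):

lines = [
    "goog.require('b.c');",
    "goog.require('a.d');  // a comment stays with its line",
    "goog.require('a.b');",
]
by_namespace = dict((line.split("'")[1], line) for line in lines)
sorted_lines = [by_namespace[ns] for ns in sorted(by_namespace)]
# sorted_lines now begins with the goog.require('a.b') line.
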
Example #3
  def _StripSpace(self, token, before):
    """Strip whitespace tokens either preceding or following the given token.

    Args:
      token: The token.
      before: If true, strip space before the token; if false, after it.
    """
    token = token.previous if before else token.next
    while token and token.type == Type.WHITESPACE:
      tokenutil.DeleteToken(token)
      token = token.previous if before else token.next
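
The same deletion loop, restated over a plain Python list for illustration
(hypothetical helper; the real code walks the linked list, which is what lets
it keep stepping from a token it has just deleted):

def strip_space(tokens, index, before):
    # tokens: a list of (token_type, string) pairs; index: the anchor token.
    if before:
        i = index - 1
        while i >= 0 and tokens[i][0] == 'WHITESPACE':
            del tokens[i]
            i -= 1
    else:
        i = index + 1
        while i < len(tokens) and tokens[i][0] == 'WHITESPACE':
            # After deletion the next token slides into position i,
            # so i is not advanced.
            del tokens[i]
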
Example #4
  def _DeleteToken(self, token):
    """Deletes the specified token from the linked list of tokens.

    Updates instance variables pointing to tokens such as _file_token if
    they reference the deleted token.

    Args:
      token: The token to delete.
    """
    if token == self._file_token:
      self._file_token = token.next

    tokenutil.DeleteToken(token)
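
The point of this wrapper is reference fix-up: if the deleted token is the one
_file_token points at, the saved reference would dangle, so it is advanced
first. Sketched with the illustrative SketchToken/DeleteToken helpers from the
note after Example #1:

head = SketchToken('a')
second = SketchToken('b')
head.next, second.previous = second, head

file_token = head                 # plays the role of self._file_token
if file_token is head:
    file_token = file_token.next  # re-point before unlinking
DeleteToken(head)
assert file_token is second
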
Example #5
  def _FixJsDocPipeNull(self, js_type):
    """Change number|null or null|number to ?number.

    Args:
      js_type: The typeannotation.TypeAnnotation instance to fix.
    """

    # Recurse into all sub_types if the error was at a deeper level.
    map(self._FixJsDocPipeNull, js_type.IterTypes())

    if js_type.type_group and len(js_type.sub_types) == 2:
      # Find and remove the null sub_type:
      sub_type = None
      for sub_type in js_type.sub_types:
        if sub_type.identifier == 'null':
          map(tokenutil.DeleteToken, sub_type.tokens)
          self._AddFix(sub_type.tokens)
          break
      else:
        return

      first_token = js_type.FirstToken()
      question_mark = Token('?', Type.DOC_TYPE_MODIFIER, first_token.line,
                            first_token.line_number)
      tokenutil.InsertTokenBefore(question_mark, first_token)
      js_type.tokens.insert(0, question_mark)
      js_type.tokens.remove(sub_type)
      js_type.sub_types.remove(sub_type)
      js_type.or_null = True

      # Now also remove the separator, which is in the parent's token list. It
      # sits either before or after the sub_type; there is exactly one. Scan
      # for it.
      for token in js_type.tokens:
        if (token and isinstance(token, Token) and
            token.type == Type.DOC_TYPE_MODIFIER and token.string == '|'):
          tokenutil.DeleteToken(token)
          js_type.tokens.remove(token)
          self._AddFix(token)
          break
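
One subtlety above is the for/else around the 'null' search: the else branch
runs only when the loop finishes without hitting break, i.e. when no 'null'
sub_type was found and there is nothing to rewrite. A minimal illustration:

for sub_type in ['number', 'string']:
    if sub_type == 'null':
        break   # found: fall through to the rewrite
else:
    pass        # no 'null' present: mirrors the early return above
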
Example #6
  def HandleError(self, error):
    """Attempts to fix the error.

    Args:
      error: The error object.
    """
    code = error.code
    token = error.token

    if code == errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL:
      iterator = token.attached_object.type_start_token
      if iterator.type == Type.DOC_START_BRACE or iterator.string.isspace():
        iterator = iterator.next

      leading_space = len(iterator.string) - len(iterator.string.lstrip())
      iterator.string = '%s?%s' % (' ' * leading_space,
                                   iterator.string.lstrip())

      # Cover the no outer brace case where the end token is part of the type.
      while iterator and iterator != token.attached_object.type_end_token.next:
        iterator.string = iterator.string.replace(
            'null|', '').replace('|null', '')
        iterator = iterator.next

      # Create a new flag object with updated type info.
      token.attached_object = javascriptstatetracker.JsDocFlag(token)
      self._AddFix(token)

    elif code == errors.JSDOC_MISSING_OPTIONAL_TYPE:
      iterator = token.attached_object.type_end_token
      if iterator.type == Type.DOC_END_BRACE or iterator.string.isspace():
        iterator = iterator.previous

      ending_space = len(iterator.string) - len(iterator.string.rstrip())
      iterator.string = '%s=%s' % (iterator.string.rstrip(),
                                   ' ' * ending_space)

      # Create a new flag object with updated type info.
      token.attached_object = javascriptstatetracker.JsDocFlag(token)
      self._AddFix(token)

    elif code in (errors.MISSING_SEMICOLON_AFTER_FUNCTION,
                  errors.MISSING_SEMICOLON):
      semicolon_token = Token(';', Type.SEMICOLON, token.line,
                              token.line_number)
      tokenutil.InsertTokenAfter(semicolon_token, token)
      token.metadata.is_implied_semicolon = False
      semicolon_token.metadata.is_implied_semicolon = False
      self._AddFix(token)

    elif code in (errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
                  errors.REDUNDANT_SEMICOLON,
                  errors.COMMA_AT_END_OF_LITERAL):
      tokenutil.DeleteToken(token)
      self._AddFix(token)

    elif code == errors.INVALID_JSDOC_TAG:
      if token.string == '@returns':
        token.string = '@return'
        self._AddFix(token)

    elif code == errors.FILE_MISSING_NEWLINE:
      # This error is fixed implicitly by the way we restore the file
      self._AddFix(token)

    elif code == errors.MISSING_SPACE:
      if error.position:
        if error.position.IsAtBeginning():
          tokenutil.InsertSpaceTokenAfter(token.previous)
        elif error.position.IsAtEnd(token.string):
          tokenutil.InsertSpaceTokenAfter(token)
        else:
          token.string = error.position.Set(token.string, ' ')
        self._AddFix(token)

    elif code == errors.EXTRA_SPACE:
      if error.position:
        token.string = error.position.Set(token.string, '')
        self._AddFix(token)

    elif code == errors.JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER:
      token.string = error.position.Set(token.string, '.')
      self._AddFix(token)

    elif code == errors.MISSING_LINE:
      if error.position.IsAtBeginning():
        tokenutil.InsertBlankLineAfter(token.previous)
      else:
        tokenutil.InsertBlankLineAfter(token)
      self._AddFix(token)

    elif code == errors.EXTRA_LINE:
      tokenutil.DeleteToken(token)
      self._AddFix(token)

    elif code == errors.WRONG_BLANK_LINE_COUNT:
      if not token.previous:
        # TODO(user): Add an insertBefore method to tokenutil.
        return

      num_lines = error.fix_data
      should_delete = False

      if num_lines < 0:
        num_lines *= -1
        should_delete = True

      for i in xrange(1, num_lines + 1):
        if should_delete:
          # TODO(user): DeleteToken should update line numbers.
          tokenutil.DeleteToken(token.previous)
        else:
          tokenutil.InsertBlankLineAfter(token.previous)
        self._AddFix(token)

    elif code == errors.UNNECESSARY_DOUBLE_QUOTED_STRING:
      end_quote = tokenutil.Search(token, Type.DOUBLE_QUOTE_STRING_END)
      if end_quote:
        single_quote_start = Token(
            "'", Type.SINGLE_QUOTE_STRING_START, token.line, token.line_number)
        single_quote_end = Token(
            "'", Type.SINGLE_QUOTE_STRING_END, end_quote.line,
            end_quote.line_number)

        tokenutil.InsertTokenAfter(single_quote_start, token)
        tokenutil.InsertTokenAfter(single_quote_end, end_quote)
        tokenutil.DeleteToken(token)
        tokenutil.DeleteToken(end_quote)
        self._AddFix([token, end_quote])

    elif code == errors.MISSING_BRACES_AROUND_TYPE:
      fixed_tokens = []
      start_token = token.attached_object.type_start_token

      if start_token.type != Type.DOC_START_BRACE:
        leading_space = (
            len(start_token.string) - len(start_token.string.lstrip()))
        if leading_space:
          start_token = tokenutil.SplitToken(start_token, leading_space)
          # Fix case where start and end token were the same.
          if token.attached_object.type_end_token == start_token.previous:
            token.attached_object.type_end_token = start_token

        new_token = Token('{', Type.DOC_START_BRACE, start_token.line,
                          start_token.line_number)
        tokenutil.InsertTokenAfter(new_token, start_token.previous)
        token.attached_object.type_start_token = new_token
        fixed_tokens.append(new_token)

      end_token = token.attached_object.type_end_token
      if end_token.type != Type.DOC_END_BRACE:
        # If the start token was a brace, the end token will be a
        # FLAG_ENDING_TYPE token; if there wasn't a starting brace then
        # the end token is the last token of the actual type.
        last_type = end_token
        if not fixed_tokens:
          last_type = end_token.previous

        while last_type.string.isspace():
          last_type = last_type.previous

        # If there was no starting brace then a lone end brace wouldn't have
        # been the type end token. Now that we've added any missing start
        # brace, see if the last effective type token was an end brace.
        if last_type.type != Type.DOC_END_BRACE:
          trailing_space = (len(last_type.string) -
                            len(last_type.string.rstrip()))
          if trailing_space:
            tokenutil.SplitToken(last_type,
                                 len(last_type.string) - trailing_space)

          new_token = Token('}', Type.DOC_END_BRACE, last_type.line,
                            last_type.line_number)
          tokenutil.InsertTokenAfter(new_token, last_type)
          token.attached_object.type_end_token = new_token
          fixed_tokens.append(new_token)

      self._AddFix(fixed_tokens)

    elif code == errors.GOOG_REQUIRES_NOT_ALPHABETIZED:
      require_start_token = error.fix_data
      sorter = requireprovidesorter.RequireProvideSorter()
      sorter.FixRequires(require_start_token)

      self._AddFix(require_start_token)

    elif code == errors.GOOG_PROVIDES_NOT_ALPHABETIZED:
      provide_start_token = error.fix_data
      sorter = requireprovidesorter.RequireProvideSorter()
      sorter.FixProvides(provide_start_token)

      self._AddFix(provide_start_token)

    elif code == errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC:
      if token.previous.string == '{' and token.next.string == '}':
        tokenutil.DeleteToken(token.previous)
        tokenutil.DeleteToken(token.next)
        self._AddFix([token])

    elif code == errors.INVALID_AUTHOR_TAG_DESCRIPTION:
      match = INVERTED_AUTHOR_SPEC.match(token.string)
      if match:
        token.string = '%s%s%s(%s)%s' % (match.group('leading_whitespace'),
                                         match.group('email'),
                                         match.group('whitespace_after_name'),
                                         match.group('name'),
                                         match.group('trailing_characters'))
        self._AddFix(token)

    elif (code == errors.WRONG_INDENTATION and
          not FLAGS.disable_indentation_fixing):
      token = tokenutil.GetFirstTokenInSameLine(token)
      actual = error.position.start
      expected = error.position.length

      if token.type in (Type.WHITESPACE, Type.PARAMETERS) and actual != 0:
        token.string = token.string.lstrip() + (' ' * expected)
        self._AddFix([token])
      else:
        # We need to add indentation.
        new_token = Token(' ' * expected, Type.WHITESPACE,
                          token.line, token.line_number)
        # Note that we'll never need to add indentation at the first line,
        # since it will never be indented.  Therefore it's safe to assume
        # token.previous exists.
        tokenutil.InsertTokenAfter(new_token, token.previous)
        self._AddFix([token])

    elif code in [errors.MALFORMED_END_OF_SCOPE_COMMENT,
                  errors.MISSING_END_OF_SCOPE_COMMENT]:
      # Only fix cases where }); is found with no trailing content on the line
      # other than a comment. Value of 'token' is set to } for this error.
      if (token.type == Type.END_BLOCK and
          token.next.type == Type.END_PAREN and
          token.next.next.type == Type.SEMICOLON):
        current_token = token.next.next.next
        removed_tokens = []
        while current_token and current_token.line_number == token.line_number:
          if current_token.IsAnyType(Type.WHITESPACE,
                                     Type.START_SINGLE_LINE_COMMENT,
                                     Type.COMMENT):
            removed_tokens.append(current_token)
            current_token = current_token.next
          else:
            return

        if removed_tokens:
          tokenutil.DeleteTokens(removed_tokens[0], len(removed_tokens))

        whitespace_token = Token('  ', Type.WHITESPACE, token.line,
                                 token.line_number)
        start_comment_token = Token('//', Type.START_SINGLE_LINE_COMMENT,
                                    token.line, token.line_number)
        comment_token = Token(' goog.scope', Type.COMMENT, token.line,
                              token.line_number)
        insertion_tokens = [whitespace_token, start_comment_token,
                            comment_token]

        tokenutil.InsertTokensAfter(insertion_tokens, token.next.next)
        self._AddFix(removed_tokens + insertion_tokens)

    elif code in [errors.EXTRA_GOOG_PROVIDE, errors.EXTRA_GOOG_REQUIRE]:
      tokens_in_line = tokenutil.GetAllTokensInSameLine(token)
      tokenutil.DeleteTokens(tokens_in_line[0], len(tokens_in_line))
      self._AddFix(tokens_in_line)

    elif code in [errors.MISSING_GOOG_PROVIDE, errors.MISSING_GOOG_REQUIRE]:
      is_provide = code == errors.MISSING_GOOG_PROVIDE
      is_require = code == errors.MISSING_GOOG_REQUIRE

      missing_namespaces = error.fix_data[0]
      need_blank_line = error.fix_data[1]

      if need_blank_line is None:
        # TODO(user): This happens when there are no existing
        # goog.provide or goog.require statements to position new statements
        # relative to. Consider handling this case with a heuristic.
        return

      insert_location = token.previous

      # If inserting a missing require with no existing requires, insert a
      # blank line first.
      if need_blank_line and is_require:
        tokenutil.InsertBlankLineAfter(insert_location)
        insert_location = insert_location.next

      for missing_namespace in missing_namespaces:
        new_tokens = self._GetNewRequireOrProvideTokens(
            is_provide, missing_namespace, insert_location.line_number + 1)
        tokenutil.InsertLineAfter(insert_location, new_tokens)
        insert_location = new_tokens[-1]
        self._AddFix(new_tokens)

      # If inserting a missing provide with no existing provides, insert a
      # blank line after.
      if need_blank_line and is_provide:
        tokenutil.InsertBlankLineAfter(insert_location)
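
The JSDOC_PREFER_QUESTION_TO_PIPE_NULL branch at the top of this method boils
down to a string rewrite. Distilled into a hypothetical helper (not part of
the fixer):

def prefer_question(type_str):
    # '{number|null}' and '{null|number}' both become '{?number}'.
    inner = type_str.strip('{}')
    inner = inner.replace('null|', '').replace('|null', '')
    return '{?%s}' % inner

assert prefer_question('{number|null}') == '{?number}'
assert prefer_question('{null|number}') == '{?number}'
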
Example #7
    def HandleError(self, error):
        """Attempts to fix the error.

        Args:
          error: The error object.
        """
        code = error.code
        token = error.token

        if code == errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL:
            iterator = token.attached_object.type_start_token
            if (iterator.type == Type.DOC_START_BRACE or
                    iterator.string.isspace()):
                iterator = iterator.next

            leading_space = len(iterator.string) - len(
                iterator.string.lstrip())
            iterator.string = '%s?%s' % (' ' * leading_space,
                                         iterator.string.lstrip())

            # Cover the no outer brace case where the end token is part of the type.
            while iterator and iterator != token.attached_object.type_end_token.next:
                iterator.string = iterator.string.replace('null|', '').replace(
                    '|null', '')
                iterator = iterator.next

            # Create a new flag object with updated type info.
            token.attached_object = javascriptstatetracker.JsDocFlag(token)
            self._AddFix(token)

        elif code in (errors.MISSING_SEMICOLON_AFTER_FUNCTION,
                      errors.MISSING_SEMICOLON):
            semicolon_token = Token(';', Type.SEMICOLON, token.line,
                                    token.line_number)
            tokenutil.InsertTokenAfter(semicolon_token, token)
            token.metadata.is_implied_semicolon = False
            semicolon_token.metadata.is_implied_semicolon = False
            self._AddFix(token)

        elif code in (errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
                      errors.REDUNDANT_SEMICOLON,
                      errors.COMMA_AT_END_OF_LITERAL):
            tokenutil.DeleteToken(token)
            self._AddFix(token)

        elif code == errors.INVALID_JSDOC_TAG:
            if token.string == '@returns':
                token.string = '@return'
                self._AddFix(token)

        elif code == errors.FILE_MISSING_NEWLINE:
            # This error is fixed implicitly by the way we restore the file
            self._AddFix(token)

        elif code == errors.MISSING_SPACE:
            if error.position:
                if error.position.IsAtBeginning():
                    tokenutil.InsertSpaceTokenAfter(token.previous)
                elif error.position.IsAtEnd(token.string):
                    tokenutil.InsertSpaceTokenAfter(token)
                else:
                    token.string = error.position.Set(token.string, ' ')
                self._AddFix(token)

        elif code == errors.EXTRA_SPACE:
            if error.position:
                token.string = error.position.Set(token.string, '')
                self._AddFix(token)

        elif code == errors.JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER:
            token.string = error.position.Set(token.string, '.')
            self._AddFix(token)

        elif code == errors.MISSING_LINE:
            if error.position.IsAtBeginning():
                tokenutil.InsertLineAfter(token.previous)
            else:
                tokenutil.InsertLineAfter(token)
            self._AddFix(token)

        elif code == errors.EXTRA_LINE:
            tokenutil.DeleteToken(token)
            self._AddFix(token)

        elif code == errors.WRONG_BLANK_LINE_COUNT:
            if not token.previous:
                # TODO(user): Add an insertBefore method to tokenutil.
                return

            num_lines = error.fix_data
            should_delete = False

            if num_lines < 0:
                num_lines = num_lines * -1
                should_delete = True

            for i in xrange(1, num_lines + 1):
                if should_delete:
                    # TODO(user): DeleteToken should update line numbers.
                    tokenutil.DeleteToken(token.previous)
                else:
                    tokenutil.InsertLineAfter(token.previous)
                self._AddFix(token)

        elif code == errors.UNNECESSARY_DOUBLE_QUOTED_STRING:
            end_quote = tokenutil.Search(token, Type.DOUBLE_QUOTE_STRING_END)
            if end_quote:
                single_quote_start = Token("'", Type.SINGLE_QUOTE_STRING_START,
                                           token.line, token.line_number)
                single_quote_end = Token("'", Type.SINGLE_QUOTE_STRING_START,
                                         end_quote.line, token.line_number)

                tokenutil.InsertTokenAfter(single_quote_start, token)
                tokenutil.InsertTokenAfter(single_quote_end, end_quote)
                tokenutil.DeleteToken(token)
                tokenutil.DeleteToken(end_quote)
                self._AddFix([token, end_quote])

        elif code == errors.MISSING_BRACES_AROUND_TYPE:
            fixed_tokens = []
            start_token = token.attached_object.type_start_token

            if start_token.type != Type.DOC_START_BRACE:
                leading_space = (len(start_token.string) -
                                 len(start_token.string.lstrip()))
                if leading_space:
                    start_token = tokenutil.SplitToken(start_token,
                                                       leading_space)
                    # Fix case where start and end token were the same.
                    if token.attached_object.type_end_token == start_token.previous:
                        token.attached_object.type_end_token = start_token

                new_token = Token("{", Type.DOC_START_BRACE, start_token.line,
                                  start_token.line_number)
                tokenutil.InsertTokenAfter(new_token, start_token.previous)
                token.attached_object.type_start_token = new_token
                fixed_tokens.append(new_token)

            end_token = token.attached_object.type_end_token
            if end_token.type != Type.DOC_END_BRACE:
                # If the start token was a brace, the end token will be a
                # FLAG_ENDING_TYPE token; if there wasn't a starting brace then
                # the end token is the last token of the actual type.
                last_type = end_token
                if not fixed_tokens:
                    last_type = end_token.previous

                while last_type.string.isspace():
                    last_type = last_type.previous

                # If there was no starting brace then a lone end brace wouldn't
                # have been the type end token. Now that we've added any
                # missing start brace, see if the last effective type token was
                # an end brace.
                if last_type.type != Type.DOC_END_BRACE:
                    trailing_space = (len(last_type.string) -
                                      len(last_type.string.rstrip()))
                    if trailing_space:
                        tokenutil.SplitToken(
                            last_type,
                            len(last_type.string) - trailing_space)

                    new_token = Token("}", Type.DOC_END_BRACE, last_type.line,
                                      last_type.line_number)
                    tokenutil.InsertTokenAfter(new_token, last_type)
                    token.attached_object.type_end_token = new_token
                    fixed_tokens.append(new_token)

            self._AddFix(fixed_tokens)

        elif code in (errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
                      errors.GOOG_PROVIDES_NOT_ALPHABETIZED):
            tokens = error.fix_data
            strings = map(lambda x: x.string, tokens)
            sorted_strings = sorted(strings)

            index = 0
            changed_tokens = []
            for token in tokens:
                if token.string != sorted_strings[index]:
                    token.string = sorted_strings[index]
                    changed_tokens.append(token)
                index += 1

            self._AddFix(changed_tokens)

        elif code == errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC:
            if token.previous.string == '{' and token.next.string == '}':
                tokenutil.DeleteToken(token.previous)
                tokenutil.DeleteToken(token.next)
                self._AddFix([token])

        elif (code == errors.WRONG_INDENTATION
              and not FLAGS.disable_indentation_fixing):
            token = tokenutil.GetFirstTokenInSameLine(token)
            actual = error.position.start
            expected = error.position.length

            if token.type in (Type.WHITESPACE, Type.PARAMETERS):
                token.string = token.string.lstrip() + (' ' * expected)
                self._AddFix([token])
            else:
                # We need to add indentation.
                new_token = Token(' ' * expected, Type.WHITESPACE, token.line,
                                  token.line_number)
                # Note that we'll never need to add indentation at the first
                # line, since it will never be indented.  Therefore it's safe
                # to assume token.previous exists.
                tokenutil.InsertTokenAfter(new_token, token.previous)
                self._AddFix([token])

        elif code == errors.EXTRA_REQUIRE:
            fixed_tokens = []
            while token:
                if token.type == Type.IDENTIFIER:
                    if token.string not in ['goog.require', 'goog.provide']:
                        # Stop iterating over tokens once we're out of the requires and
                        # provides.
                        break
                    if token.string == 'goog.require':
                        # Text of form: goog.require('required'), skipping past open paren
                        # and open quote to the string text.
                        required = token.next.next.next.string
                        if required in error.fix_data:
                            fixed_tokens.append(token)
                            # Want to delete: goog.require + open paren + open single-quote +
                            # text + close single-quote + close paren + semi-colon = 7.
                            tokenutil.DeleteTokens(token, 7)
                token = token.next

            self._AddFix(fixed_tokens)
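
The magic number 7 in the EXTRA_REQUIRE branch counts the tokens that make up
one require line, per the comment in the code. Spelled out on an illustrative
list of token strings:

require_line_tokens = [
    'goog.require',  # 1: the IDENTIFIER token
    '(',             # 2: open paren
    "'",             # 3: open single-quote
    'goog.dom',      # 4: the required namespace text
    "'",             # 5: close single-quote
    ')',             # 6: close paren
    ';',             # 7: semicolon
]
assert len(require_line_tokens) == 7
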