def fixRequiresTest_withTestOnly(self, position):
        """Regression-tests sorting even with a goog.setTestOnly statement.

        Args:
          position: The index in the list at which to insert the
                    goog.setTestOnly statement. Varied by callers to cover all
                    possible placements for this test.
        """
        input_lines = [
            'goog.provide(\'package.subpackage.Whatever\');', '',
            'goog.require(\'package.subpackage.ClassB\');',
            'goog.require(\'package.subpackage.ClassA\');'
        ]
        expected_lines = [
            'goog.provide(\'package.subpackage.Whatever\');', '',
            'goog.require(\'package.subpackage.ClassA\');',
            'goog.require(\'package.subpackage.ClassB\');'
        ]
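        # goog.setTestOnly() is inserted at the same index in both lists, so a
        # correct sort must leave it in place while reordering the requires.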
        input_lines.insert(position, 'goog.setTestOnly();')
        expected_lines.insert(position, 'goog.setTestOnly();')

        token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)

        sorter = requireprovidesorter.RequireProvideSorter()
        sorter.FixRequires(token)

        self.assertEquals(expected_lines, self._GetLines(token))

    def _CheckSortedRequiresProvides(self, token):
        """Checks that all goog.require and goog.provide statements are sorted.

        Note that this method needs to be run after any missing statements are
        added, so that alphabetical order is checked over the complete set of
        statements.

        Args:
          token: The first token in the token stream.
        """
        sorter = requireprovidesorter.RequireProvideSorter()
        first_provide_token = sorter.CheckProvides(token)
        if first_provide_token:
            new_order = sorter.GetFixedProvideString(first_provide_token)
            self._HandleError(
                errors.GOOG_PROVIDES_NOT_ALPHABETIZED,
                'goog.provide classes must be alphabetized.  The correct code is:\n'
                + new_order,
                first_provide_token,
                position=Position.AtBeginning(),
                fix_data=first_provide_token)

        first_require_token = sorter.CheckRequires(token)
        if first_require_token:
            new_order = sorter.GetFixedRequireString(first_require_token)
            self._HandleError(
                errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
                'goog.require classes must be alphabetized.  The correct code is:\n'
                + new_order,
                first_require_token,
                position=Position.AtBeginning(),
                fix_data=first_require_token)

    def testFixRequires_removeBlankLines(self):
        """Tests that blank lines are omitted in sorted goog.require statements."""
        input_lines = [
            'goog.provide(\'package.subpackage.Whatever\');', '',
            'goog.require(\'package.subpackage.ClassB\');', '',
            'goog.require(\'package.subpackage.ClassA\');'
        ]
        expected_lines = [
            'goog.provide(\'package.subpackage.Whatever\');', '',
            'goog.require(\'package.subpackage.ClassA\');',
            'goog.require(\'package.subpackage.ClassB\');'
        ]
        token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)

        sorter = requireprovidesorter.RequireProvideSorter()
        sorter.FixRequires(token)

        self.assertEquals(expected_lines, self._GetLines(token))

    def testGetFixedRequireString(self):
        """Tests that fixed string constains proper comments also."""
        input_lines = [
            'goog.require(\'package.xyz\');',
            '/** This is needed for scope. **/',
            'goog.require(\'package.abcd\');'
        ]

        expected_lines = [
            '/** This is needed for scope. **/',
            'goog.require(\'package.abcd\');', 'goog.require(\'package.xyz\');'
        ]

        token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)

        sorter = requireprovidesorter.RequireProvideSorter()
        fixed_require_string = sorter.GetFixedRequireString(token)

        self.assertEquals(expected_lines, fixed_require_string.splitlines())
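
    # A minimal sketch (not from the original suite) showing how CheckRequires
    # reports unsorted requires; it reuses only helpers that appear above.
    def testCheckRequires_outOfOrder_sketch(self):
        """Illustrative sketch: CheckRequires flags unsorted requires.

        Assumes, consistent with _CheckSortedRequiresProvides below, that
        CheckRequires returns the first goog.require token when the requires
        are not alphabetized, and a falsy value otherwise.
        """
        input_lines = [
            'goog.require(\'package.subpackage.ClassB\');',
            'goog.require(\'package.subpackage.ClassA\');'
        ]
        token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)

        sorter = requireprovidesorter.RequireProvideSorter()
        # A truthy return value is what triggers GOOG_REQUIRES_NOT_ALPHABETIZED
        # in the _CheckSortedRequiresProvides handler.
        self.assertTrue(sorter.CheckRequires(token))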

# Example #5: an error-fixer HandleError method (enclosing class not shown).
    def HandleError(self, error):
        """Attempts to fix the error.

        Args:
          error: The error object.
        """
        code = error.code
        token = error.token

        if code == errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL:
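            # Rewrite a 'null|' or '|null' union as a leading '?' on the type,
            # e.g. '{Object|null}' becomes '{?Object}'.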
            iterator = token.attached_object.type_start_token
            if (iterator.type == Type.DOC_START_BRACE or
                    iterator.string.isspace()):
                iterator = iterator.next

            leading_space = len(iterator.string) - len(
                iterator.string.lstrip())
            iterator.string = '%s?%s' % (' ' * leading_space,
                                         iterator.string.lstrip())

            # Cover the no outer brace case where the end token is part of the type.
            while iterator and iterator != token.attached_object.type_end_token.next:
                iterator.string = iterator.string.replace('null|', '').replace(
                    '|null', '')
                iterator = iterator.next

            # Create a new flag object with updated type info.
            token.attached_object = javascriptstatetracker.JsDocFlag(token)
            self._AddFix(token)

        elif code == errors.JSDOC_MISSING_OPTIONAL_TYPE:
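            # Append '=' to the type to mark the parameter as optional, e.g.
            # '{string}' becomes '{string=}'.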
            iterator = token.attached_object.type_end_token
            if (iterator.type == Type.DOC_END_BRACE or
                    iterator.string.isspace()):
                iterator = iterator.previous

            ending_space = len(iterator.string) - len(iterator.string.rstrip())
            iterator.string = '%s=%s' % (iterator.string.rstrip(),
                                         ' ' * ending_space)

            # Create a new flag object with updated type info.
            token.attached_object = javascriptstatetracker.JsDocFlag(token)
            self._AddFix(token)

        elif code == errors.JSDOC_MISSING_VAR_ARGS_TYPE:
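            # Prepend '...' to the type for var_args parameters, e.g.
            # '{string}' becomes '{...string}'.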
            iterator = token.attached_object.type_start_token
            if (iterator.type == Type.DOC_START_BRACE or
                    iterator.string.isspace()):
                iterator = iterator.next

            starting_space = len(iterator.string) - len(
                iterator.string.lstrip())
            iterator.string = '%s...%s' % (' ' * starting_space,
                                           iterator.string.lstrip())

            # Create a new flag object with updated type info.
            token.attached_object = javascriptstatetracker.JsDocFlag(token)
            self._AddFix(token)

        elif code in (errors.MISSING_SEMICOLON_AFTER_FUNCTION,
                      errors.MISSING_SEMICOLON):
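            # Insert an explicit ';' token and mark both tokens as real rather
            # than implied semicolons.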
            semicolon_token = Token(';', Type.SEMICOLON, token.line,
                                    token.line_number)
            tokenutil.InsertTokenAfter(semicolon_token, token)
            token.metadata.is_implied_semicolon = False
            semicolon_token.metadata.is_implied_semicolon = False
            self._AddFix(token)

        elif code in (errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
                      errors.REDUNDANT_SEMICOLON,
                      errors.COMMA_AT_END_OF_LITERAL):
            self._DeleteToken(token)
            self._AddFix(token)

        elif code == errors.INVALID_JSDOC_TAG:
            if token.string == '@returns':
                token.string = '@return'
                self._AddFix(token)

        elif code == errors.FILE_MISSING_NEWLINE:
            # This error is fixed implicitly by the way we restore the file.
            self._AddFix(token)

        elif code == errors.MISSING_SPACE:
            if error.fix_data:
                token.string = error.fix_data
                self._AddFix(token)
            elif error.position:
                if error.position.IsAtBeginning():
                    tokenutil.InsertSpaceTokenAfter(token.previous)
                elif error.position.IsAtEnd(token.string):
                    tokenutil.InsertSpaceTokenAfter(token)
                else:
                    token.string = error.position.Set(token.string, ' ')
                self._AddFix(token)

        elif code == errors.EXTRA_SPACE:
            if error.position:
                token.string = error.position.Set(token.string, '')
                self._AddFix(token)

        elif code == errors.MISSING_LINE:
            if error.position.IsAtBeginning():
                tokenutil.InsertBlankLineAfter(token.previous)
            else:
                tokenutil.InsertBlankLineAfter(token)
            self._AddFix(token)

        elif code == errors.EXTRA_LINE:
            self._DeleteToken(token)
            self._AddFix(token)

        elif code == errors.WRONG_BLANK_LINE_COUNT:
            if not token.previous:
                # TODO(user): Add an insertBefore method to tokenutil.
                return

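            # fix_data holds the signed blank-line delta: a positive count
            # means lines are missing, a negative count means extra lines must
            # be deleted.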
            num_lines = error.fix_data
            should_delete = False

            if num_lines < 0:
                num_lines *= -1
                should_delete = True

            for unused_i in xrange(1, num_lines + 1):
                if should_delete:
                    # TODO(user): DeleteToken should update line numbers.
                    self._DeleteToken(token.previous)
                else:
                    tokenutil.InsertBlankLineAfter(token.previous)
                self._AddFix(token)

        elif code == errors.UNNECESSARY_DOUBLE_QUOTED_STRING:
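            # Replace the double-quote delimiters with single quotes, leaving
            # the string contents untouched.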
            end_quote = tokenutil.Search(token, Type.DOUBLE_QUOTE_STRING_END)
            if end_quote:
                single_quote_start = Token("'", Type.SINGLE_QUOTE_STRING_START,
                                           token.line, token.line_number)
                single_quote_end = Token("'", Type.SINGLE_QUOTE_STRING_START,
                                         end_quote.line, token.line_number)

                tokenutil.InsertTokenAfter(single_quote_start, token)
                tokenutil.InsertTokenAfter(single_quote_end, end_quote)
                self._DeleteToken(token)
                self._DeleteToken(end_quote)
                self._AddFix([token, end_quote])

        elif code == errors.MISSING_BRACES_AROUND_TYPE:
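            # Wrap a brace-less JsDoc type in '{' and '}', splitting off any
            # surrounding whitespace so the braces hug the type.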
            fixed_tokens = []
            start_token = token.attached_object.type_start_token

            if start_token.type != Type.DOC_START_BRACE:
                leading_space = (len(start_token.string) -
                                 len(start_token.string.lstrip()))
                if leading_space:
                    start_token = tokenutil.SplitToken(start_token,
                                                       leading_space)
                    # Fix case where start and end token were the same.
                    if token.attached_object.type_end_token == start_token.previous:
                        token.attached_object.type_end_token = start_token

                new_token = Token('{', Type.DOC_START_BRACE, start_token.line,
                                  start_token.line_number)
                tokenutil.InsertTokenAfter(new_token, start_token.previous)
                token.attached_object.type_start_token = new_token
                fixed_tokens.append(new_token)

            end_token = token.attached_object.type_end_token
            if end_token.type != Type.DOC_END_BRACE:
                # If the start token was a brace, the end token will be a
                # FLAG_ENDING_TYPE token, if there wasn't a starting brace then
                # the end token is the last token of the actual type.
                last_type = end_token
                if not fixed_tokens:
                    last_type = end_token.previous

                while last_type.string.isspace():
                    last_type = last_type.previous

                # If there was no starting brace then a lone end brace wouldn't
                # have been the type end token. Now that we've added any
                # missing start brace, see if the last effective type token
                # was an end brace.
                if last_type.type != Type.DOC_END_BRACE:
                    trailing_space = (len(last_type.string) -
                                      len(last_type.string.rstrip()))
                    if trailing_space:
                        tokenutil.SplitToken(
                            last_type,
                            len(last_type.string) - trailing_space)

                    new_token = Token('}', Type.DOC_END_BRACE, last_type.line,
                                      last_type.line_number)
                    tokenutil.InsertTokenAfter(new_token, last_type)
                    token.attached_object.type_end_token = new_token
                    fixed_tokens.append(new_token)

            self._AddFix(fixed_tokens)

        elif code == errors.GOOG_REQUIRES_NOT_ALPHABETIZED:
            require_start_token = error.fix_data
            sorter = requireprovidesorter.RequireProvideSorter()
            sorter.FixRequires(require_start_token)

            self._AddFix(require_start_token)

        elif code == errors.GOOG_PROVIDES_NOT_ALPHABETIZED:
            provide_start_token = error.fix_data
            sorter = requireprovidesorter.RequireProvideSorter()
            sorter.FixProvides(provide_start_token)

            self._AddFix(provide_start_token)

        elif code == errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC:
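            # Rewrite '{@inheritDoc}' as '@inheritDoc' by deleting the
            # surrounding braces.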
            if token.previous.string == '{' and token.next.string == '}':
                self._DeleteToken(token.previous)
                self._DeleteToken(token.next)
                self._AddFix([token])

        elif code == errors.INVALID_AUTHOR_TAG_DESCRIPTION:
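            # Reorder an inverted author line into the expected
            # 'email (name)' form.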
            match = INVERTED_AUTHOR_SPEC.match(token.string)
            if match:
                token.string = '%s%s%s(%s)%s' % (
                    match.group('leading_whitespace'), match.group('email'),
                    match.group('whitespace_after_name'), match.group('name'),
                    match.group('trailing_characters'))
                self._AddFix(token)

        elif (code == errors.WRONG_INDENTATION
              and not FLAGS.disable_indentation_fixing):
            token = tokenutil.GetFirstTokenInSameLine(token)
            actual = error.position.start
            expected = error.position.length
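            # The reported Position encodes the actual indentation in .start
            # and the expected indentation in .length.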

            # Handle the case where the first token is a parameter that has
            # leading spaces.
            if (len(token.string.lstrip()) == len(token.string) - actual
                    and token.string.lstrip()):
                token.string = token.string.lstrip()
                actual = 0

            if token.type in (Type.WHITESPACE,
                              Type.PARAMETERS) and actual != 0:
                token.string = token.string.lstrip() + (' ' * expected)
                self._AddFix([token])
            else:
                # We need to add indentation.
                new_token = Token(' ' * expected, Type.WHITESPACE, token.line,
                                  token.line_number)
                # Note that we'll never need to add indentation at the first
                # line, since the first line is never indented.  Therefore it's
                # safe to assume token.previous exists.
                tokenutil.InsertTokenAfter(new_token, token.previous)
                self._AddFix([token])

        elif code in [
                errors.MALFORMED_END_OF_SCOPE_COMMENT,
                errors.MISSING_END_OF_SCOPE_COMMENT
        ]:
            # Only fix cases where }); is found with no trailing content on the line
            # other than a comment. Value of 'token' is set to } for this error.
            if (token.type == Type.END_BLOCK
                    and token.next.type == Type.END_PAREN
                    and token.next.next.type == Type.SEMICOLON):
                current_token = token.next.next.next
                removed_tokens = []
                while current_token and current_token.line_number == token.line_number:
                    if current_token.IsAnyType(Type.WHITESPACE,
                                               Type.START_SINGLE_LINE_COMMENT,
                                               Type.COMMENT):
                        removed_tokens.append(current_token)
                        current_token = current_token.next
                    else:
                        return

                if removed_tokens:
                    self._DeleteTokens(removed_tokens[0], len(removed_tokens))

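                # Rebuild the canonical trailing comment so the line reads
                # '});  // goog.scope'.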
                whitespace_token = Token('  ', Type.WHITESPACE, token.line,
                                         token.line_number)
                start_comment_token = Token('//',
                                            Type.START_SINGLE_LINE_COMMENT,
                                            token.line, token.line_number)
                comment_token = Token(' goog.scope', Type.COMMENT, token.line,
                                      token.line_number)
                insertion_tokens = [
                    whitespace_token, start_comment_token, comment_token
                ]

                tokenutil.InsertTokensAfter(insertion_tokens, token.next.next)
                self._AddFix(removed_tokens + insertion_tokens)

        elif code in [errors.EXTRA_GOOG_PROVIDE, errors.EXTRA_GOOG_REQUIRE]:
            tokens_in_line = tokenutil.GetAllTokensInSameLine(token)
            self._DeleteTokens(tokens_in_line[0], len(tokens_in_line))
            self._AddFix(tokens_in_line)

        elif code in [
                errors.MISSING_GOOG_PROVIDE, errors.MISSING_GOOG_REQUIRE
        ]:
            is_provide = code == errors.MISSING_GOOG_PROVIDE
            is_require = code == errors.MISSING_GOOG_REQUIRE

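            # fix_data is a (missing_namespaces, need_blank_line) pair.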
            missing_namespaces = error.fix_data[0]
            need_blank_line = error.fix_data[1]

            if need_blank_line is None:
                # TODO(user): This happens when there are no existing
                # goog.provide or goog.require statements to position new statements
                # relative to. Consider handling this case with a heuristic.
                return

            insert_location = token.previous

            # If inserting a missing require with no existing requires, insert a
            # blank line first.
            if need_blank_line and is_require:
                tokenutil.InsertBlankLineAfter(insert_location)
                insert_location = insert_location.next

            for missing_namespace in missing_namespaces:
                new_tokens = self._GetNewRequireOrProvideTokens(
                    is_provide, missing_namespace,
                    insert_location.line_number + 1)
                tokenutil.InsertLineAfter(insert_location, new_tokens)
                insert_location = new_tokens[-1]
                self._AddFix(new_tokens)

            # If inserting a missing provide with no existing provides, insert a
            # blank line after.
            if need_blank_line and is_provide:
                tokenutil.InsertBlankLineAfter(insert_location)