Code Example #1
    def run(self, revision):
        '''
        Run modified files with specified checks through clang-tidy
        using threaded workers (communicate through queues)
        Output a list of ClangTidyIssue
        '''
        assert isinstance(revision, Revision)
        self.revision = revision

        # Run all files in a single command
        # through mach static-analysis
        cmd = [
            'gecko-env',
            './mach',
            '--log-no-times',
            'static-analysis',
            'check',

            # Limit warnings to current files
            '--header-filter={}'.format('|'.join(
                os.path.basename(filename) for filename in revision.files)),
            '--checks={}'.format(','.join(c['name']
                                          for c in settings.clang_checkers)),
        ] + list(revision.files)
        logger.info('Running static-analysis', cmd=' '.join(cmd))

        # Run command, without checking its exit code as clang-tidy 7+
        # exits with an error code when finding errors (which we want to report !)
        try:
            clang = subprocess.run(cmd,
                                   cwd=settings.repo_dir,
                                   check=False,
                                   stdout=subprocess.PIPE)
        except subprocess.CalledProcessError as e:
            raise AnalysisException(
                'clang-tidy',
                'Mach static analysis failed: {}'.format(e.output))

        clang_output = clang.stdout.decode('utf-8')

        # Dump raw clang-tidy output as a Taskcluster artifact (for debugging)
        clang_output_path = os.path.join(
            settings.taskcluster.results_dir,
            '{}-clang-tidy.txt'.format(repr(revision)),
        )
        with open(clang_output_path, 'w') as f:
            f.write(clang_output)

        issues = self.parse_issues(clang_output, revision)

        # Report stats for these issues
        stats.report_issues('clang-tidy', issues)

        return issues
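
For reference, a minimal sketch of the command line built above, using hypothetical stand-ins for `revision.files` and `settings.clang_checkers` (the real values come from the revision under analysis and the bot's configuration):

import os

# Hypothetical stand-ins; the real values come from Revision and settings
files = ['dom/base/nsDocument.cpp', 'dom/base/nsDocument.h']
clang_checkers = [{'name': 'modernize-use-nullptr'},
                  {'name': 'readability-braces-around-statements'}]

cmd = [
    'gecko-env', './mach', '--log-no-times', 'static-analysis', 'check',
    '--header-filter={}'.format('|'.join(
        os.path.basename(f) for f in files)),
    '--checks={}'.format(','.join(c['name'] for c in clang_checkers)),
] + files

print(' '.join(cmd))
# gecko-env ./mach --log-no-times static-analysis check
#   --header-filter=nsDocument.cpp|nsDocument.h
#   --checks=modernize-use-nullptr,readability-braces-around-statements
#   dom/base/nsDocument.cpp dom/base/nsDocument.h
# (a single line in practice; wrapped here for readability)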
Code Example #2
File: lint.py  Project: La0/mozilla-relengapi
    def run(self, revision):
        '''
        List all issues found by mozlint on specified files
        '''
        assert isinstance(revision, Revision)

        issues = list(itertools.chain.from_iterable([
            self.find_issues(path, revision) or []
            for path in revision.files
        ]))

        stats.report_issues('mozlint', issues)

        return issues
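
The `or []` guard presumably covers the case where `find_issues` returns None for a file, and `itertools.chain.from_iterable` then flattens the per-file lists into a single list. A minimal sketch of that flattening, with stand-in results:

import itertools

# Stand-in per-file results: one file yields nothing (None), the others yield lists
per_file = [['issue-a'], None, ['issue-b', 'issue-c']]

issues = list(itertools.chain.from_iterable([
    result or []  # treat None as an empty list of issues
    for result in per_file
]))

print(issues)  # ['issue-a', 'issue-b', 'issue-c']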
Code Example #3
File: format.py  Project: tomoguara/release-services
    def run_clang_format(self, filename, revision):
        '''
        Clang-format is very fast, no need for a worker queue here
        '''
        # Check file exists (before mode)
        full_path = os.path.join(settings.repo_dir, filename)
        if not os.path.exists(full_path):
            logger.info('Modified file not found {}'.format(full_path))
            return []

        # Build command line for a filename
        cmd = [
            self.binary,

            # Use style from directories
            '-style=file',
            full_path,
        ]
        logger.info('Running clang-format', cmd=' '.join(cmd))

        # Run command
        clang_output = subprocess.check_output(
            cmd, cwd=settings.repo_dir).decode('utf-8')

        # Dump raw clang-format output as a Taskcluster artifact (for debugging)
        clang_output_path = os.path.join(
            settings.taskcluster_results_dir,
            '{}-clang-format.txt'.format(repr(revision)),
        )
        with open(clang_output_path, 'w') as f:
            f.write(clang_output)

        # Compare output with original file
        src_lines = [x.rstrip('\n') for x in open(full_path).readlines()]
        clang_lines = clang_output.split('\n')

        # Build issues from diff of diff !
        diff = difflib.SequenceMatcher(
            a=src_lines,
            b=clang_lines,
        )
        issues = [
            ClangFormatIssue(filename, src_lines, clang_lines, opcode,
                             revision) for opcode in diff.get_opcodes()
            if opcode[0] in OPCODES
        ]

        stats.report_issues('clang-format', issues)
        return issues
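
`SequenceMatcher.get_opcodes()` returns `(tag, i1, i2, j1, j2)` tuples describing how to turn the source lines into the clang-format output; `OPCODES` is not shown here, but presumably holds the tags that indicate an actual change ('replace', 'insert', 'delete'), so 'equal' ranges produce no issue. A minimal sketch of that filtering:

import difflib

# Hypothetical value for OPCODES, assumed to exclude 'equal'
OPCODES = ('replace', 'insert', 'delete')

src_lines = ['int main(){', '  return 0;', '}']
clang_lines = ['int main() {', '  return 0;', '}']

diff = difflib.SequenceMatcher(a=src_lines, b=clang_lines)
for tag, i1, i2, j1, j2 in diff.get_opcodes():
    if tag in OPCODES:
        print(tag, src_lines[i1:i2], '->', clang_lines[j1:j2])
# replace ['int main(){'] -> ['int main() {']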
Code Example #4
    def run(self, revision):
        '''
        Run modified files with specified checks through infer
        using threaded workers (communicate through queues)
        Output a list of InferIssue
        '''
        assert isinstance(revision, Revision)
        self.revision = revision

        with AndroidConfig():
            # Mach pre-setup with mozconfig
            logger.info('Mach configure for infer...')
            run_check(['gecko-env', './mach', 'configure'],
                      cwd=settings.repo_dir)

            # Run all files in a single command
            # through mach static-analysis
            cmd = [
                'gecko-env', './mach', '--log-no-times', 'static-analysis',
                'check-java'
            ] + list(revision.files)
            logger.info('Running static-analysis', cmd=' '.join(cmd))

            # Run command
            try:
                infer_output = subprocess.check_output(cmd,
                                                       cwd=settings.repo_dir)
            except subprocess.CalledProcessError as e:
                raise AnalysisException(
                    'infer',
                    'Mach static analysis failed: {}'.format(e.output))

        report_file = os.path.join(settings.repo_dir, 'infer-out',
                                   'report.json')
        infer_output = json.load(open(report_file))

        # Dump raw infer output as a Taskcluster artifact (for debugging)
        infer_output_path = os.path.join(
            settings.taskcluster.results_dir,
            '{}-infer.txt'.format(repr(revision)),
        )
        with open(infer_output_path, 'w') as f:
            f.write(json.dumps(infer_output, indent=2))
        issues = self.parse_issues(infer_output, revision)

        # Report stats for these issues
        stats.report_issues('infer', issues)
        return issues
Code Example #5
File: infer.py  Project: La0/mozilla-relengapi
    def run(self, revision):
        '''
        Run modified files with specified checks through infer
        using threaded workers (communicate through queues)
        Output a list of InferIssue
        '''
        assert isinstance(revision, Revision)
        self.revision = revision

        with AndroidConfig():
            # Mach pre-setup with mozconfig
            logger.info('Mach configure for infer...')
            run_check(['gecko-env', './mach', 'configure'],
                      cwd=settings.repo_dir)

            # Run all files in a single command
            # through mach static-analysis
            cmd = [
                'gecko-env',
                './mach', '--log-no-times', 'static-analysis', 'check-java'
            ] + list(revision.files)
            logger.info('Running static-analysis', cmd=' '.join(cmd))

            # Run command
            try:
                infer_output = subprocess.check_output(cmd, cwd=settings.repo_dir)
            except subprocess.CalledProcessError as e:
                raise AnalysisException('infer', 'Mach static analysis failed: {}'.format(e.output))

        report_file = os.path.join(settings.repo_dir, 'infer-out', 'report.json')
        infer_output = json.load(open(report_file))

        # Dump raw infer output as a Taskcluster artifact (for debugging)
        infer_output_path = os.path.join(
            settings.taskcluster.results_dir,
            '{}-infer.txt'.format(repr(revision)),
        )
        with open(infer_output_path, 'w') as f:
            f.write(json.dumps(infer_output, indent=2))
        issues = self.parse_issues(infer_output, revision)

        # Report stats for these issues
        stats.report_issues('infer', issues)
        return issues
Code Example #6
File: coverage.py  Project: La0/mozilla-relengapi
    def run(self, revision):
        '''
        List all issues found by coverage analysis on specified files
        '''
        assert isinstance(revision, Revision)

        # Download zero coverage report.
        r = requests.get('https://index.taskcluster.net/v1/task/project.releng.services.project.production.code_coverage_bot.latest/artifacts/public/zero_coverage_report.json')  # noqa
        r.raise_for_status()
        report = r.json()
        zero_coverage_files = set(file_info['name'] for file_info in report['files'] if file_info['uncovered'])

        issues = [
            CoverageIssue(path, 0, 'This file is uncovered', revision)
            for path in revision.files
            if path in zero_coverage_files
        ]

        stats.report_issues('coverage', issues)

        return issues
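
Only a few fields of the zero coverage report are used above: each entry in `files` must carry a `name` and an `uncovered` flag. A minimal sketch with a stand-in report instead of the Taskcluster artifact:

# Stand-in report with just the fields used above; the real one is the
# zero_coverage_report.json artifact fetched from the Taskcluster index
report = {
    'files': [
        {'name': 'dom/base/nsDocument.cpp', 'uncovered': True},
        {'name': 'dom/base/nsDocument.h', 'uncovered': False},
    ],
}
revision_files = ['dom/base/nsDocument.cpp', 'layout/Foo.cpp']  # hypothetical

zero_coverage_files = set(f['name'] for f in report['files'] if f['uncovered'])
print([p for p in revision_files if p in zero_coverage_files])
# ['dom/base/nsDocument.cpp']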
Code Example #7
File: coverage.py  Project: clokep/release-services
    def run(self, revision):
        '''
        List all issues found by coverage analysis on specified files
        '''
        assert isinstance(revision, Revision)

        # Download zero coverage report.
        r = requests.get(
            'https://index.taskcluster.net/v1/task/project.releng.services.project.production.code_coverage_bot.latest/artifacts/public/zero_coverage_report.json'
        )  # noqa
        r.raise_for_status()
        report = r.json()
        zero_coverage_files = set(file_info['name']
                                  for file_info in report['files']
                                  if file_info['uncovered'])

        issues = [
            CoverageIssue(path, 0, 'This file is uncovered', revision)
            for path in revision.files if path in zero_coverage_files
        ]

        stats.report_issues('coverage', issues)

        return issues
Code Example #8
    def run(self, revision):
        '''
        Run ./mach clang-format on all of the C/C++ files from the patch
        '''
        assert isinstance(revision, Revision)

        cmd = ['gecko-env', './mach', '--log-no-times', 'clang-format', '-p']

        # Returns a list of eligible files for format
        def get_eligible_files():
            files = []
            # Append to the files list each C/C++ file for format
            for file in revision.files:
                # Check that the file is clang-format eligible, i.e. a C/C++ file
                _, ext = os.path.splitext(file)
                if ext.lower() in settings.cpp_extensions:
                    files.append(file)
            return files

        files_to_format = get_eligible_files()

        if not files_to_format:
            logger.info('No eligible files found to format.')
            return []

        # Append to the cmd the files that will be formatted
        cmd += files_to_format

        # Run command and commit the current revision for `./mach clang-format ...` to reformat its changes
        logger.info('Running ./mach clang-format', cmd=' '.join(cmd))
        clang_output = subprocess.check_output(
            cmd, cwd=settings.repo_dir).decode('utf-8')

        # Dump raw clang-format output as a Taskcluster artifact (for debugging)
        clang_output_path = os.path.join(
            settings.taskcluster.results_dir,
            '{}-clang-format.txt'.format(repr(revision)),
        )
        with open(clang_output_path, 'w') as f:
            f.write(clang_output)

        # Look for any fixes `./mach clang-format` may have found
        # on allowed files
        allowed_paths = [
            os.path.join(settings.repo_dir,
                         path).encode('utf-8')  # needed for hglib
            for path in filter(settings.is_allowed_path, revision.files)
        ]
        client = hglib.open(settings.repo_dir)
        self.diff = client.diff(files=allowed_paths, unified=8).decode('utf-8')

        if not self.diff:
            return []

        # Store that diff as an improvement patch sent to devs
        revision.add_improvement_patch('clang-format', self.diff)

        # Generate a reverse diff for `parsepatch` (in order to get original
        # line numbers from the dev's patch instead of new line numbers)
        reverse_diff = client.diff(unified=8, reverse=True).decode('utf-8')

        # List all the lines that were fixed by `./mach clang-format`
        patch = Patch.parse_patch(reverse_diff, skip_comments=False)
        assert patch != {}, \
            'Empty patch'

        # Build `ClangFormatIssue`s
        issues = []
        for filename, diff in patch.items():
            lines = sorted(diff.get('touched', []) + diff.get('added', []))

            # Group consecutive lines together (algorithm by calixte)
            groups = []
            group = [lines[0]]
            for line in lines[1:]:
                # If the line is not consecutive with the group, start a new
                # group
                if line != group[-1] + 1:
                    groups.append(group)
                    group = []
                group.append(line)

            # Don't forget to add the last group
            groups.append(group)

            issues += [
                ClangFormatIssue(filename, group[0], len(group), revision)
                for group in groups
            ]

        stats.report_issues('clang-format', issues)
        return issues
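
The grouping loop above turns the sorted list of reformatted line numbers into runs of consecutive lines; each run then becomes one `ClangFormatIssue` starting at the run's first line and spanning its length. A standalone sketch of the same algorithm on sample input:

# Hypothetical line numbers touched by `./mach clang-format`
lines = [3, 4, 5, 10, 11, 20]

groups = []
group = [lines[0]]
for line in lines[1:]:
    if line != group[-1] + 1:  # not consecutive: close the current run
        groups.append(group)
        group = []
    group.append(line)
groups.append(group)  # don't forget the last run

print(groups)  # [[3, 4, 5], [10, 11], [20]]
print([(g[0], len(g)) for g in groups])  # [(3, 3), (10, 2), (20, 1)]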
Code Example #9
File: format.py  Project: La0/mozilla-relengapi
    def run(self, revision):
        '''
        Run ./mach clang-format on all of the C/C++ files from the patch
        '''
        assert isinstance(revision, Revision)

        cmd = [
            'gecko-env',
            './mach', '--log-no-times', 'clang-format', '-p'
        ]

        # Returns a list of eligible files for format
        def get_eligible_files():
            files = []
            # Append to the files list each C/C++ file for format
            for file in revision.files:
                # Check that the file is clang-format eligible, i.e. a C/C++ file
                _, ext = os.path.splitext(file)
                if ext.lower() in frozenset.union(settings.cpp_extensions, settings.cpp_header_extensions):
                    files.append(file)
            return files

        files_to_format = get_eligible_files()

        if not files_to_format:
            logger.info('No eligible files found to format.')
            return []

        # Append to the cmd the files that will be formatted
        cmd += files_to_format

        # Run command and commit the current revision for `./mach clang-format ...` to reformat its changes
        logger.info('Running ./mach clang-format', cmd=' '.join(cmd))
        clang_output = subprocess.check_output(
            cmd, cwd=settings.repo_dir).decode('utf-8')

        # Dump raw clang-format output as a Taskcluster artifact (for debugging)
        clang_output_path = os.path.join(
            settings.taskcluster.results_dir,
            '{}-clang-format.txt'.format(repr(revision)),
        )
        with open(clang_output_path, 'w') as f:
            f.write(clang_output)

        # Look for any fixes `./mach clang-format` may have found
        # on allowed files
        allowed_paths = [
            os.path.join(settings.repo_dir, path).encode('utf-8')  # needed for hglib
            for path in filter(settings.is_allowed_path, revision.files)
        ]
        client = hglib.open(settings.repo_dir)
        self.diff = client.diff(files=allowed_paths, unified=8).decode('utf-8')

        if not self.diff:
            return []

        # Store that diff as an improvement patch sent to devs
        revision.add_improvement_patch('clang-format', self.diff)

        # Generate a reverse diff for `parsepatch` (in order to get original
        # line numbers from the dev's patch instead of new line numbers)
        reverse_diff = client.diff(unified=8, reverse=True).decode('utf-8')

        # List all the lines that were fixed by `./mach clang-format`
        patch = Patch.parse_patch(reverse_diff, skip_comments=False)
        assert patch != {}, \
            'Empty patch'

        # Build `ClangFormatIssue`s
        issues = []
        for filename, diff in patch.items():
            lines = sorted(diff.get('touched', []) + diff.get('added', []))

            # Group consecutive lines together (algorithm by calixte)
            groups = []
            group = [lines[0]]
            for line in lines[1:]:
                # If the line is not consecutive with the group, start a new
                # group
                if line != group[-1] + 1:
                    groups.append(group)
                    group = []
                group.append(line)

            # Don't forget to add the last group
            groups.append(group)

            issues += [
                ClangFormatIssue(filename, g[0], len(g), revision)
                for g in groups
            ]

        stats.report_issues('clang-format', issues)
        return issues
Code Example #10
File: coverity.py  Project: clokep/release-services
    def run(self, revision):
        '''
        Run coverity
        '''
        assert isinstance(revision, Revision)
        self.revision = revision

        # Based on our previous configs we should already have generated compile_commands.json
        self.compile_commands_path = os.path.join(settings.repo_dir,
                                                  'obj-x86_64-pc-linux-gnu',
                                                  'compile_commands.json')

        assert os.path.exists(self.compile_commands_path), \
            'Missing Coverity in {}'.format(self.compile_commands_path)

        logger.info('Building command files from compile_commands.json')

        # Retrieve the revision files with build commands associated
        commands_list = self.get_files_with_commands()
        assert commands_list != [], 'Commands List is empty'
        logger.info('Built commands for {} files'.format(len(commands_list)))

        if len(commands_list) == 0:
            logger.info('Coverity didn\'t find any compilation units to use.')
            return []

        cmd = ['gecko-env', self.cov_run_desktop, '--setup']
        logger.info('Running Coverity Setup', cmd=cmd)
        try:
            run_check(cmd, cwd=self.cov_path)
        except click.ClickException:
            raise AnalysisException('coverity', 'Coverity Setup failed!')

        cmd = ['gecko-env', self.cov_configure, '--clang']
        logger.info('Running Coverity Configure', cmd=cmd)
        try:
            run_check(cmd, cwd=self.cov_path)
        except click.ClickException:
            raise AnalysisException('coverity', 'Coverity Configure failed!')

        # For each element in commands_list run `cov-translate`
        for element in commands_list:
            cmd = [
                'gecko-env', self.cov_translate, '--dir', self.cov_idir_path,
                element['command']
            ]
            logger.info('Running Coverity Translate', cmd=cmd)
            try:
                run_check(cmd, cwd=element['directory'])
            except click.ClickException:
                raise AnalysisException('coverity',
                                        'Coverity Translate failed!')

        # Once the capture is performed we need to do the actual Coverity Desktop analysis
        cmd = [
            'gecko-env', self.cov_run_desktop, '--json-output-v6',
            'cov-results.json', '--strip-path', settings.repo_dir
        ]
        cmd += [element['file'] for element in commands_list]
        logger.info('Running Coverity Analysis', cmd=cmd)
        try:
            run_check(cmd, cwd=self.cov_state_path)
        except click.ClickException:
            raise AnalysisException('coverity', 'Coverity Analysis failed!')

        # Write the results.json to the artifact directory to have it later on for debug
        coverity_results_path = os.path.join(self.cov_state_path,
                                             'cov-results.json')
        coverity_results_path_on_tc = os.path.join(
            settings.taskcluster.results_dir, 'cov-results.json')

        shutil.copyfile(coverity_results_path, coverity_results_path_on_tc)

        # Parsing the issues from coverity_results_path
        logger.info('Parsing Coverity issues')
        issues = self.return_issues(coverity_results_path, revision)

        # Report stats for these issues
        stats.report_issues('coverity', issues)

        return issues
Code Example #11
File: format.py  Project: isacben/release-services
    def run(self, revision):
        '''
        Run ./mach clang-format on the current patch
        '''
        assert isinstance(revision, Revision)

        # Commit the current revision for `./mach clang-format` to reformat its changes
        cmd = [
            'gecko-env',
            './mach',
            '--log-no-times',
            'clang-format',
        ]
        logger.info('Running ./mach clang-format', cmd=' '.join(cmd))

        # Run command
        clang_output = subprocess.check_output(
            cmd, cwd=settings.repo_dir).decode('utf-8')

        # Dump raw clang-format output as a Taskcluster artifact (for debugging)
        clang_output_path = os.path.join(
            settings.taskcluster.results_dir,
            '{}-clang-format.txt'.format(repr(revision)),
        )
        with open(clang_output_path, 'w') as f:
            f.write(clang_output)

        # Look for any fixes `./mach clang-format` may have found
        client = hglib.open(settings.repo_dir)
        self.diff = client.diff(unified=8).decode('utf-8')

        if not self.diff:
            return []

        # Store that diff as an improvement patch sent to devs
        revision.add_improvement_patch('clang-format', self.diff)

        # Generate a reverse diff for `parsepatch` (in order to get original
        # line numbers from the dev's patch instead of new line numbers)
        reverse_diff = client.diff(unified=8, reverse=True).decode('utf-8')

        # List all the lines that were fixed by `./mach clang-format`
        patch = Patch.parse_patch(reverse_diff, skip_comments=False)
        assert patch != {}, \
            'Empty patch'

        # Build `ClangFormatIssue`s
        issues = []
        for filename, diff in patch.items():
            lines = sorted(diff.get('touched', []) + diff.get('added', []))

            # Group consecutive lines together (algorithm by calixte)
            groups = []
            group = [lines[0]]
            for line in lines[1:]:
                # If the line is not consecutive with the group, start a new
                # group
                if line != group[-1] + 1:
                    groups.append(group)
                    group = []
                group.append(line)

            # Don't forget to add the last group
            groups.append(group)

            issues += [
                ClangFormatIssue(filename, group[0], len(group), revision)
                for group in groups
            ]

        stats.report_issues('clang-format', issues)
        return issues
Code Example #12
File: coverity.py  Project: La0/mozilla-relengapi
    def run(self, revision):
        '''
        Run coverity
        '''
        assert isinstance(revision, Revision)
        self.revision = revision

        # Based on our previous configs we should already have generated compile_commands.json
        self.compile_commands_path = os.path.join(settings.repo_dir, 'obj-x86_64-pc-linux-gnu', 'compile_commands.json')

        assert os.path.exists(self.compile_commands_path), \
            'Missing Coverity in {}'.format(self.compile_commands_path)

        logger.info('Building command files from compile_commands.json')

        # Retrieve the revision files with build commands associated
        commands_list = self.get_files_with_commands()
        assert commands_list != [], 'Commands List is empty'
        logger.info('Built commands for {} files'.format(len(commands_list)))

        if len(commands_list) == 0:
            logger.info('Coverity didn\'t find any compilation units to use.')
            return []

        cmd = ['gecko-env', self.cov_run_desktop, '--setup']
        logger.info('Running Coverity Setup', cmd=cmd)
        try:
            run_check(cmd, cwd=self.cov_path)
        except click.ClickException:
            raise AnalysisException('coverity', 'Coverity Setup failed!')

        cmd = ['gecko-env', self.cov_configure, '--clang']
        logger.info('Running Coverity Configure', cmd=cmd)
        try:
            run_check(cmd, cwd=self.cov_path)
        except click.ClickException:
            raise AnalysisException('coverity', 'Coverity Configure failed!')

        # For each element in commands_list run `cov-translate`
        for element in commands_list:
            cmd = [
                'gecko-env', self.cov_translate, '--dir', self.cov_idir_path,
                element['command']
            ]
            logger.info('Running Coverity Translate', cmd=cmd)
            try:
                run_check(cmd, cwd=element['directory'])
            except click.ClickException:
                raise AnalysisException('coverity', 'Coverity Translate failed!')

        # Once the capture is performed we need to do the actual Coverity Desktop analysis
        cmd = [
            'gecko-env', self.cov_run_desktop, '--json-output-v6',
            'cov-results.json', '--strip-path', settings.repo_dir
        ]
        cmd += [element['file'] for element in commands_list]
        logger.info('Running Coverity Analysis', cmd=cmd)
        try:
            run_check(cmd, cwd=self.cov_state_path)
        except click.ClickException:
            raise AnalysisException('coverity', 'Coverity Analysis failed!')

        # Write the results.json to the artifact directory to have it later on for debug
        coverity_results_path = os.path.join(self.cov_state_path, 'cov-results.json')
        coverity_results_path_on_tc = os.path.join(settings.taskcluster.results_dir, 'cov-results.json')

        shutil.copyfile(coverity_results_path, coverity_results_path_on_tc)

        # Parsing the issues from coverity_results_path
        logger.info('Parsing Coverity issues')
        issues = self.return_issues(coverity_results_path, revision)

        # Report stats for these issues
        stats.report_issues('coverity', issues)

        return issues