Example #1
    def run(self, revision):
        '''
        Run the local static analysis workflow:
         * Pull revision from review
         * Checkout revision
         * Run static analyzers

        Returns a de-duplicated list of detected issues, or None when no
        analyzer is enabled for this revision.
        '''
        # Analyzer classes selected for this revision; populated below.
        analyzers = []

        # Add log to find Taskcluster task in papertrail
        logger.info(
            'New static analysis',
            taskcluster_task=settings.taskcluster.task_id,
            taskcluster_run=settings.taskcluster.run_id,
            channel=settings.app_channel,
            publication=settings.publication.name,
            revision=str(revision),
        )
        stats.api.event(
            title='Static analysis on {} for {}'.format(settings.app_channel, revision),
            text='Task {} #{}'.format(settings.taskcluster.task_id, settings.taskcluster.run_id),
        )
        stats.api.increment('analysis')

        with stats.api.timer('runtime.mercurial'):
            try:
                # Clone in a controllable process
                # and kill this new process if it exceeds the maximum allowed runtime
                clone = multiprocessing.Process(target=self.clone, args=(revision, ))
                clone.start()
                # join() with a timeout returns either when the clone finishes
                # or after settings.max_clone_runtime seconds, whichever is first.
                clone.join(settings.max_clone_runtime)
                if clone.is_alive():
                    logger.error('Clone watchdog expired, stopping immediately')

                    # Kill the clone process
                    clone.terminate()

                    # Stop workflow
                    raise AnalysisException('watchdog', 'Clone watchdog expired')

                # Open a mercurial client in main process
                # (the clone itself happened in the child process above)
                self.hg = self.open_repository()

                # Index the task state now that the repository is available
                self.parent.index(revision, state='cloned')

                # Force cleanup to reset top of MU
                # otherwise previous pull are there
                self.hg.update(rev=self.top_revision, clean=True)
                logger.info('Set repo back to Mozilla unified top', rev=self.hg.identify())
            except hglib.error.CommandError as e:
                # Wrap mercurial failures in the workflow's own exception type
                raise AnalysisException('mercurial', str(e))

            # Load and analyze revision patch
            revision.load(self.hg)
            revision.analyze_patch()

        with stats.api.timer('runtime.mach'):
            # Only run mach if revision has any C/C++ or Java files
            if revision.has_clang_files:
                self.do_build_setup()

                # Download clang build from Taskcluster
                # Use new clang-tidy paths, https://bugzilla.mozilla.org/show_bug.cgi?id=1495641
                logger.info('Setup Taskcluster clang build...')
                setup_clang(repository='mozilla-inbound', revision='revision.874a07fdb045b725edc2aaa656a8620ff439ec10')

                # Use clang-tidy & clang-format
                if CLANG_TIDY in self.analyzers:
                    analyzers.append(ClangTidy)
                else:
                    logger.info('Skip clang-tidy')
                if CLANG_FORMAT in self.analyzers:
                    analyzers.append(ClangFormat)
                else:
                    logger.info('Skip clang-format')

                # Run Coverity Scan
                if COVERITY in self.analyzers:
                    logger.info('Setup Taskcluster coverity build...')
                    try:
                        setup_coverity(self.index_service)
                        analyzers.append(Coverity)
                    except Exception as e:
                        # Coverity is best-effort: a setup failure skips the
                        # analyzer but does not abort the whole workflow.
                        logger.error('Coverity setup failed, skipping analyzer.', error=str(e))
                else:
                    logger.info('Skip Coverity')

                if COVERAGE in self.analyzers:
                    analyzers.append(Coverage)
                else:
                    logger.info('Skip coverage analysis')

            if revision.has_infer_files:
                if INFER in self.analyzers:
                    analyzers.append(Infer)
                    logger.info('Setup Taskcluster infer build...')
                    setup_infer(self.index_service)
                else:
                    logger.info('Skip infer')

            if not (revision.has_clang_files or revision.has_infer_files):
                logger.info('No clang or java files detected, skipping mach, infer and clang-*')

            # Setup python environment
            logger.info('Mach lint setup...')
            cmd = ['gecko-env', './mach', 'lint', '--list']
            with stats.api.timer('runtime.mach.lint'):
                out = run_check(cmd, cwd=settings.repo_dir)
            # mach lint reports setup problems on stdout rather than via a
            # non-zero exit, so inspect the output explicitly.
            if 'error: problem with lint setup' in out.decode('utf-8'):
                raise AnalysisException('mach', 'Mach lint setup failed')

            # Always use mozlint
            if MOZLINT in self.analyzers:
                analyzers.append(MozLint)
            else:
                logger.info('Skip mozlint')

        if not analyzers:
            logger.error('No analyzers to use on revision')
            return

        self.parent.index(revision, state='analyzing')
        with stats.api.timer('runtime.issues'):
            # Detect initial issues
            if settings.publication == Publication.BEFORE_AFTER:
                # Third positional argument presumably flags the "before
                # patch" pass — TODO confirm against detect_issues' signature.
                before_patch = self.detect_issues(analyzers, revision, True)
                logger.info('Detected {} issue(s) before patch'.format(len(before_patch)))
                stats.api.increment('analysis.issues.before', len(before_patch))
                revision.reset()

            # Apply patch
            revision.apply(self.hg)

            # Headers / IDL changes can affect generated build files,
            # so redo the build setup after applying the patch.
            if settings.publication == Publication.BEFORE_AFTER and revision.has_clang_files \
                    and (revision.has_clang_header_files or revision.has_idl_files):
                self.do_build_setup()

            # Detect new issues
            issues = self.detect_issues(analyzers, revision)
            logger.info('Detected {} issue(s) after patch'.format(len(issues)))
            stats.api.increment('analysis.issues.after', len(issues))

            # Mark newly found issues
            if settings.publication == Publication.BEFORE_AFTER:
                for issue in issues:
                    issue.is_new = issue not in before_patch

        # Avoid duplicates
        # but still output a list to be compatible with LocalWorkflow
        return list(set(issues))
Example #2
    def run(self, revision):
        '''
        Run the static analysis workflow:
         * Pull revision from review
         * Checkout revision
         * Run static analysis
         * Publish results

        Indexes progress states on the Taskcluster task as it goes
        (started -> cloned -> analyzing -> analyzed -> done).
        '''
        # Analyzer classes selected for this revision; populated below.
        analyzers = []

        # Index ASAP Taskcluster task for this revision
        self.index(revision, state='started')

        # Add log to find Taskcluster task in papertrail
        logger.info(
            'New static analysis',
            taskcluster_task=settings.taskcluster.task_id,
            taskcluster_run=settings.taskcluster.run_id,
            channel=settings.app_channel,
            publication=settings.publication.name,
            revision=str(revision),
        )
        stats.api.event(
            title='Static analysis on {} for {}'.format(settings.app_channel, revision),
            text='Task {} #{}'.format(settings.taskcluster.task_id, settings.taskcluster.run_id),
        )
        stats.api.increment('analysis')

        with stats.api.timer('runtime.mercurial'):
            try:
                # Start by cloning the mercurial repository
                self.hg = self.clone()
                self.index(revision, state='cloned')

                # Force cleanup to reset top of MU
                # otherwise previous pull are there
                self.hg.update(rev=self.top_revision, clean=True)
                logger.info('Set repo back to Mozilla unified top', rev=self.hg.identify())
            except hglib.error.CommandError as e:
                # Wrap mercurial failures in the workflow's own exception type
                raise AnalysisException('mercurial', str(e))

            # Load and analyze revision patch
            revision.load(self.hg)
            revision.analyze_patch()

        with stats.api.timer('runtime.mach'):
            # Only run mach if revision has any C/C++ or Java files
            if revision.has_clang_files:

                # Mach pre-setup with mozconfig
                # Each step is timed separately so build-phase regressions
                # show up per-stage in the stats backend.
                try:
                    logger.info('Mach configure...')
                    with stats.api.timer('runtime.mach.configure'):
                        run_check(['gecko-env', './mach', 'configure'], cwd=settings.repo_dir)

                    logger.info('Mach compile db...')
                    with stats.api.timer('runtime.mach.build-backend'):
                        run_check(['gecko-env', './mach', 'build-backend', '--backend=CompileDB'], cwd=settings.repo_dir)

                    logger.info('Mach pre-export...')
                    with stats.api.timer('runtime.mach.pre-export'):
                        run_check(['gecko-env', './mach', 'build', 'pre-export'], cwd=settings.repo_dir)

                    logger.info('Mach export...')
                    with stats.api.timer('runtime.mach.export'):
                        run_check(['gecko-env', './mach', 'build', 'export'], cwd=settings.repo_dir)
                except Exception as e:
                    raise AnalysisException('mach', str(e))

                # Download clang build from Taskcluster
                # Use new clang-tidy paths, https://bugzilla.mozilla.org/show_bug.cgi?id=1495641
                logger.info('Setup Taskcluster clang build...')
                setup_clang(repository='mozilla-inbound', revision='revision.874a07fdb045b725edc2aaa656a8620ff439ec10')

                # Use clang-tidy & clang-format
                if CLANG_TIDY in self.analyzers:
                    analyzers.append(ClangTidy)
                else:
                    logger.info('Skip clang-tidy')
                if CLANG_FORMAT in self.analyzers:
                    analyzers.append(ClangFormat)
                else:
                    logger.info('Skip clang-format')

            if revision.has_infer_files:
                if INFER in self.analyzers:
                    analyzers.append(Infer)
                    logger.info('Setup Taskcluster infer build...')
                    setup_infer(self.index_service)
                else:
                    logger.info('Skip infer')

            if not (revision.has_clang_files or revision.has_infer_files):
                logger.info('No clang or java files detected, skipping mach, infer and clang-*')

            # Setup python environment
            logger.info('Mach lint setup...')
            cmd = ['gecko-env', './mach', 'lint', '--list']
            with stats.api.timer('runtime.mach.lint'):
                out = run_check(cmd, cwd=settings.repo_dir)
            # mach lint reports setup problems on stdout rather than via a
            # non-zero exit, so inspect the output explicitly.
            if 'error: problem with lint setup' in out.decode('utf-8'):
                raise AnalysisException('mach', 'Mach lint setup failed')

            # Always use mozlint
            if MOZLINT in self.analyzers:
                analyzers.append(MozLint)
            else:
                logger.info('Skip mozlint')

        if not analyzers:
            logger.error('No analyzers to use on revision')
            return

        self.index(revision, state='analyzing')
        with stats.api.timer('runtime.issues'):
            # Detect initial issues
            if settings.publication == Publication.BEFORE_AFTER:
                # NOTE(review): this call unpacks two values while the
                # post-patch call below uses the result directly — verify
                # detect_issues' return shape; one of the two call sites
                # looks inconsistent.
                before_patch, _ = self.detect_issues(analyzers, revision)
                logger.info('Detected {} issue(s) before patch'.format(len(before_patch)))
                stats.api.increment('analysis.issues.before', len(before_patch))

            # Apply patch
            revision.apply(self.hg)

            # Detect new issues
            issues = self.detect_issues(analyzers, revision)
            logger.info('Detected {} issue(s) after patch'.format(len(issues)))
            stats.api.increment('analysis.issues.after', len(issues))

            # Mark newly found issues
            if settings.publication == Publication.BEFORE_AFTER:
                for issue in issues:
                    issue.is_new = issue not in before_patch

        # Avoid duplicates
        issues = set(issues)

        if not issues:
            logger.info('No issues, stopping there.')
            self.index(revision, state='done', issues=0)
            return

        # Report issues publication stats
        nb_issues = len(issues)
        nb_publishable = len([i for i in issues if i.is_publishable()])
        self.index(revision, state='analyzed', issues=nb_issues, issues_publishable=nb_publishable)
        stats.api.increment('analysis.issues.publishable', nb_publishable)

        # Publish reports about these issues
        with stats.api.timer('runtime.reports'):
            for reporter in self.reporters.values():
                reporter.publish(issues, revision)

        self.index(revision, state='done', issues=nb_issues, issues_publishable=nb_publishable)
Example #3
    def run(self, revision):
        '''
        Run the local static analysis workflow:
         * Pull revision from review
         * Checkout revision
         * Run static analyzers

        Returns a de-duplicated list of detected issues, or None when no
        analyzer is enabled for this revision.
        '''
        # Analyzer classes selected for this revision; populated below.
        analyzers = []

        # Add log to find Taskcluster task in papertrail
        logger.info(
            'New static analysis',
            taskcluster_task=settings.taskcluster.task_id,
            taskcluster_run=settings.taskcluster.run_id,
            channel=settings.app_channel,
            publication=settings.publication.name,
            revision=str(revision),
        )
        stats.api.event(
            title='Static analysis on {} for {}'.format(
                settings.app_channel, revision),
            text='Task {} #{}'.format(settings.taskcluster.task_id,
                                      settings.taskcluster.run_id),
        )
        stats.api.increment('analysis')

        with stats.api.timer('runtime.mercurial'):
            try:
                # Clone in a controllable process
                # and kill this new process if it exceeds the maximum allowed runtime
                clone = multiprocessing.Process(target=self.clone,
                                                args=(revision, ))
                clone.start()
                # join() with a timeout returns either when the clone finishes
                # or after settings.max_clone_runtime seconds, whichever is first.
                clone.join(settings.max_clone_runtime)
                if clone.is_alive():
                    logger.error(
                        'Clone watchdog expired, stopping immediately')

                    # Kill the clone process
                    clone.terminate()

                    # Stop workflow
                    raise AnalysisException('watchdog',
                                            'Clone watchdog expired')

                # Open a mercurial client in main process
                # (the clone itself happened in the child process above)
                self.hg = self.open_repository()

                # Index the task state now that the repository is available
                self.parent.index(revision, state='cloned')

                # Force cleanup to reset top of MU
                # otherwise previous pull are there
                self.hg.update(rev=self.top_revision, clean=True)
                logger.info('Set repo back to Mozilla unified top',
                            rev=self.hg.identify())
            except hglib.error.CommandError as e:
                # Wrap mercurial failures in the workflow's own exception type
                raise AnalysisException('mercurial', str(e))

            # Load and analyze revision patch
            revision.load(self.hg)
            revision.analyze_patch()

        with stats.api.timer('runtime.mach'):
            # Only run mach if revision has any C/C++ or Java files
            if revision.has_clang_files:
                self.do_build_setup()

                # Download clang build from Taskcluster
                # Use new clang-tidy paths, https://bugzilla.mozilla.org/show_bug.cgi?id=1495641
                logger.info('Setup Taskcluster clang build...')
                setup_clang(
                    repository='mozilla-inbound',
                    revision='revision.874a07fdb045b725edc2aaa656a8620ff439ec10'
                )

                # Use clang-tidy & clang-format
                if CLANG_TIDY in self.analyzers:
                    analyzers.append(ClangTidy)
                else:
                    logger.info('Skip clang-tidy')
                if CLANG_FORMAT in self.analyzers:
                    analyzers.append(ClangFormat)
                else:
                    logger.info('Skip clang-format')

                # Run Coverity Scan
                if COVERITY in self.analyzers:
                    logger.info('Setup Taskcluster coverity build...')
                    # NOTE(review): setup_coverity is not wrapped in
                    # try/except here, so a Coverity setup failure aborts
                    # the whole workflow — confirm this is intended.
                    setup_coverity(self.index_service)
                    analyzers.append(Coverity)
                else:
                    logger.info('Skip Coverity')

                if COVERAGE in self.analyzers:
                    analyzers.append(Coverage)
                else:
                    logger.info('Skip coverage analysis')

            if revision.has_infer_files:
                if INFER in self.analyzers:
                    analyzers.append(Infer)
                    logger.info('Setup Taskcluster infer build...')
                    setup_infer(self.index_service)
                else:
                    logger.info('Skip infer')

            if not (revision.has_clang_files or revision.has_infer_files):
                logger.info(
                    'No clang or java files detected, skipping mach, infer and clang-*'
                )

            # Setup python environment
            logger.info('Mach lint setup...')
            cmd = ['gecko-env', './mach', 'lint', '--list']
            with stats.api.timer('runtime.mach.lint'):
                out = run_check(cmd, cwd=settings.repo_dir)
            # mach lint reports setup problems on stdout rather than via a
            # non-zero exit, so inspect the output explicitly.
            if 'error: problem with lint setup' in out.decode('utf-8'):
                raise AnalysisException('mach', 'Mach lint setup failed')

            # Always use mozlint
            if MOZLINT in self.analyzers:
                analyzers.append(MozLint)
            else:
                logger.info('Skip mozlint')

        if not analyzers:
            logger.error('No analyzers to use on revision')
            return

        self.parent.index(revision, state='analyzing')
        with stats.api.timer('runtime.issues'):
            # Detect initial issues
            if settings.publication == Publication.BEFORE_AFTER:
                # Third positional argument presumably flags the "before
                # patch" pass — TODO confirm against detect_issues' signature.
                before_patch = self.detect_issues(analyzers, revision, True)
                logger.info('Detected {} issue(s) before patch'.format(
                    len(before_patch)))
                stats.api.increment('analysis.issues.before',
                                    len(before_patch))
                revision.reset()

            # Apply patch
            revision.apply(self.hg)

            # Headers / IDL changes can affect generated build files,
            # so redo the build setup after applying the patch.
            if settings.publication == Publication.BEFORE_AFTER and revision.has_clang_files \
                    and (revision.has_clang_header_files or revision.has_idl_files):
                self.do_build_setup()

            # Detect new issues
            issues = self.detect_issues(analyzers, revision)
            logger.info('Detected {} issue(s) after patch'.format(len(issues)))
            stats.api.increment('analysis.issues.after', len(issues))

            # Mark newly found issues
            if settings.publication == Publication.BEFORE_AFTER:
                for issue in issues:
                    issue.is_new = issue not in before_patch

        # Avoid duplicates
        # but still output a list to be compatible with LocalWorkflow
        return list(set(issues))