Example #1
    def rewrite_jsvm_lcov(self, zip_file_path):
        with self.build_finished_cv:
            while not self.build_finished:
                self.build_finished_cv.wait()

        # Strip the '.zip' extension to get the extraction directory
        out_dir = zip_file_path[:-4]

        with zipfile.ZipFile(zip_file_path, 'r') as zip_file:
            zip_file.extractall(out_dir)

        lcov_files = [
            os.path.abspath(os.path.join(out_dir, f))
            for f in os.listdir(out_dir)
        ]
        run_check([
            'gecko-env', './mach', 'python',
            'python/mozbuild/mozbuild/codecoverage/lcov_rewriter.py'
        ] + lcov_files,
                  cwd=self.repo_dir)

        for lcov_file in lcov_files:
            os.remove(lcov_file)

        lcov_out_files = [
            os.path.abspath(os.path.join(out_dir, f))
            for f in os.listdir(out_dir)
        ]
        for lcov_out_file in lcov_out_files:
            # Drop the four-character suffix the rewriter appends
            os.rename(lcov_out_file, lcov_out_file[:-4])
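All of these snippets call a run_check helper that is never defined in the
excerpts. A minimal stand-in consistent with the call sites (a command given
as a list, an optional cwd, raw output returned as bytes, an exception on
non-zero exit) could look like this hypothetical sketch:

import subprocess

def run_check(cmd, cwd=None):
    # Hypothetical stand-in for the project's helper: run the command,
    # raise subprocess.CalledProcessError on failure, return stdout bytes.
    return subprocess.check_output(cmd, cwd=cwd)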
Example #2
    def generate_suite_report(self, suite):
        output = grcov.report(self.artifactsHandler.get(suite=suite),
                              out_format='lcov')

        info_file = '%s.info' % suite

        with open(info_file, 'wb') as f:
            f.write(output)

        run_check([
            'genhtml', '-o',
            os.path.join(os.getcwd(), suite), '--show-details', '--highlight',
            '--ignore-errors', 'source', '--legend',
            os.path.join(os.getcwd(), info_file), '--prefix', self.repo_dir
        ],
                  cwd=self.repo_dir)

        os.remove(info_file)

        with tarfile.open('code-coverage-reports/%s.tar.xz' % suite,
                          'w:xz') as tar:
            tar.add(suite)
        shutil.rmtree(os.path.join(os.getcwd(), suite))

        logger.info('Suite report generated', suite=suite)
Example #3
def generate(suites, artifactsHandler, ccov_reports_dir, repo_dir):
    for suite in suites:
        output = grcov.report(artifactsHandler.get(suite=suite), out_format='lcov')

        info_file = os.path.join(ccov_reports_dir, '%s.info' % suite)

        with open(info_file, 'wb') as f:
            f.write(output)

        suite_dir = os.path.join(ccov_reports_dir, suite)
        run_check([
            'genhtml',
            '-o', suite_dir,
            '--show-details', '--highlight', '--ignore-errors', 'source',
            '--legend', info_file,
            '--prefix', repo_dir
        ], cwd=repo_dir)

        os.remove(info_file)

        with tarfile.open(os.path.join(ccov_reports_dir, '%s.tar.xz' % suite), 'w:xz') as tar:
            tar.add(suite_dir, arcname=suite)
        shutil.rmtree(suite_dir)

        logger.info('Suite report generated', suite=suite)
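A hypothetical invocation of generate (the suite names, handler, and paths
below are illustrative, not taken from the source):

generate(
    suites=['mochitest', 'xpcshell'],          # illustrative suite names
    artifactsHandler=artifactsHandler,         # assumed to exist in scope
    ccov_reports_dir='code-coverage-reports',
    repo_dir='/path/to/mozilla-central',
)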
Example #4
    def build_files(self):
        with open(os.path.join(self.repo_dir, '.mozconfig'), 'w') as f:
            f.write('mk_add_options MOZ_OBJDIR=@TOPSRCDIR@/obj-firefox\n')
            f.write('ac_add_options --enable-debug\n')
            f.write('ac_add_options --enable-artifact-builds\n')

        run_check(['gecko-env', './mach', 'build'], cwd=self.repo_dir)
        run_check(['gecko-env', './mach', 'build-backend', '-b', 'ChromeMap'],
                  cwd=self.repo_dir)
Example #5
    def update_github_repo(self):
        run_check(['git', 'config', '--global', 'http.postBuffer', '12M'])
        repo_url = 'https://%s:%[email protected]/marco-c/gecko-dev' % (self.gecko_dev_user, self.gecko_dev_pwd)
        repo_path = os.path.join(self.cache_root, 'gecko-dev')

        if not os.path.isdir(repo_path):
            retry(lambda: run_check(['git', 'clone', repo_url], cwd=self.cache_root))
        retry(lambda: run_check(['git', 'pull', 'https://github.com/mozilla/gecko-dev', 'master'], cwd=repo_path))
        retry(lambda: run_check(['git', 'push', repo_url, 'master'], cwd=repo_path))
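retry is likewise undefined in these excerpts; every call site hands it a
zero-argument callable. A minimal sketch, assuming a fixed number of attempts
with a pause between them (the real helper's policy may differ):

import time

def retry(operation, retries=5, wait_between_retries=30):
    # Hypothetical sketch: re-invoke the callable until it succeeds,
    # re-raising the last error once the attempts are exhausted.
    for attempt in range(retries):
        try:
            return operation()
        except Exception:
            if attempt == retries - 1:
                raise
            time.sleep(wait_between_retries)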
Example #6
        def generate_suite_report(suite):
            output = self.generate_info(suite=suite, out_format='lcov')

            self.generate_report(output, suite)
            os.remove('%s.info' % suite)

            run_check(['tar', '-cjf', 'code-coverage-reports/%s.tar.bz2' % suite, suite])
            shutil.rmtree(os.path.join(os.getcwd(), suite))

            logger.info('Suite report generated', suite=suite)
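The tar -cjf subprocess call above can also be written with the standard
library, mirroring the tarfile xz usage of Example #2; a sketch, with an
illustrative suite name:

import tarfile

suite = 'mochitest'  # illustrative

with tarfile.open('code-coverage-reports/%s.tar.bz2' % suite, 'w:bz2') as tar:
    tar.add(suite)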
Example #7
    def update_geckodev_repo(self):
        if self.gecko_dev_user is None or self.gecko_dev_pwd is None:
            return

        run_check(['git', 'config', '--global', 'http.postBuffer', '12M'])
        repo_url = 'https://%s:%[email protected]/marco-c/gecko-dev' % (self.gecko_dev_user, self.gecko_dev_pwd)
        repo_path = os.path.join(self.cache_root, 'gecko-dev')

        if not os.path.isdir(repo_path):
            retry(lambda: run_check(['git', 'clone', repo_url], cwd=self.cache_root))
        retry(lambda: run_check(['git', 'pull', 'https://github.com/mozilla/gecko-dev', 'master'], cwd=repo_path))
        retry(lambda: run_check(['git', 'push', repo_url, 'master'], cwd=repo_path))
Example #8
    def generate_report(self, output, suite):
        info_file = '%s.info' % suite

        with open(info_file, 'wb') as f:
            f.write(output)

        run_check([
            'genhtml', '-o',
            os.path.join(os.getcwd(), suite), '--show-details', '--highlight',
            '--ignore-errors', 'source', '--legend',
            os.path.join(os.getcwd(), info_file), '--prefix', self.repo_dir
        ],
                  cwd=self.repo_dir)
Example #9
    def build_files(self):
        with open(os.path.join(self.repo_dir, '.mozconfig'), 'w') as f:
            f.write('mk_add_options MOZ_OBJDIR=@TOPSRCDIR@/obj-firefox\n')
            f.write('ac_add_options --enable-debug\n')
            f.write('ac_add_options --enable-artifact-builds\n')

        retry(lambda: run_check(['gecko-env', './mach', 'build'], cwd=self.repo_dir))
        retry(lambda: run_check(['gecko-env', './mach', 'build-backend', '-b', 'ChromeMap'], cwd=self.repo_dir))

        logger.info('Build successful')

        # Set the flag while holding the lock so a waiter cannot miss it
        with self.build_finished_cv:
            self.build_finished = True
            self.build_finished_cv.notify_all()
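Examples #1 and #9 are the two halves of a condition-variable handshake:
artifact processing blocks until the build thread sets build_finished and
notifies. A self-contained sketch of the same pattern:

import threading

build_finished = False
build_finished_cv = threading.Condition()

def wait_for_build():
    # Consumer side (Example #1): sleep until the build flag is set.
    with build_finished_cv:
        while not build_finished:
            build_finished_cv.wait()

def mark_build_finished():
    # Producer side (Example #9): set the flag under the lock, wake waiters.
    global build_finished
    with build_finished_cv:
        build_finished = True
        build_finished_cv.notify_all()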
Example #10
    def do_build_setup(self):
        # Mach pre-setup with mozconfig
        try:
            logger.info('Mach delete any existing obj dir')
            obj_dir = os.path.join(settings.repo_dir, 'obj-x86_64-pc-linux-gnu')
            if os.path.exists(obj_dir):
                shutil.rmtree(obj_dir)

            logger.info('Mach configure...')
            with stats.api.timer('runtime.mach.configure'):
                run_check(['gecko-env', './mach', 'configure'], cwd=settings.repo_dir)

            logger.info('Mach compile db...')
            with stats.api.timer('runtime.mach.build-backend'):
                run_check(['gecko-env', './mach', 'build-backend', '--backend=CompileDB'], cwd=settings.repo_dir)

            logger.info('Mach pre-export...')
            with stats.api.timer('runtime.mach.pre-export'):
                run_check(['gecko-env', './mach', 'build', 'pre-export'], cwd=settings.repo_dir)

            logger.info('Mach export...')
            with stats.api.timer('runtime.mach.export'):
                run_check(['gecko-env', './mach', 'build', 'export'], cwd=settings.repo_dir)
        except Exception as e:
            raise AnalysisException('mach', str(e))
Example #11
    def do_build_setup(self):
        # Mach pre-setup with mozconfig
        try:
            logger.info('Mach delete any existing obj dir')
            obj_dir = os.path.join(settings.repo_dir,
                                   'obj-x86_64-pc-linux-gnu')
            if os.path.exists(obj_dir):
                shutil.rmtree(obj_dir)

            logger.info('Mach configure...')
            with stats.api.timer('runtime.mach.configure'):
                run_check(['gecko-env', './mach', 'configure'],
                          cwd=settings.repo_dir)

            logger.info('Mach compile db...')
            with stats.api.timer('runtime.mach.build-backend'):
                run_check([
                    'gecko-env', './mach', 'build-backend',
                    '--backend=CompileDB'
                ],
                          cwd=settings.repo_dir)

            logger.info('Mach pre-export...')
            with stats.api.timer('runtime.mach.pre-export'):
                run_check(['gecko-env', './mach', 'build', 'pre-export'],
                          cwd=settings.repo_dir)

            logger.info('Mach export...')
            with stats.api.timer('runtime.mach.export'):
                run_check(['gecko-env', './mach', 'build', 'export'],
                          cwd=settings.repo_dir)
        except Exception as e:
            raise AnalysisException('mach', str(e))
Example #12
    def run(self, revision):
        '''
        Run modified files with specified checks through infer
        using threaded workers (communicate through queues)
        Output a list of InferIssue
        '''
        assert isinstance(revision, Revision)
        self.revision = revision

        with AndroidConfig():
            # Mach pre-setup with mozconfig
            logger.info('Mach configure for infer...')
            run_check(['gecko-env', './mach', 'configure'],
                      cwd=settings.repo_dir)

            # Run all files in a single command
            # through mach static-analysis
            cmd = [
                'gecko-env', './mach', '--log-no-times', 'static-analysis',
                'check-java'
            ] + list(revision.files)
            logger.info('Running static-analysis', cmd=' '.join(cmd))

            # Run command
            try:
                infer_output = subprocess.check_output(cmd,
                                                       cwd=settings.repo_dir)
            except subprocess.CalledProcessError as e:
                raise AnalysisException(
                    'infer',
                    'Mach static analysis failed: {}'.format(e.output))

        report_file = os.path.join(settings.repo_dir, 'infer-out',
                                   'report.json')
        with open(report_file) as report:
            infer_output = json.load(report)

        # Dump raw infer output as a Taskcluster artifact (for debugging)
        infer_output_path = os.path.join(
            settings.taskcluster.results_dir,
            '{}-infer.txt'.format(repr(revision)),
        )
        with open(infer_output_path, 'w') as f:
            f.write(json.dumps(infer_output, indent=2))
        issues = self.parse_issues(infer_output, revision)

        # Report stats for these issues
        stats.report_issues('infer', issues)
        return issues
Example #13
    def generate_info(self, commit_sha=None, suite=None, chunk=None, out_format='coveralls', options=[]):
        cmd = [
          'grcov',
          '-t', out_format,
          '-s', self.repo_dir,
          '-p', '/home/worker/workspace/build/src/',
          '--ignore-dir', 'gcc',
          '--ignore-not-existing',
        ]

        if 'coveralls' in out_format:
            r = requests.get('https://hg.mozilla.org/mozilla-central/json-rev/%s' % self.revision)
            r.raise_for_status()
            push_id = r.json()['pushid']

            cmd.extend([
              '--service-name', 'TaskCluster',
              '--service-number', str(push_id),
              '--commit-sha', commit_sha,
              '--token', self.coveralls_token,
            ])

            if suite is not None:
                cmd.extend(['--service-job-number', str(self.suites.index(suite) + 1)])
            else:
                cmd.extend(['--service-job-number', '1'])

        cmd.extend(self.get_coverage_artifacts(suite, chunk))
        cmd.extend(options)

        return run_check(cmd)
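The push-id lookup above is the only network call in this helper; isolated,
it amounts to:

import requests

def fetch_push_id(revision):
    # Resolve a mozilla-central revision to its push id via the json-rev
    # endpoint, exactly as Example #13 does.
    r = requests.get('https://hg.mozilla.org/mozilla-central/json-rev/%s'
                     % revision)
    r.raise_for_status()
    return r.json()['pushid']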
Example #14
def report(artifacts, source_dir=None, service_number=None, commit_sha='unused', token='unused', out_format='coveralls', options=[]):
    cmd = [
      'grcov',
      '-t', out_format,
    ]

    if 'coveralls' in out_format:
        cmd.extend([
          '--service-name', 'TaskCluster',
          '--commit-sha', commit_sha,
          '--token', token,
          '--service-job-number', '1',
        ])

        if service_number is not None:
            cmd.extend(['--service-number', str(service_number)])

    if source_dir is not None:
        cmd.extend(['-s', source_dir])
        cmd.append('--ignore-not-existing')

    cmd.extend(artifacts)
    cmd.extend(options)

    try:
        return run_check(cmd)
    except Exception:
        logger.error('Error while running grcov')
        raise
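A hypothetical call to report (the artifact path is illustrative):

output = report(
    ['ccov-artifacts/linux_mochitest_grcov.zip'],  # illustrative artifact
    source_dir='/path/to/mozilla-central',
    out_format='lcov',
)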
Example #15
    def run(self, revision):
        '''
        Run modified files with specified checks through infer
        using threaded workers (communicate through queues)
        Output a list of InferIssue
        '''
        assert isinstance(revision, Revision)
        self.revision = revision

        with AndroidConfig():
            # Mach pre-setup with mozconfig
            logger.info('Mach configure for infer...')
            run_check(['gecko-env', './mach', 'configure'],
                      cwd=settings.repo_dir)

            # Run all files in a single command
            # through mach static-analysis
            cmd = [
                'gecko-env',
                './mach', '--log-no-times', 'static-analysis', 'check-java'
            ] + list(revision.files)
            logger.info('Running static-analysis', cmd=' '.join(cmd))

            # Run command
            try:
                infer_output = subprocess.check_output(cmd, cwd=settings.repo_dir)
            except subprocess.CalledProcessError as e:
                raise AnalysisException('infer', 'Mach static analysis failed: {}'.format(e.output))

        report_file = os.path.join(settings.repo_dir, 'infer-out', 'report.json')
        with open(report_file) as report:
            infer_output = json.load(report)

        # Dump raw infer output as a Taskcluster artifact (for debugging)
        infer_output_path = os.path.join(
            settings.taskcluster.results_dir,
            '{}-infer.txt'.format(repr(revision)),
        )
        with open(infer_output_path, 'w') as f:
            f.write(json.dumps(infer_output, indent=2))
        issues = self.parse_issues(infer_output, revision)

        # Report stats for these issues
        stats.report_issues('infer', issues)
        return issues
Example #16
    def rewrite_jsvm_lcov(self, zip_file_path):
        with self.build_finished_cv:
            while not self.build_finished:
                self.build_finished_cv.wait()

        out_dir = zip_file_path[:-4]
        out_file = out_dir + '.info'

        with ZipFile(zip_file_path, 'r') as z:
            z.extractall(out_dir)

        run_check([
            'gecko-env', './mach', 'python',
            'python/mozbuild/mozbuild/codecoverage/lcov_rewriter.py',
            os.path.abspath(out_dir),
            '--output-file', os.path.abspath(out_file)
        ], cwd=self.repo_dir)

        shutil.rmtree(out_dir)
Example #17
    def __init__(self, work_dir, build_dir):
        assert os.path.isdir(work_dir)

        self.work_dir = work_dir
        self.build_dir = os.path.join(work_dir, build_dir)

        # Dirty hack to skip Taskcluster proxy usage when loading artifacts
        if 'TASK_ID' in os.environ:
            del os.environ['TASK_ID']

        # Check the local clang is available
        logger.info('Loading Mozilla clang-tidy...')
        run_check(CLANG_SETUP_CMD, cwd=work_dir)
        self.binary = os.path.join(
            work_dir,
            'clang/bin/clang-tidy',
        )
        assert os.path.exists(self.binary), \
            'Missing clang tidy in {}'.format(self.binary)
Example #18
    def generate_info(self, commit_sha, coveralls_token, suite=None):
        files = os.listdir('ccov-artifacts')
        ordered_files = []
        for fname in files:
            if 'grcov' in fname and not fname.endswith('.zip'):
                continue
            if 'jsvm' in fname and fname.endswith('.zip'):
                continue

            if suite is None or suite in fname:
                ordered_files.append('ccov-artifacts/' + fname)

        r = requests.get('https://hg.mozilla.org/mozilla-central/json-rev/%s' %
                         self.revision)
        r.raise_for_status()
        push_id = r.json()['pushid']

        cmd = [
            'grcov',
            '-t',
            'coveralls',
            '-s',
            self.repo_dir,
            '-p',
            '/home/worker/workspace/build/src/',
            '--ignore-dir',
            'gcc',
            '--ignore-not-existing',
            '--service-name',
            'TaskCluster',
            '--service-number',
            str(push_id),
            '--commit-sha',
            commit_sha,
            '--token',
            coveralls_token,
        ]

        if suite is not None:
            cmd.extend(
                ['--service-job-number',
                 str(self.suites.index(suite) + 1)])
        else:
            cmd.extend(['--service-job-number', '1'])

        cmd.extend(ordered_files)

        return run_check(cmd, cwd=os.getcwd())
Example #19
    def generate_info(self, commit_sha, coveralls_token, suite=None):
        files = os.listdir('ccov-artifacts')
        ordered_files = []
        for fname in files:
            if ('gcda' in fname
                    or 'gcno' in fname) and not fname.endswith('.zip'):
                continue
            if 'jsvm' in fname and fname.endswith('.zip'):
                continue

            if 'gcno' in fname or suite is None or suite in fname:
                ordered_files.append('ccov-artifacts/' + fname)

        cmd = [
            'grcov',
            '-t',
            'coveralls',
            '-s',
            self.repo_dir,
            '-p',
            '/home/worker/workspace/build/src/',
            '--ignore-dir',
            'gcc',
            '--ignore-not-existing',
            '--service-name',
            'TaskCluster',
            '--service-number',
            datetime.today().strftime('%Y%m%d'),
            '--commit-sha',
            commit_sha,
            '--token',
            coveralls_token,
        ]

        if suite is not None:
            cmd.extend(
                ['--service-job-number',
                 str(self.suites.index(suite) + 1)])
        else:
            cmd.extend(['--service-job-number', '1'])

        cmd.extend(ordered_files)

        return run_check(cmd, cwd=os.getcwd())
Example #20
def report(artifacts,
           source_dir=None,
           service_number=None,
           commit_sha='unused',
           token='unused',
           out_format='coveralls',
           options=[]):
    cmd = [
        'grcov',
        '-t',
        out_format,
        '-p',
        '/home/worker/workspace/build/src/',
    ]

    if 'coveralls' in out_format:
        cmd.extend([
            '--service-name',
            'TaskCluster',
            '--commit-sha',
            commit_sha,
            '--token',
            token,
            '--service-job-number',
            '1',
        ])

        if service_number is not None:
            cmd.extend(['--service-number', str(service_number)])

    if source_dir is not None:
        cmd.extend(['-s', source_dir])
        cmd.append('--ignore-not-existing')

    cmd.extend(artifacts)
    cmd.extend(options)

    try:
        return run_check(cmd)
    except Exception:
        logger.error('Error while running grcov')
        raise
Example #21
def report(artifacts,
           source_dir=None,
           service_number=None,
           commit_sha='unused',
           token='unused',
           out_format='coveralls',
           options=[]):
    cmd = [
        'grcov',
        '-t',
        out_format,
        '-p',
        '/home/worker/workspace/build/src/',
        '--ignore-dir',
        'gcc',
    ]

    if 'coveralls' in out_format:
        cmd.extend([
            '--service-name',
            'TaskCluster',
            '--commit-sha',
            commit_sha,
            '--token',
            token,
            '--service-job-number',
            '1',
        ])

        if service_number is not None:
            cmd.extend(['--service-number', str(service_number)])

    if source_dir is not None:
        cmd.extend(['-s', source_dir])

    cmd.extend(artifacts)
    cmd.extend(options)

    return run_check(cmd)
Example #22
    def update_codecoveragereports_repo(self):
        if self.gecko_dev_user is None or self.gecko_dev_pwd is None:
            return

        run_check(['git', 'config', '--global', 'http.postBuffer', '12M'])
        run_check(
            ['git', 'config', '--global', 'user.email', '*****@*****.**'])
        run_check(
            ['git', 'config', '--global', 'user.name', 'Report Uploader'])
        repo_url = 'https://%s:%[email protected]/marco-c/code-coverage-reports' % (
            self.gecko_dev_user, self.gecko_dev_pwd)
        run_check(['git', 'init'])
        run_check(['git', 'add', '*'])
        run_check(['git', 'commit', '-m', 'Coverage reports upload'])
        retry(
            lambda: run_check(['git', 'push', repo_url, 'master', '--force']))
Example #23
    def run(self, revision):
        '''
        Run the static analysis workflow:
         * Pull revision from review
         * Checkout revision
         * Run static analysis
         * Publish results
        '''
        assert revision.mercurial is not None, \
            'Cannot run without a mercurial revision'

        # Add log to find Taskcluster task in papertrail
        logger.info(
            'New static analysis',
            taskcluster_task=self.taskcluster_task_id,
            taskcluster_run=self.taskcluster_run_id,
            channel=settings.app_channel,
            revision=revision,
        )

        # Setup tools (clang & mozlint)
        clang_tidy = CLANG_TIDY in self.analyzers and ClangTidy(
            self.repo_dir, settings.target)
        clang_format = CLANG_FORMAT in self.analyzers and ClangFormat(
            self.repo_dir)
        mozlint = MOZLINT in self.analyzers and MozLint(self.repo_dir)

        # Force cleanup to reset tip,
        # otherwise previous pulls are still there
        self.hg.update(rev=b'tip', clean=True)

        # Pull revision from review
        self.hg.pull(source=REPO_REVIEW,
                     rev=revision.mercurial,
                     update=True,
                     force=True)

        # Update to the target revision
        self.hg.update(rev=revision.mercurial, clean=True)

        # Get the parents revisions
        parent_rev = 'parents({})'.format(revision.mercurial)
        parents = self.hg.identify(id=True,
                                   rev=parent_rev).decode('utf-8').strip()

        # Find modified files by this revision
        modified_files = []
        for parent in parents.split('\n'):
            changeset = '{}:{}'.format(parent, revision.mercurial)
            status = self.hg.status(change=[
                changeset,
            ])
            modified_files += [f.decode('utf-8') for _, f in status]
        logger.info('Modified files', files=modified_files)

        # List all modified lines from current revision changes
        patch = Patch.parse_patch(
            self.hg.diff(change=revision.mercurial, git=True).decode('utf-8'),
            skip_comments=False,
        )
        modified_lines = {
            # Use all changes in new files
            filename: diff.get('touched', []) + diff.get('added', [])
            for filename, diff in patch.items()
        }

        # mach configure with mozconfig
        logger.info('Mach configure...')
        run_check(['gecko-env', './mach', 'configure'], cwd=self.repo_dir)

        # Build CompileDB backend
        logger.info('Mach build backend...')
        cmd = ['gecko-env', './mach', 'build-backend', '--backend=CompileDB']
        run_check(cmd, cwd=self.repo_dir)

        # Build exports
        logger.info('Mach build exports...')
        run_check(['gecko-env', './mach', 'build', 'pre-export'],
                  cwd=self.repo_dir)
        run_check(['gecko-env', './mach', 'build', 'export'],
                  cwd=self.repo_dir)

        # Run static analysis through clang-tidy
        issues = []
        if clang_tidy:
            logger.info('Run clang-tidy...')
            issues += clang_tidy.run(settings.clang_checkers, modified_lines)
        else:
            logger.info('Skip clang-tidy')

        # Run clang-format on modified files
        diff_url = None
        if clang_format:
            logger.info('Run clang-format...')
            format_issues, patched = clang_format.run(settings.cpp_extensions,
                                                      modified_lines)
            issues += format_issues
            if patched:
                # Get current diff on these files
                logger.info('Found clang-format issues', files=patched)
                files = [
                    os.path.join(self.repo_dir, x).encode('utf-8')
                    for x in patched
                ]
                diff = self.hg.diff(files)
                assert diff is not None and diff != b'', \
                    'Empty diff'

                # Write diff in results directory
                diff_path = os.path.join(self.taskcluster_results_dir,
                                         revision.build_diff_name())
                with open(diff_path, 'w') as f:
                    length = f.write(diff.decode('utf-8'))
                    logger.info('Diff from clang-format dumped',
                                path=diff_path,
                                length=length)  # noqa

                # Build diff download url
                diff_url = ARTIFACT_URL.format(
                    task_id=self.taskcluster_task_id,
                    run_id=self.taskcluster_run_id,
                    diff_name=revision.build_diff_name(),
                )
                logger.info('Diff available online', url=diff_url)
            else:
                logger.info('No clang-format issues')

        else:
            logger.info('Skip clang-format')

        # Run linter
        if mozlint:
            logger.info('Run mozlint...')
            issues += mozlint.run(modified_lines)
        else:
            logger.info('Skip mozlint')

        logger.info('Detected {} issue(s)'.format(len(issues)))
        if not issues:
            logger.info('No issues, stopping there.')
            return

        # Publish reports about these issues
        for reporter in self.reporters.values():
            reporter.publish(issues, revision, diff_url)
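The parents/status dance above recurs in Examples #25 and #26; extracted into
a standalone helper it would look roughly like this (a sketch, assuming a
plain hglib client):

import hglib

def modified_files(repo_path, revision):
    # List files touched between a revision and each of its parents,
    # decoding hglib's bytes results, as in Example #23.
    client = hglib.open(repo_path)
    parents = client.identify(
        id=True, rev='parents({})'.format(revision)).decode('utf-8').strip()
    files = []
    for parent in parents.split('\n'):
        for _, f in client.status(change=['{}:{}'.format(parent, revision)]):
            files.append(f.decode('utf-8'))
    return files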
Example #24
    def run(self, revision):
        '''
        Run the static analysis workflow:
         * Pull revision from review
         * Checkout revision
         * Run static analysis
         * Publish results
        '''
        assert revision.mercurial is not None, \
            'Cannot run without a mercurial revision'

        # Add log to find Taskcluster task in papertrail
        logger.info(
            'New static analysis',
            taskcluster_task=self.taskcluster_task_id,
            taskcluster_run=self.taskcluster_run_id,
            channel=settings.app_channel,
            revision=revision,
        )
        stats.api.event(
            title='Static analysis on {} for {}'.format(
                settings.app_channel, revision.mercurial[:12]),
            text='Task {} #{}\n{}'.format(self.taskcluster_task_id,
                                          self.taskcluster_run_id, revision),
        )
        stats.api.increment('analysis')

        # Setup tools (clang & mozlint)
        clang_tidy = CLANG_TIDY in self.analyzers and ClangTidy(
            self.repo_dir, settings.target)
        clang_format = CLANG_FORMAT in self.analyzers and ClangFormat(
            self.repo_dir)
        mozlint = MOZLINT in self.analyzers and MozLint(self.repo_dir)

        with stats.api.timer('runtime.mercurial'):
            # Force cleanup to reset tip,
            # otherwise previous pulls are still there
            self.hg.update(rev=b'tip', clean=True)

            # Pull revision from review
            self.hg.pull(source=REPO_REVIEW,
                         rev=revision.mercurial,
                         update=True,
                         force=True)

            # Update to the target revision
            self.hg.update(rev=revision.mercurial, clean=True)

            # Analyze files in revision
            revision.analyze_files(self.hg)

        with stats.api.timer('runtime.mach'):
            # Only run mach if revision has any C/C++ files
            if revision.has_clang_files:
                # mach configure with mozconfig
                logger.info('Mach configure...')
                run_check(['gecko-env', './mach', 'configure'],
                          cwd=self.repo_dir)

                # Build CompileDB backend
                logger.info('Mach build backend...')
                cmd = [
                    'gecko-env', './mach', 'build-backend',
                    '--backend=CompileDB'
                ]
                run_check(cmd, cwd=self.repo_dir)

            else:
                logger.info('No clang files detected, skipping mach')

            # Setup python environment
            logger.info('Mach python setup...')
            cmd = ['gecko-env', './mach', 'python', '--version']
            run_check(cmd, cwd=self.repo_dir)

        # Run static analysis through clang-tidy
        issues = []
        if clang_tidy and revision.has_clang_files:
            logger.info('Run clang-tidy...')
            issues += clang_tidy.run(settings.clang_checkers, revision)
        else:
            logger.info('Skip clang-tidy')

        # Run clang-format on modified files
        diff_url = None
        if clang_format and revision.has_clang_files:
            logger.info('Run clang-format...')
            format_issues, patched = clang_format.run(settings.cpp_extensions,
                                                      revision)
            issues += format_issues
            if patched:
                # Get current diff on these files
                logger.info('Found clang-format issues', files=patched)
                files = [
                    os.path.join(self.repo_dir, x).encode('utf-8')
                    for x in patched
                ]
                diff = self.hg.diff(files)
                assert diff is not None and diff != b'', \
                    'Empty diff'

                # Write diff in results directory
                diff_path = os.path.join(self.taskcluster_results_dir,
                                         revision.build_diff_name())
                with open(diff_path, 'w') as f:
                    length = f.write(diff.decode('utf-8'))
                    logger.info('Diff from clang-format dumped',
                                path=diff_path,
                                length=length)  # noqa

                # Build diff download url
                diff_url = ARTIFACT_URL.format(
                    task_id=self.taskcluster_task_id,
                    run_id=self.taskcluster_run_id,
                    diff_name=revision.build_diff_name(),
                )
                logger.info('Diff available online', url=diff_url)
            else:
                logger.info('No clang-format issues')

        else:
            logger.info('Skip clang-format')

        # Run linter
        if mozlint:
            logger.info('Run mozlint...')
            issues += mozlint.run(revision)
        else:
            logger.info('Skip mozlint')

        logger.info('Detected {} issue(s)'.format(len(issues)))
        if not issues:
            logger.info('No issues, stopping there.')
            return

        # Publish reports about these issues
        with stats.api.timer('runtime.reports'):
            for reporter in self.reporters.values():
                reporter.publish(issues, revision, diff_url)
Example #25
    def run(self, revision, review_request_id, diffset_revision):
        '''
        Run the static analysis workflow:
         * Pull revision from review
         * Checkout revision
         * Run static analysis
         * Publish results
        '''
        # Add log to find Taskcluster task in papertrail
        logger.info(
            'New static analysis',
            taskcluster=self.taskcluster_id,
            channel=self.app_channel,
            revision=revision,
            review_request_id=review_request_id,
            diffset_revision=diffset_revision,
        )

        # Create batch review
        self.mozreview = BatchReview(
            self.mozreview_api_root,
            review_request_id,
            diffset_revision,
            max_comments=MAX_COMMENTS,
        )

        # Setup clang
        clang = ClangTidy(self.repo_dir, settings.target, self.mozreview)

        # Force cleanup to reset tip,
        # otherwise previous pulls are still there
        self.hg.update(rev=b'tip', clean=True)

        # Pull revision from review
        self.hg.pull(source=REPO_REVIEW, rev=revision, update=True, force=True)

        # Get the parents revisions
        parent_rev = 'parents({})'.format(revision)
        parents = self.hg.identify(id=True,
                                   rev=parent_rev).decode('utf-8').strip()

        # Find modified files by this revision
        modified_files = []
        for parent in parents.split('\n'):
            changeset = '{}:{}'.format(parent, revision)
            status = self.hg.status(change=[
                changeset,
            ])
            modified_files += [f.decode('utf-8') for _, f in status]
        logger.info('Modified files', files=modified_files)

        # mach configure with mozconfig
        logger.info('Mach configure...')
        run_check(['gecko-env', './mach', 'configure'], cwd=self.repo_dir)

        # Build CompileDB backend
        logger.info('Mach build backend...')
        cmd = ['gecko-env', './mach', 'build-backend', '--backend=CompileDB']
        run_check(cmd, cwd=self.repo_dir)

        # Build exports
        logger.info('Mach build exports...')
        run_check(['gecko-env', './mach', 'build', 'pre-export'],
                  cwd=self.repo_dir)
        run_check(['gecko-env', './mach', 'build', 'export'],
                  cwd=self.repo_dir)

        # Run static analysis through run-clang-tidy.py
        logger.info('Run clang-tidy...')
        issues = clang.run(settings.clang_checkers, modified_files)

        logger.info('Detected {} code issue(s)'.format(len(issues)))
        if not issues:
            logger.info('No issues, stopping there.')
            return

        # Publish on mozreview
        self.publish_mozreview(review_request_id, diffset_revision, issues)

        # Notify by email
        logger.info('Send email to admins')
        self.notify_admins(review_request_id, issues)
Example #26
    def run(self, revision, review_request_id, diffset_revision):
        '''
        Run the static analysis workflow:
         * Pull revision from review
         * Checkout revision
         * Run static analysis
        '''
        # Force cleanup to reset tip,
        # otherwise previous pulls are still there
        self.hg.update(rev=b'tip', clean=True)

        # Pull revision from review
        logger.info('Pull from review', revision=revision)
        self.hg.pull(source=REPO_REVIEW, rev=revision, update=True, force=True)

        # Get the parents revisions
        parent_rev = 'parents({})'.format(revision)
        parents = self.hg.identify(id=True,
                                   rev=parent_rev).decode('utf-8').strip()

        # Find modified files by this revision
        modified_files = []
        for parent in parents.split('\n'):
            changeset = '{}:{}'.format(parent, revision)
            status = self.hg.status(change=[
                changeset,
            ])
            modified_files += [f.decode('utf-8') for _, f in status]
        logger.info('Modified files', files=modified_files)

        # mach configure
        logger.info('Mach configure...')
        run_check(['gecko-env', './mach', 'configure'], cwd=self.repo_dir)

        # Build CompileDB backend
        logger.info('Mach build backend...')
        cmd = ['gecko-env', './mach', 'build-backend', '--backend=CompileDB']
        run_check(cmd, cwd=self.repo_dir)

        # Build exports
        logger.info('Mach build exports...')
        run_check(['gecko-env', './mach', 'build', 'pre-export'],
                  cwd=self.repo_dir)
        run_check(['gecko-env', './mach', 'build', 'export'],
                  cwd=self.repo_dir)

        # Run static analysis through run-clang-tidy.py
        logger.info('Run clang-tidy...')
        checks = [
            '-*',
            'clang-analyzer-deadcode.DeadStores',
            'modernize-loop-convert',
            'modernize-use-auto',
            'modernize-use-default',
            'modernize-raw-string-literal',
            # 'modernize-use-bool-literals', (too noisy because of `while (0)` in many macros)
            'modernize-use-override',
            'modernize-use-nullptr',
            'mozilla-*',
            'performance-faster-string-find',
            'performance-for-range-copy',
            'readability-else-after-return',
            'readability-misleading-indentation',
        ]
        clang = ClangTidy(self.repo_dir, 'obj-x86_64-pc-linux-gnu')
        issues = clang.run(checks, modified_files)

        logger.info('Detected {} code issue(s)'.format(len(issues)))

        # Notify by email
        if issues:
            logger.info('Send email to admins')
            self.notify_admins(review_request_id, issues)
Example #27
    def update_codecoveragereports_repo(self):
        if self.gecko_dev_user is None or self.gecko_dev_pwd is None:
            return

        run_check(['git', 'config', '--global', 'http.postBuffer', '12M'])
        run_check(['git', 'config', '--global', 'user.email', '*****@*****.**'])
        run_check(['git', 'config', '--global', 'user.name', 'Report Uploader'])
        repo_url = 'https://%s:%[email protected]/marco-c/code-coverage-reports' % (self.gecko_dev_user, self.gecko_dev_pwd)
        run_check(['git', 'init'])
        run_check(['git', 'add', '*'])
        run_check(['git', 'commit', '-m', 'Coverage reports upload'])
        retry(lambda: run_check(['git', 'push', repo_url, 'master', '--force']))
Example #28
    def run(self, revision):
        '''
        Run the local static analysis workflow:
         * Pull revision from review
         * Checkout revision
         * Run static analyzers
        '''
        analyzers = []

        # Add log to find Taskcluster task in papertrail
        logger.info(
            'New static analysis',
            taskcluster_task=settings.taskcluster.task_id,
            taskcluster_run=settings.taskcluster.run_id,
            channel=settings.app_channel,
            publication=settings.publication.name,
            revision=str(revision),
        )
        stats.api.event(
            title='Static analysis on {} for {}'.format(
                settings.app_channel, revision),
            text='Task {} #{}'.format(settings.taskcluster.task_id,
                                      settings.taskcluster.run_id),
        )
        stats.api.increment('analysis')

        with stats.api.timer('runtime.mercurial'):
            try:
                # Clone in a controllable process
                # and kill this new process if it exceeds the maximum allowed runtime
                clone = multiprocessing.Process(target=self.clone,
                                                args=(revision, ))
                clone.start()
                clone.join(settings.max_clone_runtime)
                if clone.is_alive():
                    logger.error(
                        'Clone watchdog expired, stopping immediately')

                    # Kill the clone process
                    clone.terminate()

                    # Stop workflow
                    raise AnalysisException('watchdog',
                                            'Clone watchdog expired')

                # Open a mercurial client in main process
                self.hg = self.open_repository()

                # Start by cloning the mercurial repository
                self.parent.index(revision, state='cloned')

                # Force cleanup to reset to the top of mozilla-unified,
                # otherwise previous pulls are still there
                self.hg.update(rev=self.top_revision, clean=True)
                logger.info('Set repo back to Mozilla unified top',
                            rev=self.hg.identify())
            except hglib.error.CommandError as e:
                raise AnalysisException('mercurial', str(e))

            # Load and analyze revision patch
            revision.load(self.hg)
            revision.analyze_patch()

        with stats.api.timer('runtime.mach'):
            # Only run mach if revision has any C/C++ or Java files
            if revision.has_clang_files:
                self.do_build_setup()

                # Download clang build from Taskcluster
                # Use new clang-tidy paths, https://bugzilla.mozilla.org/show_bug.cgi?id=1495641
                logger.info('Setup Taskcluster clang build...')
                setup_clang(
                    repository='mozilla-inbound',
                    revision='revision.874a07fdb045b725edc2aaa656a8620ff439ec10'
                )

                # Use clang-tidy & clang-format
                if CLANG_TIDY in self.analyzers:
                    analyzers.append(ClangTidy)
                else:
                    logger.info('Skip clang-tidy')
                if CLANG_FORMAT in self.analyzers:
                    analyzers.append(ClangFormat)
                else:
                    logger.info('Skip clang-format')

                # Run Coverity Scan
                if COVERITY in self.analyzers:
                    logger.info('Setup Taskcluster coverity build...')
                    setup_coverity(self.index_service)
                    analyzers.append(Coverity)
                else:
                    logger.info('Skip Coverity')

                if COVERAGE in self.analyzers:
                    analyzers.append(Coverage)
                else:
                    logger.info('Skip coverage analysis')

            if revision.has_infer_files:
                if INFER in self.analyzers:
                    analyzers.append(Infer)
                    logger.info('Setup Taskcluster infer build...')
                    setup_infer(self.index_service)
                else:
                    logger.info('Skip infer')

            if not (revision.has_clang_files or revision.has_infer_files):
                logger.info(
                    'No clang or java files detected, skipping mach, infer and clang-*'
                )

            # Setup python environment
            logger.info('Mach lint setup...')
            cmd = ['gecko-env', './mach', 'lint', '--list']
            with stats.api.timer('runtime.mach.lint'):
                out = run_check(cmd, cwd=settings.repo_dir)
            if 'error: problem with lint setup' in out.decode('utf-8'):
                raise AnalysisException('mach', 'Mach lint setup failed')

            # Always use mozlint
            if MOZLINT in self.analyzers:
                analyzers.append(MozLint)
            else:
                logger.info('Skip mozlint')

        if not analyzers:
            logger.error('No analyzers to use on revision')
            return

        self.parent.index(revision, state='analyzing')
        with stats.api.timer('runtime.issues'):
            # Detect initial issues
            if settings.publication == Publication.BEFORE_AFTER:
                before_patch = self.detect_issues(analyzers, revision, True)
                logger.info('Detected {} issue(s) before patch'.format(
                    len(before_patch)))
                stats.api.increment('analysis.issues.before',
                                    len(before_patch))
                revision.reset()

            # Apply patch
            revision.apply(self.hg)

            if settings.publication == Publication.BEFORE_AFTER and revision.has_clang_files \
                    and (revision.has_clang_header_files or revision.has_idl_files):
                self.do_build_setup()

            # Detect new issues
            issues = self.detect_issues(analyzers, revision)
            logger.info('Detected {} issue(s) after patch'.format(len(issues)))
            stats.api.increment('analysis.issues.after', len(issues))

            # Mark newly found issues
            if settings.publication == Publication.BEFORE_AFTER:
                for issue in issues:
                    issue.is_new = issue not in before_patch

        # Avoid duplicates
        # but still output a list to be compatible with LocalWorkflow
        return list(set(issues))
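The clone watchdog inside the mercurial timer is a generic pattern: run a
step in a child process and kill it past a deadline. A compact sketch:

import multiprocessing

def run_with_watchdog(target, args, max_runtime):
    # Run target in a child process, as Example #28 does for the clone,
    # terminating it if it exceeds max_runtime seconds.
    proc = multiprocessing.Process(target=target, args=args)
    proc.start()
    proc.join(max_runtime)
    if proc.is_alive():
        proc.terminate()
        raise RuntimeError('watchdog expired after %ds' % max_runtime)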
Example #29
    def go(self):
        with ThreadPoolExecutorResult(max_workers=2) as executor:
            # Thread 1 - Download coverage artifacts.
            executor.submit(lambda: self.download_coverage_artifacts())

            # Thread 2 - Clone and build mozilla-central
            clone_future = executor.submit(lambda: self.clone_mozilla_central(self.revision))
            # Make sure cloning mozilla-central didn't fail before building.
            clone_future.add_done_callback(lambda f: f.result())
            # Now we can build.
            clone_future.add_done_callback(lambda f: self.build_files())

        if self.from_pulse:
            if self.gecko_dev_user is not None and self.gecko_dev_pwd is not None:
                self.update_github_repo()

            commit_sha = self.get_github_commit(self.revision)
            logger.info('GitHub revision', revision=commit_sha)

            if self.gecko_dev_user is not None and self.gecko_dev_pwd is not None:
                self.post_github_status(commit_sha)

            output = self.generate_info(commit_sha)
            logger.info('Report generated successfully')

            with ThreadPoolExecutorResult(max_workers=2) as executor:
                executor.submit(lambda: uploader.coveralls(output))
                executor.submit(lambda: uploader.codecov(output, commit_sha, self.codecov_token))

            self.prepopulate_cache(commit_sha)
        else:
            mkdir('code-coverage-reports')

            self.generate_per_suite_reports()

            self.generate_zero_coverage_report()

            self.generate_chunk_mapping()

            os.chdir('code-coverage-reports')
            run_check(['git', 'config', '--global', 'http.postBuffer', '12M'])
            run_check(['git', 'config', '--global', 'user.email', '*****@*****.**'])
            run_check(['git', 'config', '--global', 'user.name', 'Report Uploader'])
            repo_url = 'https://%s:%[email protected]/marco-c/code-coverage-reports' % (self.gecko_dev_user, self.gecko_dev_pwd)
            run_check(['git', 'init'])
            run_check(['git', 'add', '*'])
            run_check(['git', 'commit', '-m', 'Coverage reports upload'])
            retry(lambda: run_check(['git', 'push', repo_url, 'master', '--force']))
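The add_done_callback chain in Example #29 both surfaces clone failures
(f.result() re-raises inside the callback) and sequences the build behind the
clone. The same shape with the stock executor is sketched below; note that,
unlike the custom ThreadPoolExecutorResult, the standard library only logs
exceptions raised inside callbacks, so this mirrors the structure rather than
the error handling:

from concurrent.futures import ThreadPoolExecutor

def download_artifacts():
    pass  # placeholder

def clone_repository():
    pass  # placeholder

def build_files():
    pass  # placeholder

with ThreadPoolExecutor(max_workers=2) as executor:
    executor.submit(download_artifacts)
    clone_future = executor.submit(clone_repository)
    # Check the clone result as soon as it resolves...
    clone_future.add_done_callback(lambda f: f.result())
    # ...then run the build once the clone has finished.
    clone_future.add_done_callback(lambda f: build_files())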
Example #30
    def run(self, revision):
        '''
        Run the local static analysis workflow:
         * Pull revision from review
         * Checkout revision
         * Run static analyzers
        '''
        analyzers = []

        # Add log to find Taskcluster task in papertrail
        logger.info(
            'New static analysis',
            taskcluster_task=settings.taskcluster.task_id,
            taskcluster_run=settings.taskcluster.run_id,
            channel=settings.app_channel,
            publication=settings.publication.name,
            revision=str(revision),
        )
        stats.api.event(
            title='Static analysis on {} for {}'.format(settings.app_channel, revision),
            text='Task {} #{}'.format(settings.taskcluster.task_id, settings.taskcluster.run_id),
        )
        stats.api.increment('analysis')

        with stats.api.timer('runtime.mercurial'):
            try:
                # Clone in a controllable process
                # and kill this new process if it exceeds the maximum allowed runtime
                clone = multiprocessing.Process(target=self.clone, args=(revision, ))
                clone.start()
                clone.join(settings.max_clone_runtime)
                if clone.is_alive():
                    logger.error('Clone watchdog expired, stopping immediately')

                    # Kill the clone process
                    clone.terminate()

                    # Stop workflow
                    raise AnalysisException('watchdog', 'Clone watchdog expired')

                # Open a mercurial client in main process
                self.hg = self.open_repository()

                # Start by cloning the mercurial repository
                self.parent.index(revision, state='cloned')

                # Force cleanup to reset to the top of mozilla-unified,
                # otherwise previous pulls are still there
                self.hg.update(rev=self.top_revision, clean=True)
                logger.info('Set repo back to Mozilla unified top', rev=self.hg.identify())
            except hglib.error.CommandError as e:
                raise AnalysisException('mercurial', str(e))

            # Load and analyze revision patch
            revision.load(self.hg)
            revision.analyze_patch()

        with stats.api.timer('runtime.mach'):
            # Only run mach if revision has any C/C++ or Java files
            if revision.has_clang_files:
                self.do_build_setup()

                # Download clang build from Taskcluster
                # Use new clang-tidy paths, https://bugzilla.mozilla.org/show_bug.cgi?id=1495641
                logger.info('Setup Taskcluster clang build...')
                setup_clang(repository='mozilla-inbound', revision='revision.874a07fdb045b725edc2aaa656a8620ff439ec10')

                # Use clang-tidy & clang-format
                if CLANG_TIDY in self.analyzers:
                    analyzers.append(ClangTidy)
                else:
                    logger.info('Skip clang-tidy')
                if CLANG_FORMAT in self.analyzers:
                    analyzers.append(ClangFormat)
                else:
                    logger.info('Skip clang-format')

                # Run Coverity Scan
                if COVERITY in self.analyzers:
                    logger.info('Setup Taskcluster coverity build...')
                    try:
                        setup_coverity(self.index_service)
                        analyzers.append(Coverity)
                    except Exception as e:
                        logger.error('Coverity setup failed, skipping analyzer.', error=str(e))
                else:
                    logger.info('Skip Coverity')

                if COVERAGE in self.analyzers:
                    analyzers.append(Coverage)
                else:
                    logger.info('Skip coverage analysis')

            if revision.has_infer_files:
                if INFER in self.analyzers:
                    analyzers.append(Infer)
                    logger.info('Setup Taskcluster infer build...')
                    setup_infer(self.index_service)
                else:
                    logger.info('Skip infer')

            if not (revision.has_clang_files or revision.has_infer_files):
                logger.info('No clang or java files detected, skipping mach, infer and clang-*')

            # Setup python environment
            logger.info('Mach lint setup...')
            cmd = ['gecko-env', './mach', 'lint', '--list']
            with stats.api.timer('runtime.mach.lint'):
                out = run_check(cmd, cwd=settings.repo_dir)
            if 'error: problem with lint setup' in out.decode('utf-8'):
                raise AnalysisException('mach', 'Mach lint setup failed')

            # Always use mozlint
            if MOZLINT in self.analyzers:
                analyzers.append(MozLint)
            else:
                logger.info('Skip mozlint')

        if not analyzers:
            logger.error('No analyzers to use on revision')
            return

        self.parent.index(revision, state='analyzing')
        with stats.api.timer('runtime.issues'):
            # Detect initial issues
            if settings.publication == Publication.BEFORE_AFTER:
                before_patch = self.detect_issues(analyzers, revision, True)
                logger.info('Detected {} issue(s) before patch'.format(len(before_patch)))
                stats.api.increment('analysis.issues.before', len(before_patch))
                revision.reset()

            # Apply patch
            revision.apply(self.hg)

            if settings.publication == Publication.BEFORE_AFTER and revision.has_clang_files \
                    and (revision.has_clang_header_files or revision.has_idl_files):
                self.do_build_setup()

            # Detect new issues
            issues = self.detect_issues(analyzers, revision)
            logger.info('Detected {} issue(s) after patch'.format(len(issues)))
            stats.api.increment('analysis.issues.after', len(issues))

            # Mark newly found issues
            if settings.publication == Publication.BEFORE_AFTER:
                for issue in issues:
                    issue.is_new = issue not in before_patch

        # Avoid duplicates
        # but still output a list to be compatible with LocalWorkflow
        return list(set(issues))
Example #31
    def run(self, revision):
        '''
        Run coverity
        '''
        assert isinstance(revision, Revision)
        self.revision = revision

        # Based on our previous configs we should already have generated compile_commands.json
        self.compile_commands_path = os.path.join(settings.repo_dir,
                                                  'obj-x86_64-pc-linux-gnu',
                                                  'compile_commands.json')

        assert os.path.exists(self.compile_commands_path), \
            'Missing compile_commands.json in {}'.format(self.compile_commands_path)

        logger.info('Building command files from compile_commands.json')

        # Retrieve the revision files with build commands associated
        commands_list = self.get_files_with_commands()
        assert commands_list, 'Commands list is empty'
        logger.info('Built commands for {} files'.format(len(commands_list)))

        cmd = ['gecko-env', self.cov_run_desktop, '--setup']
        logger.info('Running Coverity Setup', cmd=cmd)
        try:
            run_check(cmd, cwd=self.cov_path)
        except subprocess.CalledProcessError as e:
            raise AnalysisException(
                'coverity', 'Coverity Setup failed: {}'.format(e.output))

        cmd = ['gecko-env', self.cov_configure, '--clang']
        logger.info('Running Coverity Configure', cmd=cmd)
        try:
            run_check(cmd, cwd=self.cov_path)
        except subprocess.CalledProcessError as e:
            raise AnalysisException(
                'coverity', 'Coverity Configure failed: {}'.format(e.output))

        # For each element in commands_list run `cov-translate`
        for element in commands_list:
            cmd = [
                'gecko-env', self.cov_translate, '--dir', self.cov_idir_path,
                element['command']
            ]
            logger.info('Running Coverity Translate', cmd=cmd)
            try:
                run_check(cmd, cwd=element['directory'])
            except subprocess.CalledProcessError as e:
                raise AnalysisException(
                    'coverity',
                    'Coverity Translate failed: {}'.format(e.output))

        # Once the capture is performed we need to do the actual Coverity Desktop analysis
        cmd = [
            'gecko-env', self.cov_run_desktop, '--json-output-v6',
            'cov-results.json', '--strip-path', settings.repo_dir
        ]
        cmd += [element['file'] for element in commands_list]
        logger.info('Running Coverity Analysis', cmd=cmd)
        try:
            run_check(cmd, cwd=self.cov_state_path)
        except subprocess.CalledProcessError as e:
            raise AnalysisException(
                'coverity', 'Coverity Analysis failed: {}'.format(e.output))

        # Copy cov-results.json to the artifact directory to keep it around for debugging
        coverity_results_path = os.path.join(self.cov_state_path,
                                             'cov-results.json')
        coverity_results_path_on_tc = os.path.join(
            settings.taskcluster.results_dir, 'cov-results.json')

        shutil.copyfile(coverity_results_path, coverity_results_path_on_tc)

        # Parsing the issues from coverity_results_path
        logger.info('Parsing Coverity issues')
        return self.return_issues(coverity_results_path, revision)
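AnalysisException is raised throughout these snippets with a (category, message) pair but never defined in them; a minimal stand-in consistent with that usage, attribute names being assumptions, could be:

class AnalysisException(Exception):
    # Minimal stand-in: only the (category, message) call shape is
    # taken from the snippets above; the attribute names are assumed.
    def __init__(self, category, message):
        super().__init__(message)
        self.category = category
        self.message = message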
Example no. 32
    def run(self, revision):
        '''
        Run the static analysis workflow:
         * Pull revision from review
         * Checkout revision
         * Run static analysis
         * Publish results
        '''
        analyzers = []

        # Add log to find Taskcluster task in papertrail
        logger.info(
            'New static analysis',
            taskcluster_task=self.taskcluster_task_id,
            taskcluster_run=self.taskcluster_run_id,
            channel=settings.app_channel,
            revision=str(revision),
        )
        stats.api.event(
            title='Static analysis on {} for {}'.format(
                settings.app_channel, revision),
            text='Task {} #{}'.format(self.taskcluster_task_id,
                                      self.taskcluster_run_id),
        )
        stats.api.increment('analysis')

        with stats.api.timer('runtime.mercurial'):
            # Force a clean update back to tip,
            # otherwise previously pulled changes would still be applied
            self.hg.update(rev=b'tip', clean=True)
            logger.info('Set repo back to tip', rev=self.hg.tip().node)

            # Apply and analyze revision patch
            revision.apply(self.hg)
            revision.analyze_patch()

        with stats.api.timer('runtime.mach'):
            # Only run mach if revision has any C/C++ files
            if revision.has_clang_files:
                # Mach pre-setup with mozconfig
                logger.info('Mach configure...')
                run_check(['gecko-env', './mach', 'configure'],
                          cwd=settings.repo_dir)

                logger.info('Mach compile db...')
                run_check([
                    'gecko-env', './mach', 'build-backend',
                    '--backend=CompileDB'
                ],
                          cwd=settings.repo_dir)

                logger.info('Mach pre-export...')
                run_check(['gecko-env', './mach', 'build', 'pre-export'],
                          cwd=settings.repo_dir)

                logger.info('Mach export...')
                run_check(['gecko-env', './mach', 'build', 'export'],
                          cwd=settings.repo_dir)

                # Download clang build from Taskcluster
                logger.info('Setup Taskcluster clang build...')
                setup_clang()

                # Use clang-tidy & clang-format
                if CLANG_TIDY in self.analyzers:
                    analyzers.append(ClangTidy)
                else:
                    logger.info('Skip clang-tidy')
                if CLANG_FORMAT in self.analyzers:
                    analyzers.append(ClangFormat)
                else:
                    logger.info('Skip clang-format')

            else:
                logger.info(
                    'No clang files detected, skipping mach and clang-*')

            # Setup python environment
            logger.info('Mach lint setup...')
            cmd = ['gecko-env', './mach', 'lint', '--list']
            run_check(cmd, cwd=settings.repo_dir)

            # Always use mozlint
            if MOZLINT in self.analyzers:
                analyzers.append(MozLint)
            else:
                logger.info('Skip mozlint')

        if not analyzers:
            logger.error('No analyzers to use on revision')
            return

        issues = []
        for analyzer_class in analyzers:
            # Build analyzer
            logger.info('Run {}'.format(analyzer_class.__name__))
            analyzer = analyzer_class()

            # Run the analyzer on the revision and store the generated issues
            issues += analyzer.run(revision)

        logger.info('Detected {} issue(s)'.format(len(issues)))
        if not issues:
            logger.info('No issues, stopping there.')
            return

        # Build a potential improvement patch
        self.build_improvement_patch(revision, issues)

        # Publish reports about these issues
        with stats.api.timer('runtime.reports'):
            for reporter in self.reporters.values():
                reporter.publish(issues, revision)
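run_check does the heavy lifting in all of these workflows but is not defined in the snippets. One example catches subprocess.CalledProcessError and reads e.output, and another decodes the return value as bytes, so a plausible minimal wrapper, assuming it simply shells out and returns the captured output, is:

import subprocess

def run_check(cmd, cwd=None):
    # Plausible sketch only: run the command, raise CalledProcessError
    # (with .output populated) on a non-zero exit code, and return the
    # combined stdout/stderr as bytes. Other snippets catch
    # click.ClickException instead, so the real wrapper likely differs.
    return subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)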
Example no. 33
    def run(self, revision):
        '''
        Run the static analysis workflow:
         * Pull revision from review
         * Checkout revision
         * Run static analysis
         * Publish results
        '''
        analyzers = []

        # Index ASAP Taskcluster task for this revision
        self.index(revision, state='started')

        # Add log to find Taskcluster task in papertrail
        logger.info(
            'New static analysis',
            taskcluster_task=settings.taskcluster.task_id,
            taskcluster_run=settings.taskcluster.run_id,
            channel=settings.app_channel,
            publication=settings.publication.name,
            revision=str(revision),
        )
        stats.api.event(
            title='Static analysis on {} for {}'.format(settings.app_channel, revision),
            text='Task {} #{}'.format(settings.taskcluster.task_id, settings.taskcluster.run_id),
        )
        stats.api.increment('analysis')

        with stats.api.timer('runtime.mercurial'):
            try:
                # Start by cloning the mercurial repository
                self.hg = self.clone()
                self.index(revision, state='cloned')

                # Force a clean update back to the top of mozilla-unified,
                # otherwise previously pulled changes would still be applied
                self.hg.update(rev=self.top_revision, clean=True)
                logger.info('Set repo back to Mozilla unified top', rev=self.hg.identify())
            except hglib.error.CommandError as e:
                raise AnalysisException('mercurial', str(e))

            # Load and analyze revision patch
            revision.load(self.hg)
            revision.analyze_patch()

        with stats.api.timer('runtime.mach'):
            # Only run mach if the revision has any C/C++ files
            if revision.has_clang_files:

                # Mach pre-setup with mozconfig
                try:
                    logger.info('Mach configure...')
                    with stats.api.timer('runtime.mach.configure'):
                        run_check(['gecko-env', './mach', 'configure'], cwd=settings.repo_dir)

                    logger.info('Mach compile db...')
                    with stats.api.timer('runtime.mach.build-backend'):
                        run_check(['gecko-env', './mach', 'build-backend', '--backend=CompileDB'], cwd=settings.repo_dir)

                    logger.info('Mach pre-export...')
                    with stats.api.timer('runtime.mach.pre-export'):
                        run_check(['gecko-env', './mach', 'build', 'pre-export'], cwd=settings.repo_dir)

                    logger.info('Mach export...')
                    with stats.api.timer('runtime.mach.export'):
                        run_check(['gecko-env', './mach', 'build', 'export'], cwd=settings.repo_dir)
                except Exception as e:
                    raise AnalysisException('mach', str(e))

                # Download clang build from Taskcluster
                # Use new clang-tidy paths, https://bugzilla.mozilla.org/show_bug.cgi?id=1495641
                logger.info('Setup Taskcluster clang build...')
                setup_clang(repository='mozilla-inbound', revision='revision.874a07fdb045b725edc2aaa656a8620ff439ec10')

                # Use clang-tidy & clang-format
                if CLANG_TIDY in self.analyzers:
                    analyzers.append(ClangTidy)
                else:
                    logger.info('Skip clang-tidy')
                if CLANG_FORMAT in self.analyzers:
                    analyzers.append(ClangFormat)
                else:
                    logger.info('Skip clang-format')

            if revision.has_infer_files:
                if INFER in self.analyzers:
                    analyzers.append(Infer)
                    logger.info('Setup Taskcluster infer build...')
                    setup_infer(self.index_service)
                else:
                    logger.info('Skip infer')

            if not (revision.has_clang_files or revision.has_infer_files):
                logger.info('No clang or java files detected, skipping mach, infer and clang-*')

            # Setup python environment
            logger.info('Mach lint setup...')
            cmd = ['gecko-env', './mach', 'lint', '--list']
            with stats.api.timer('runtime.mach.lint'):
                out = run_check(cmd, cwd=settings.repo_dir)
            if 'error: problem with lint setup' in out.decode('utf-8'):
                raise AnalysisException('mach', 'Mach lint setup failed')

            # Always use mozlint
            if MOZLINT in self.analyzers:
                analyzers.append(MozLint)
            else:
                logger.info('Skip mozlint')

        if not analyzers:
            logger.error('No analyzers to use on revision')
            return

        self.index(revision, state='analyzing')
        with stats.api.timer('runtime.issues'):
            # Detect initial issues
            if settings.publication == Publication.BEFORE_AFTER:
                before_patch, _ = self.detect_issues(analyzers, revision)
                logger.info('Detected {} issue(s) before patch'.format(len(before_patch)))
                stats.api.increment('analysis.issues.before', len(before_patch))

            # Apply patch
            revision.apply(self.hg)

            # Detect new issues
            issues = self.detect_issues(analyzers, revision)
            logger.info('Detected {} issue(s) after patch'.format(len(issues)))
            stats.api.increment('analysis.issues.after', len(issues))

            # Mark newly found issues
            if settings.publication == Publication.BEFORE_AFTER:
                for issue in issues:
                    issue.is_new = issue not in before_patch

        # Avoid duplicates
        issues = set(issues)

        if not issues:
            logger.info('No issues, stopping there.')
            self.index(revision, state='done', issues=0)
            return

        # Report issues publication stats
        nb_issues = len(issues)
        nb_publishable = len([i for i in issues if i.is_publishable()])
        self.index(revision, state='analyzed', issues=nb_issues, issues_publishable=nb_publishable)
        stats.api.increment('analysis.issues.publishable', nb_publishable)

        # Publish reports about these issues
        with stats.api.timer('runtime.reports'):
            for reporter in self.reporters.values():
                reporter.publish(issues, revision)

        self.index(revision, state='done', issues=nb_issues, issues_publishable=nb_publishable)
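The workflow branches on settings.publication == Publication.BEFORE_AFTER; the enum itself is not part of these snippets. A minimal sketch consistent with that check (the IN_PATCH member name is an assumption):

from enum import Enum

class Publication(Enum):
    # Publish every issue found inside the patch (assumed member)
    IN_PATCH = 'IN_PATCH'
    # Analyze before and after the patch, flag only the new issues
    BEFORE_AFTER = 'BEFORE_AFTER'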
Example no. 34
    def run(self, revision):
        '''
        Run Coverity
        '''
        assert isinstance(revision, Revision)
        self.revision = revision

        # Based on our previous configs we should already have generated compile_commands.json
        self.compile_commands_path = os.path.join(settings.repo_dir, 'obj-x86_64-pc-linux-gnu', 'compile_commands.json')

        assert os.path.exists(self.compile_commands_path), \
            'Missing compile_commands.json in {}'.format(self.compile_commands_path)

        logger.info('Building command files from compile_commands.json')

        # Retrieve the revision files with build commands associated
        commands_list = self.get_files_with_commands()
        assert commands_list is not None, 'Commands list is missing'
        logger.info('Built commands for {} files'.format(len(commands_list)))

        if len(commands_list) == 0:
            logger.info('Coverity didn\'t find any compilation units to use.')
            return []

        cmd = ['gecko-env', self.cov_run_desktop, '--setup']
        logger.info('Running Coverity Setup', cmd=cmd)
        try:
            run_check(cmd, cwd=self.cov_path)
        except click.ClickException:
            raise AnalysisException('coverity', 'Coverity Setup failed!')

        cmd = ['gecko-env', self.cov_configure, '--clang']
        logger.info('Running Coverity Configure', cmd=cmd)
        try:
            run_check(cmd, cwd=self.cov_path)
        except click.ClickException:
            raise AnalysisException('coverity', 'Coverity Configure failed!')

        # For each element in commands_list run `cov-translate`
        for element in commands_list:
            cmd = [
                'gecko-env', self.cov_translate, '--dir', self.cov_idir_path,
                element['command']
            ]
            logger.info('Running Coverity Translate', cmd=cmd)
            try:
                run_check(cmd, cwd=element['directory'])
            except click.ClickException:
                raise AnalysisException('coverity', 'Coverity Translate failed!')

        # Once the capture is performed we need to do the actual Coverity Desktop analysis
        cmd = [
            'gecko-env', self.cov_run_desktop, '--json-output-v6',
            'cov-results.json', '--strip-path', settings.repo_dir
        ]
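        # Restrict the analysis to the files that had build commands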
        cmd += [element['file'] for element in commands_list]
        logger.info('Running Coverity Analysis', cmd=cmd)
        try:
            run_check(cmd, cwd=self.cov_state_path)
        except click.ClickException:
            raise AnalysisException('coverity', 'Coverity Analysis failed!')

        # Copy cov-results.json to the artifact directory to keep it around for debugging
        coverity_results_path = os.path.join(self.cov_state_path, 'cov-results.json')
        coverity_results_path_on_tc = os.path.join(settings.taskcluster.results_dir, 'cov-results.json')

        shutil.copyfile(coverity_results_path, coverity_results_path_on_tc)

        # Parsing the issues from coverity_results_path
        logger.info('Parsing Coverity issues')
        issues = self.return_issues(coverity_results_path, revision)

        # Report stats for these issues
        stats.report_issues('coverity', issues)

        return issues
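get_files_with_commands is used by both Coverity snippets but never shown. From the accesses above, each entry carries 'file', 'directory' and 'command' keys, which matches the standard compile_commands.json entry shape. A hypothetical sketch that filters the compilation database down to the files touched by the revision (revision.files is an assumed attribute, and path normalization is glossed over):

import json

def get_files_with_commands(self):
    # Hypothetical sketch: keep only the compile_commands.json entries
    # whose 'file' was touched by the revision under analysis.
    with open(self.compile_commands_path) as f:
        compile_db = json.load(f)

    touched = set(self.revision.files)  # assumed attribute
    return [
        {
            'file': entry['file'],
            'directory': entry['directory'],
            'command': entry['command'],
        }
        for entry in compile_db
        if entry['file'] in touched
    ]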