Code Example #1
File: codecov.py Project: vsham20/services
    def go(self):
        with ThreadPoolExecutorResult(max_workers=2) as executor:
            # Thread 1 - Download coverage artifacts.
            executor.submit(self.artifactsHandler.download_all)

            # Thread 2 - Clone mozilla-central.
            executor.submit(self.clone_mozilla_central, self.revision)

        if self.from_pulse:
            self.githubUtils.update_geckodev_repo()

            commit_sha = self.githubUtils.get_commit(self.revision)
            logger.info('GitHub revision', revision=commit_sha)

            self.githubUtils.post_github_status(commit_sha)

            r = requests.get(
                'https://hg.mozilla.org/mozilla-central/json-rev/%s' %
                self.revision)
            r.raise_for_status()
            push_id = r.json()['pushid']

            output = grcov.report(self.artifactsHandler.get(),
                                  source_dir=self.repo_dir,
                                  service_number=push_id,
                                  commit_sha=commit_sha,
                                  token=secrets[secrets.COVERALLS_TOKEN])
            logger.info('Report generated successfully')

            with ThreadPoolExecutorResult(max_workers=2) as executor:
                executor.submit(uploader.coveralls, output)
                executor.submit(uploader.codecov, output, commit_sha)

            logger.info('Waiting for build to be ingested by Codecov...')
            # Wait until the build has been ingested by Codecov.
            if uploader.codecov_wait(commit_sha):
                logger.info('Build ingested by codecov.io')
                self.notifier.notify()
            else:
                logger.info('codecov.io took too much time to ingest data.')
        else:
            mkdir('code-coverage-reports')

            # XXX: Disabled as it is unused for now.
            # self.generate_suite_reports()

            report_generators.zero_coverage(self.artifactsHandler.get())

            self.generate_chunk_mapping()

            os.chdir('code-coverage-reports')
            self.githubUtils.update_codecoveragereports_repo()
Code Example #2
    def retrieve_source_and_artifacts(self):
        with ThreadPoolExecutorResult(max_workers=2) as executor:
            # Thread 1 - Download coverage artifacts.
            executor.submit(self.artifactsHandler.download_all)

            # Thread 2 - Clone repository.
            executor.submit(self.clone_repository, self.repository, self.revision)
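
Every snippet on this page uses ThreadPoolExecutorResult as a context manager, but the class itself is defined elsewhere in the project. A minimal sketch of the behavior the examples rely on, assuming it is a thin ThreadPoolExecutor subclass that re-raises worker exceptions when the with-block exits (names and details are inferred, not the project's verbatim code):

import concurrent.futures


class ThreadPoolExecutorResult(concurrent.futures.ThreadPoolExecutor):
    # Sketch only: remember every submitted future so that errors raised in
    # worker threads surface at the end of the with-block instead of being
    # silently dropped.
    def __init__(self, *args, **kwargs):
        self.futures = []
        super().__init__(*args, **kwargs)

    def submit(self, *args, **kwargs):
        future = super().submit(*args, **kwargs)
        self.futures.append(future)
        return future

    def __exit__(self, *exc_info):
        try:
            for future in concurrent.futures.as_completed(self.futures):
                future.result()  # re-raises any worker exception
        except Exception:
            for future in self.futures:
                future.cancel()  # best effort: drop work that has not started
            raise
        return super().__exit__(*exc_info)

This would explain why none of the go() methods check the submitted futures by hand: a failed download or clone aborts the run as soon as the executor block closes.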
Code Example #3
    def download_all(self):
        os.makedirs(self.parent_dir, exist_ok=True)

        # The test tasks for the Linux and Windows builds are in the same group,
        # but the following code is generic and supports build tasks split in
        # separate groups.
        groups = {
            taskcluster.get_task_details(build_task_id)['taskGroupId']
            for build_task_id in self.task_ids.values()
        }
        test_tasks = [
            task for group in groups
            for task in taskcluster.get_tasks_in_group(group)
            if taskcluster.is_coverage_task(task)
        ]

        for test_task in test_tasks:
            status = test_task['status']['state']
            while status not in FINISHED_STATUSES:
                assert status in ALL_STATUSES, "State '{}' not recognized".format(
                    status)
                logger.info('Waiting for task {} to finish...'.format(
                    test_task['status']['taskId']))
                time.sleep(60)
                status = taskcluster.get_task_status(
                    test_task['status']['taskId'])

        # Choose best tasks to download (e.g. 'completed' is better than 'failed')
        download_tasks = {}
        for test_task in test_tasks:
            status = test_task['status']['state']
            assert status in FINISHED_STATUSES, "State '{}' not finished".format(
                status)

            chunk_name = taskcluster.get_chunk(
                test_task['task']['metadata']['name'])
            platform_name = taskcluster.get_platform(
                test_task['task']['metadata']['name'])
            # Ignore awsy and talos as they aren't actually suites of tests.
            if any(to_ignore in chunk_name
                   for to_ignore in self.suites_to_ignore):
                continue

            if (chunk_name, platform_name) not in download_tasks:
                # If the chunk hasn't been downloaded before, this is obviously the best task
                # to download it from.
                download_tasks[(chunk_name, platform_name)] = test_task
            else:
                # Otherwise, compare the status of this task with the previously selected task.
                prev_task = download_tasks[(chunk_name, platform_name)]

                if STATUS_VALUE[status] > STATUS_VALUE[prev_task['status']['state']]:
                    download_tasks[(chunk_name, platform_name)] = test_task

        with ThreadPoolExecutorResult() as executor:
            for test_task in download_tasks.values():
                executor.submit(self.download, test_task)

        logger.info('Code coverage artifacts downloaded')
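
The status constants used by download_all are defined elsewhere in the bot. Definitions consistent with how this code uses them would look like the following (the exact values are an assumption, not the project's verbatim code):

# Terminal Taskcluster states, plus a ranking so that when the same chunk ran
# more than once, 'completed' beats 'failed', which beats 'exception'.
FINISHED_STATUSES = ['completed', 'failed', 'exception']
ALL_STATUSES = FINISHED_STATUSES + ['unscheduled', 'pending', 'running']
STATUS_VALUE = {'exception': 1, 'failed': 2, 'completed': 3}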
Code Example #4
File: trainer.py Project: strodew/release-services
    def go(self):
        # Download datasets that were built by bugbug_data.
        os.makedirs('data', exist_ok=True)
        with ThreadPoolExecutorResult(max_workers=2) as executor:
            executor.submit(lambda: urlretrieve('https://index.taskcluster.net/v1/task/project.releng.services.project.testing.bugbug_data.latest/artifacts/public/bugs.json.xz', 'data/bugs.json.xz'))  # noqa

            executor.submit(lambda: urlretrieve('https://index.taskcluster.net/v1/task/project.releng.services.project.testing.bugbug_data.latest/artifacts/public/commits.json.xz', 'data/commits.json.xz'))  # noqa

        # Train classifier for bug-vs-nonbug.
        self.train_bug()

        # Train classifier for regression-vs-nonregression.
        self.train_regression()

        # Train classifier for tracking bugs.
        self.train_tracking()

        # Index the task in the TaskCluster index.
        self.index_service.insertTask(
            'project.releng.services.project.{}.bugbug_train.latest'.format(secrets[secrets.APP_CHANNEL]),
            {
                'taskId': os.environ['TASK_ID'],
                'rank': 0,
                'data': {},
                'expires': (datetime.utcnow() + timedelta(31)).strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
            }
        )
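
A side note on the two submissions above: the lambdas exist only to bind the URL and destination path. Assuming urlretrieve is urllib.request.urlretrieve, an equivalent call passes both straight to submit(), which forwards extra arguments to the callable:

from urllib.request import urlretrieve

executor.submit(
    urlretrieve,
    'https://index.taskcluster.net/v1/task/project.releng.services.project.testing.bugbug_data.latest/artifacts/public/bugs.json.xz',
    'data/bugs.json.xz')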
Code Example #5
File: codecov.py Project: helfi92/services
    def go(self):
        with ThreadPoolExecutorResult(max_workers=2) as executor:
            # Thread 1 - Download coverage artifacts.
            executor.submit(self.artifactsHandler.download_all)

            # Thread 2 - Clone mozilla-central.
            executor.submit(self.clone_mozilla_central, self.revision)

        if self.from_pulse:
            self.githubUtils.update_geckodev_repo()

            commit_sha = self.githubUtils.get_commit(self.revision)
            logger.info('GitHub revision', revision=commit_sha)

            self.githubUtils.post_github_status(commit_sha)

            output = self.generate_info(commit_sha)
            logger.info('Report generated successfully')

            with ThreadPoolExecutorResult(max_workers=2) as executor:
                executor.submit(uploader.coveralls, output)
                executor.submit(uploader.codecov, output, commit_sha)

            logger.info('Waiting for build to be ingested by Codecov...')
            # Wait until the build has been ingested by Codecov.
            if uploader.codecov_wait(commit_sha):
                logger.info('Build ingested by codecov.io')
                self.notifier.notify()
            else:
                logger.info('codecov.io took too much time to ingest data.')
        else:
            mkdir('code-coverage-reports')

            self.generate_per_suite_reports()

            self.generate_zero_coverage_report()

            self.generate_chunk_mapping()

            os.chdir('code-coverage-reports')
            self.githubUtils.update_codecoveragereports_repo()
Code Example #6
    def go_from_trigger_mozilla_central(self):
        commit_sha = self.githubUtils.mercurial_to_git(self.revision)
        try:
            uploader.get_codecov(commit_sha)
            logger.warn('Build was already ingested')
            return
        except requests.exceptions.HTTPError:
            pass

        self.retrieve_source_and_artifacts()

        self.githubUtils.update_geckodev_repo()

        logger.info('GitHub revision', revision=commit_sha)

        self.githubUtils.post_github_status(commit_sha)

        r = requests.get('https://hg.mozilla.org/mozilla-central/json-rev/%s' %
                         self.revision)
        r.raise_for_status()
        push_id = r.json()['pushid']

        output = grcov.report(self.artifactsHandler.get(),
                              source_dir=self.repo_dir,
                              service_number=push_id,
                              commit_sha=commit_sha,
                              token=secrets[secrets.COVERALLS_TOKEN])
        logger.info('Report generated successfully')

        report = json.loads(output)
        expected_extensions = ['.js', '.cpp']
        for extension in expected_extensions:
            assert any(
                f['name'].endswith(extension) for f in report['source_files']
            ), 'No {} file in the generated report'.format(extension)

        logger.info('Upload changeset coverage data to Phabricator')
        phabricatorUploader = PhabricatorUploader(self.repo_dir, self.revision)
        phabricatorUploader.upload(report)

        with ThreadPoolExecutorResult(max_workers=2) as executor:
            executor.submit(uploader.coveralls, output)
            executor.submit(uploader.codecov, output, commit_sha)

        logger.info('Waiting for build to be ingested by Codecov...')
        # Wait until the build has been ingested by Codecov.
        if uploader.codecov_wait(commit_sha):
            logger.info('Build ingested by codecov.io')
            notifier = Notifier(self.repo_dir, self.revision, self.client_id,
                                self.access_token)
            notifier.notify()
        else:
            logger.error('codecov.io took too much time to ingest data.')
Code Example #7
    def go(self):
        # Download models that were trained by bugbug_train.
        with ThreadPoolExecutorResult(max_workers=3) as executor:
            f1 = executor.submit(lambda: urlretrieve(
                'https://index.taskcluster.net/v1/task/project.releng.services.project.testing.bugbug_train.latest/artifacts/public/bug.model.xz',
                'bug.model.xz'))  # noqa
            f1.add_done_callback(lambda f: self.decompress_file('bug.model'))

            f2 = executor.submit(lambda: urlretrieve(
                'https://index.taskcluster.net/v1/task/project.releng.services.project.testing.bugbug_train.latest/artifacts/public/regression.model.xz',
                'regression.model.xz'))  # noqa
            f2.add_done_callback(
                lambda f: self.decompress_file('regression.model'))

            f3 = executor.submit(lambda: urlretrieve(
                'https://index.taskcluster.net/v1/task/project.releng.services.project.testing.bugbug_train.latest/artifacts/public/tracking.model.xz',
                'tracking.model.xz'))  # noqa
            f3.add_done_callback(
                lambda f: self.decompress_file('tracking.model'))

        # Download bugs from the last week that we want to analyze.
        bugzilla.set_token(secrets[secrets.BUGZILLA_TOKEN])

        today = datetime.utcnow()
        one_week_ago = today - timedelta(7)
        bugzilla.download_bugs_between(one_week_ago, today)

        # Eval classifier for bug-vs-nonbug.
        self.eval_bug()

        # Eval classifier for regression-vs-nonregression.
        self.eval_regression()

        # Eval classifier for tracking bugs.
        self.eval_tracking()

        # Index the task in the TaskCluster index.
        self.index_service.insertTask(
            'project.releng.services.project.{}.bugbug_eval.latest'.format(
                secrets[secrets.APP_CHANNEL]),
            {
                'taskId': os.environ['TASK_ID'],
                'rank': 0,
                'data': {},
                'expires': (datetime.utcnow() + timedelta(31)).strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
            })
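
decompress_file is a method of the class that this page does not show. Since each callback passes the target path without its .xz suffix, a plausible sketch using the standard lzma module (an assumption based on the call sites):

import lzma
import shutil


def decompress_file(self, path):
    # Stream '<path>.xz' into '<path>' without loading it all into memory.
    with lzma.open('{}.xz'.format(path), 'rb') as compressed:
        with open(path, 'wb') as output:
            shutil.copyfileobj(compressed, output)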
Code Example #8
    def go(self):
        with ThreadPoolExecutorResult(max_workers=2) as executor:
            # Thread 1 - Download Bugzilla data.
            executor.submit(self.retrieve_bugs)

            # Thread 2 - Clone mozilla-central and retrieve commit data.
            executor.submit(self.retrieve_commits)

        # Index the task in the TaskCluster index.
        self.index_service.insertTask(
            'project.releng.services.project.{}.bugbug_data.latest'.format(
                secrets[secrets.APP_CHANNEL]),
            {
                'taskId': os.environ['TASK_ID'],
                'rank': 0,
                'data': {},
                'expires': (datetime.utcnow() + timedelta(31)).strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
            })
Code Example #9
    def go(self):
        # Download datasets that were built by bugbug_data.
        os.makedirs('data', exist_ok=True)
        with ThreadPoolExecutorResult(max_workers=2) as executor:
            f1 = executor.submit(lambda: urlretrieve(
                'https://index.taskcluster.net/v1/task/project.releng.services.project.testing.bugbug_data.latest/artifacts/public/bugs.json.xz',
                'data/bugs.json.xz'))  # noqa
            f1.add_done_callback(
                lambda f: self.decompress_file('data/bugs.json'))

            f2 = executor.submit(lambda: urlretrieve(
                'https://index.taskcluster.net/v1/task/project.releng.services.project.testing.bugbug_data.latest/artifacts/public/commits.json.xz',
                'data/commits.json.xz'))  # noqa
            f2.add_done_callback(
                lambda f: self.decompress_file('data/commits.json'))

        # Train classifier for defect-vs-enhancement-vs-task.
        self.train_defect_enhancement_task()

        # Train classifier for the component of a bug.
        self.train_component()

        # Train classifier for regression-vs-nonregression.
        self.train_regression()

        # Train classifier for tracking bugs.
        self.train_tracking()

        # Index the task in the TaskCluster index.
        self.index_service.insertTask(
            f'project.releng.services.project.{secrets[secrets.APP_CHANNEL]}.bugbug_train.latest',
            {
                'taskId': os.environ['TASK_ID'],
                'rank': 0,
                'data': {},
                'expires': (datetime.utcnow() + timedelta(31)).strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
            })
Code Example #10
File: codecov.py Project: bnjbvr/services
    def go(self):
        with ThreadPoolExecutorResult(max_workers=2) as executor:
            # Thread 1 - Download coverage artifacts.
            executor.submit(self.artifactsHandler.download_all)

            # Thread 2 - Clone mozilla-central.
            executor.submit(self.clone_mozilla_central, self.revision)

        if self.from_pulse:
            self.githubUtils.update_geckodev_repo()

            commit_sha = self.githubUtils.get_commit(self.revision)
            logger.info('GitHub revision', revision=commit_sha)

            self.githubUtils.post_github_status(commit_sha)

            r = requests.get('https://hg.mozilla.org/mozilla-central/json-rev/%s' % self.revision)
            r.raise_for_status()
            push_id = r.json()['pushid']

            output = grcov.report(
                self.artifactsHandler.get(),
                source_dir=self.repo_dir,
                service_number=push_id,
                commit_sha=commit_sha,
                token=secrets[secrets.COVERALLS_TOKEN]
            )
            logger.info('Report generated successfully')

            with ThreadPoolExecutorResult(max_workers=2) as executor:
                executor.submit(uploader.coveralls, output)
                executor.submit(uploader.codecov, output, commit_sha)

            logger.info('Waiting for build to be ingested by Codecov...')
            # Wait until the build has been ingested by Codecov.
            if uploader.codecov_wait(commit_sha):
                logger.info('Build ingested by codecov.io')
                self.notifier.notify()
            else:
                logger.error('codecov.io took too much time to ingest data.')
        else:
            os.makedirs('code-coverage-reports', exist_ok=True)

            self.generate_suite_reports()

            report_generators.zero_coverage(self.artifactsHandler.get())

            self.generate_chunk_mapping()

            # Index the task in the TaskCluster index.
            self.index_service.insertTask(
                'project.releng.services.project.{}.shipit_code_coverage.{}'.format(secrets[secrets.APP_CHANNEL], self.revision),
                {
                    'taskId': os.environ['TASK_ID'],
                    'rank': 0,
                    'data': {},
                    'expires': (datetime.utcnow() + timedelta(180)).strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
                }
            )

            os.chdir('code-coverage-reports')
            self.githubUtils.update_codecoveragereports_repo()
Code Example #11
    def go_from_trigger_mozilla_central(self):
        commit_sha = self.githubUtils.mercurial_to_git(self.revision)
        try:
            uploader.get_codecov(commit_sha)
            logger.warn('Build was already ingested')
            return
        except requests.exceptions.HTTPError:
            pass

        self.retrieve_source_and_artifacts()

        self.githubUtils.update_geckodev_repo()

        logger.info('GitHub revision', revision=commit_sha)

        self.githubUtils.post_github_status(commit_sha)

        r = requests.get('https://hg.mozilla.org/mozilla-central/json-rev/%s' %
                         self.revision)
        r.raise_for_status()
        push_id = r.json()['pushid']

        # Check that all JavaScript files present in the coverage artifacts actually exist.
        # If they don't, there might be a bug in the LCOV rewriter.
        for artifact in self.artifactsHandler.get():
            if 'jsvm' not in artifact:
                continue

            with zipfile.ZipFile(artifact, 'r') as zf:
                for file_name in zf.namelist():
                    with zf.open(file_name, 'r') as fl:
                        source_files = [
                            line[3:].decode('utf-8').rstrip() for line in fl
                            if line.startswith(b'SF:')
                        ]
                        missing_files = [
                            f for f in source_files if
                            not os.path.exists(os.path.join(self.repo_dir, f))
                        ]
                        if len(missing_files) != 0:
                            logger.warn(
                                f'{missing_files} are present in coverage reports, but missing from the repository'
                            )

        output = grcov.report(self.artifactsHandler.get(),
                              source_dir=self.repo_dir,
                              service_number=push_id,
                              commit_sha=commit_sha,
                              token=secrets[secrets.COVERALLS_TOKEN])
        logger.info('Report generated successfully')

        report = json.loads(output)
        expected_extensions = ['.js', '.cpp']
        for extension in expected_extensions:
            assert any(
                f['name'].endswith(extension) for f in report['source_files']
            ), 'No {} file in the generated report'.format(extension)

        logger.info('Upload changeset coverage data to Phabricator')
        phabricatorUploader = PhabricatorUploader(self.repo_dir, self.revision)
        phabricatorUploader.upload(report)

        with ThreadPoolExecutorResult(max_workers=2) as executor:
            executor.submit(uploader.coveralls, output)
            executor.submit(uploader.codecov, output, commit_sha)

        logger.info('Waiting for build to be ingested by Codecov...')
        # Wait until the build has been ingested by Codecov.
        if uploader.codecov_wait(commit_sha):
            logger.info('Build ingested by codecov.io')
            notifier = Notifier(self.repo_dir, self.revision, self.client_id,
                                self.access_token)
            notifier.notify()
        else:
            logger.error('codecov.io took too much time to ingest data.')
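
uploader.get_codecov and uploader.codecov_wait belong to the project's uploader module and are not shown here. Judging from how the examples use them (get_codecov raises requests.exceptions.HTTPError until Codecov has ingested the commit), the wait is presumably a polling loop along these lines (retry counts and intervals are assumptions):

import time

import requests


def codecov_wait(commit_sha, retries=30, interval=60):
    # Poll until Codecov reports the commit, or give up and return False so
    # the caller can log that ingestion took too long.
    for _ in range(retries):
        try:
            get_codecov(commit_sha)  # raises HTTPError while not ingested
            return True
        except requests.exceptions.HTTPError:
            time.sleep(interval)
    return False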
Code Example #12
File: codecov.py Project: paras0419/release-services
    def go(self):
        if self.from_pulse:
            commit_sha = self.githubUtils.mercurial_to_git(self.revision)
            try:
                uploader.get_codecov(commit_sha)
                logger.warn('Build was already ingested')
                return
            except requests.exceptions.HTTPError:
                pass

        with ThreadPoolExecutorResult(max_workers=2) as executor:
            # Thread 1 - Download coverage artifacts.
            executor.submit(self.artifactsHandler.download_all)

            # Thread 2 - Clone mozilla-central.
            executor.submit(self.clone_mozilla_central, self.revision)

        if self.from_pulse:
            self.githubUtils.update_geckodev_repo()

            logger.info('GitHub revision', revision=commit_sha)

            self.githubUtils.post_github_status(commit_sha)

            r = requests.get(
                'https://hg.mozilla.org/mozilla-central/json-rev/%s' %
                self.revision)
            r.raise_for_status()
            push_id = r.json()['pushid']

            output = grcov.report(self.artifactsHandler.get(),
                                  source_dir=self.repo_dir,
                                  service_number=push_id,
                                  commit_sha=commit_sha,
                                  token=secrets[secrets.COVERALLS_TOKEN])
            logger.info('Report generated successfully')

            logger.info('Upload changeset coverage data to Phabricator')
            phabricatorUploader = PhabricatorUploader(self.repo_dir,
                                                      self.revision)
            phabricatorUploader.upload(json.loads(output))

            with ThreadPoolExecutorResult(max_workers=2) as executor:
                executor.submit(uploader.coveralls, output)
                executor.submit(uploader.codecov, output, commit_sha)

            logger.info('Waiting for build to be ingested by Codecov...')
            # Wait until the build has been ingested by Codecov.
            if uploader.codecov_wait(commit_sha):
                logger.info('Build ingested by codecov.io')
                self.notifier.notify()
            else:
                logger.error('codecov.io took too much time to ingest data.')
        else:
            logger.info('Generating suite reports')
            os.makedirs(self.ccov_reports_dir, exist_ok=True)
            suite_reports.generate(self.suites, self.artifactsHandler,
                                   self.ccov_reports_dir, self.repo_dir)

            logger.info('Generating zero coverage reports')
            zc = ZeroCov(self.repo_dir)
            zc.generate(self.artifactsHandler.get(), self.revision,
                        self.github_revision)

            logger.info('Generating chunk mapping')
            chunk_mapping.generate(self.repo_dir, self.revision,
                                   self.artifactsHandler)

            # Index the task in the TaskCluster index at the given revision and as "latest".
            # Given that all tasks have the same rank, the latest task that finishes will
            # overwrite the "latest" entry.
            namespaces = [
                'project.releng.services.project.{}.code_coverage_bot.{}'.format(
                    secrets[secrets.APP_CHANNEL], self.revision),
                'project.releng.services.project.{}.code_coverage_bot.latest'.format(
                    secrets[secrets.APP_CHANNEL]),
            ]

            for namespace in namespaces:
                self.index_service.insertTask(
                    namespace,
                    {
                        'taskId': os.environ['TASK_ID'],
                        'rank': 0,
                        'data': {},
                        'expires': (datetime.utcnow() + timedelta(180)).strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
                    })

            os.chdir(self.ccov_reports_dir)
            self.githubUtils.update_codecoveragereports_repo()