Example 1
    def index_task(self, namespaces, ttl=180):
        """
        Index current task on Taskcluster Index
        TTL is expressed in days
        """
        assert isinstance(ttl, int) and ttl > 0
        task_id = os.environ.get("TASK_ID")
        if task_id is None:
            logger.warning("Skipping Taskcluster indexing: no task id found.")
            return

        index_service = taskcluster_config.get_service("index")

        for namespace in namespaces:
            index_service.insertTask(
                namespace,
                {
                    "taskId": task_id,
                    "rank": 0,
                    "data": {},
                    "expires": (datetime.utcnow() + timedelta(days=ttl)).strftime(
                        "%Y-%m-%dT%H:%M:%S.%fZ"
                    ),
                },
            )
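
A minimal usage sketch, assuming index_task is a method on a bot/hook class and that index routes follow the common project.<group>.<name>.<channel> convention; the namespace values below are illustrative, not taken from the source:

    # Hypothetical caller; the route names are assumptions for illustration.
    namespaces = [
        "project.relman.code-coverage.production.cron",
        "project.relman.code-coverage.production.cron.latest",
    ]
    bot.index_task(namespaces, ttl=90)  # index entries expire 90 days from now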
Example 2
    def __init__(self, repository, revision, task_name_filter, cache_root):
        # List of test suites, sorted alphabetically.
        # This way, the index of a suite in the array should be stable enough.
        self.suites = ["web-platform-tests"]

        self.cache_root = cache_root

        temp_dir = tempfile.mkdtemp()
        self.artifacts_dir = os.path.join(temp_dir, "ccov-artifacts")

        self.index_service = taskcluster_config.get_service("index")

        if revision is None:
            # Retrieve latest ingested revision
            self.repository = MOZILLA_CENTRAL_REPOSITORY
            try:
                self.revision = uploader.gcp_latest("mozilla-central")[0]["revision"]
            except Exception as e:
                logger.warning(
                    "Failed to retrieve the latest ingested reports: {}".format(e)
                )
                raise
            self.from_pulse = False
        else:
            self.repository = repository
            self.revision = revision
            self.from_pulse = True

        self.branch = self.repository[len(HG_BASE) :]

        assert os.path.isdir(cache_root), "Cache root {} is not a dir.".format(
            cache_root
        )
        self.repo_dir = os.path.join(cache_root, self.branch)

        logger.info("Mercurial revision", revision=self.revision)

        task_ids = {}
        for platform in ["linux", "windows", "android-test", "android-emulator"]:
            task = taskcluster.get_task(self.branch, self.revision, platform)

            # On try, developers might have requested to run only one platform, and we trust them.
            # On mozilla-central, we want to assert that every platform was run (except for android platforms
            # as they are unstable).
            if task is not None:
                task_ids[platform] = task
            elif (
                self.repository == MOZILLA_CENTRAL_REPOSITORY
                and not platform.startswith("android")
            ):
                raise Exception("Code coverage build failed and was not indexed.")

        self.artifactsHandler = ArtifactsHandler(
            task_ids, self.artifacts_dir, task_name_filter
        )
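
For context, a hedged sketch of constructing this class; the class name CodeCov and the constant values are assumptions (neither appears in the excerpt), and cache_root must be an existing directory or the assert fails:

    # Hypothetical instantiation; class name and constants are assumed.
    HG_BASE = "https://hg.mozilla.org/"
    MOZILLA_CENTRAL_REPOSITORY = HG_BASE + "mozilla-central"

    bot = CodeCov(
        repository=MOZILLA_CENTRAL_REPOSITORY,
        revision=None,  # None -> fall back to the latest ingested revision
        task_name_filter="*",
        cache_root="/tmp/cache",  # must already exist
    )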
Example 3
def notify_email(revision, changesets, changesets_coverage):
    """
    Send an email to admins when low coverage for new commits is detected
    """
    notify_service = taskcluster_config.get_service("notify")

    content = ""
    for changeset in changesets:
        desc = changeset["desc"].split("\n")[0]

        if any(text in desc for text in ["r=merge", "a=merge"]):
            continue

        rev = changeset["node"]

        # Lookup changeset coverage from phabricator uploader
        rev_id = parse_revision_id(changeset["desc"])
        if rev_id is None:
            continue
        coverage = changesets_coverage.get(rev_id)
        if coverage is None:
            logger.warn("No coverage found", changeset=changeset)
            continue

        # Calc totals for all files
        covered = sum(c["lines_covered"] for c in coverage.values())
        added = sum(c["lines_added"] for c in coverage.values())

        if covered < 0.2 * added:
            content += "* [{}](https://firefox-code-coverage.herokuapp.com/#/changeset/{}): {} covered out of {} added.\n".format(
                desc, rev, covered, added
            )  # noqa

    if content == "":
        return
    elif len(content) > 102400:
        # Content is 102400 chars max
        content = content[:102000] + "\n\n... Content max limit reached!"

    for email in secrets[secrets.EMAIL_ADDRESSES]:
        notify_service.email(
            {
                "address": email,
                "subject": "Coverage patches for {}".format(revision),
                "content": content,
                "template": "fullscreen",
            }
        )

    return content
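
From the sums above, changesets_coverage is expected to map a Phabricator revision id to per-file counters with lines_covered and lines_added keys; a minimal sketch of that shape (paths and numbers are invented):

    # Shape inferred from the code above; values are illustrative.
    changesets_coverage = {
        1234: {  # Phabricator revision id, as returned by parse_revision_id
            "dom/base/nsDocument.cpp": {"lines_covered": 2, "lines_added": 30},
            "dom/base/nsDocument.h": {"lines_covered": 0, "lines_added": 5},
        }
    }
    # 2 covered out of 35 added is below the 0.2 threshold, so it gets reported.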
Example 4
def trigger_task(task_group_id: str, revision: str) -> None:
    """
    Trigger a code coverage task to build covdir at a specified revision
    """
    hooks = taskcluster_config.get_service("hooks")
    hooks.triggerHook(
        "project-relman",
        f"code-coverage-repo-{secrets[secrets.APP_CHANNEL]}",
        {
            "REPOSITORY": config.MOZILLA_CENTRAL_REPOSITORY,
            "REVISION": revision,
            "taskGroupId": task_group_id,
            "taskName": "covdir for {}".format(revision),
        },
    )
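
A short usage sketch, assuming the caller creates a fresh task group id with slugId as the other examples do; the revision hash is a placeholder:

    from taskcluster.utils import slugId

    # Placeholder mozilla-central revision hash.
    trigger_task(slugId(), "abcdef0123456789abcdef0123456789abcdef01")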
Example 5
def trigger_task(task_group_id, commit):
    """
    Trigger a code coverage task to build covdir at a specified revision
    """
    date = datetime.fromtimestamp(commit["date"]).strftime("%Y-%m-%d")
    name = "covdir with suites on {} - {} - {}".format(
        secrets[secrets.APP_CHANNEL], date, commit["changeset"])
    hooks = taskcluster_config.get_service("hooks")
    payload = {
        "REPOSITORY": MC_REPO,
        "REVISION": commit["changeset"],
        "taskGroupId": task_group_id,
        "taskName": name,
    }
    hook_id = HOOK_ID.format(app_channel=secrets[secrets.APP_CHANNEL])
    return hooks.triggerHook(HOOK_GROUP, hook_id, payload)
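
Unlike the previous variant, this one takes a commit dict; from the fields it reads, the minimal expected shape looks like this (values invented):

    # Minimal commit dict with the fields read above; values are illustrative.
    commit = {
        "changeset": "abcdef0123456789abcdef0123456789abcdef01",
        "date": 1546300800,  # Unix timestamp, rendered as %Y-%m-%d in the name
    }
    trigger_task(slugId(), commit)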
Example 6
def trigger_task(task_group_id, repository, commit):
    """
    Trigger a code coverage task to build covdir at a specified revision
    """
    assert isinstance(commit, str)
    name = "covdir {} - {} - {}".format(secrets[secrets.APP_CHANNEL],
                                        repository, commit)
    hooks = taskcluster_config.get_service("hooks")
    payload = {
        "REPOSITORY": repository,
        "REVISION": commit,
        "taskGroupId": task_group_id,
        "taskName": name,
    }
    hook_id = HOOK_ID.format(app_channel=secrets[secrets.APP_CHANNEL])
    return hooks.triggerHook(HOOK_GROUP, hook_id, payload)
Example 7
def notify_email(revision, changesets, changesets_coverage):
    """
    Send an email to admins when low coverage for new commits is detected
    """
    notify_service = taskcluster_config.get_service("notify")

    content = ""
    for changeset in changesets:
        desc = changeset["desc"].split("\n")[0]

        # Lookup changeset coverage from phabricator uploader
        rev_id = parse_revision_id(changeset["desc"])
        if rev_id is None:
            continue
        coverage = changesets_coverage.get(changeset["node"])
        if coverage is None:
            logger.warn("No coverage found", changeset=changeset)
            continue

        # Calc totals for all files
        covered = sum(
            c["lines_covered"] + c["lines_unknown"] for c in coverage["paths"].values()
        )
        added = sum(c["lines_added"] for c in coverage["paths"].values())

        if covered < 0.4 * added:
            url = parse_revision_url(changeset["desc"])
            content += f"* [{desc}]({url}): {covered} covered out of {added} added.\n"

    if content == "":
        return
    elif len(content) > 102400:
        # Content is 102400 chars max
        content = content[:102000] + "\n\n... Content max limit reached!"

    for email in secrets[secrets.EMAIL_ADDRESSES]:
        notify_service.email(
            {
                "address": email,
                "subject": "Coverage patches for {}".format(revision),
                "content": content,
                "template": "fullscreen",
            }
        )

    return content
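
This variant reads a nested structure: coverage is looked up by changeset node, per-file counters live under a "paths" key, and lines_unknown counts toward covered lines. A sketch of the expected shape (path and numbers invented):

    # Shape inferred from the lookups above; values are illustrative.
    changesets_coverage = {
        "abcdef0123456789": {  # changeset["node"]
            "paths": {
                "js/src/jit/Ion.cpp": {
                    "lines_covered": 3,
                    "lines_unknown": 1,
                    "lines_added": 20,
                }
            }
        }
    }
    # (3 + 1) covered out of 20 added is below the 0.4 threshold, so it gets reported.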
Example 8
def main():
    # CLI args
    parser = argparse.ArgumentParser()
    parser.add_argument("--nb-tasks",
                        type=int,
                        default=5,
                        help="NB of tasks to create")
    parser.add_argument("--group",
                        type=str,
                        default=slugId(),
                        help="Task group to create/update")
    parser.add_argument(
        "--dry-run",
        action="store_true",
        default=False,
        help="List actions without triggering any new task",
    )
    parser.add_argument("history",
                        type=open,
                        help="JSON payload of /v2/history endpoint")
    args = parser.parse_args()

    # Setup Taskcluster
    taskcluster_config.auth()
    secrets.load(os.environ["TASKCLUSTER_SECRET"])

    # List existing tags & commits
    print("Group", args.group)
    queue = taskcluster_config.get_service("queue")
    try:
        group = queue.listTaskGroup(args.group)
        commits = [
            task["task"]["payload"]["env"]["REVISION"]
            for task in group["tasks"]
            if task["status"]["state"] not in ("failed", "exception")
        ]
        print("Found {} commits processed in task group {}".format(
            len(commits), args.group))
    except Exception as e:
        print("Invalid task group : {}".format(e))
        commits = []

    # Read the history file
    history = json.load(args.history)

    # Load initial dates from our history
    history_dates = {
        item["changeset"]: datetime.fromtimestamp(item["date"]).date()
        for item in history
    }
    dates = [
        history_dates[commit] for commit in commits if commit in history_dates
    ]

    # Trigger a task for each commit
    nb = 0
    for commit in history:
        date = datetime.fromtimestamp(commit["date"])
        if nb >= args.nb_tasks:
            break
        if commit["changeset"] in commits:
            print(
                f"Skipping commit {commit['changeset']} from {date} : already processed"
            )
            continue

        if date.date() in dates:
            print(
                f"Skipping commit {commit['changeset']} from {date} : same day"
            )
            continue

        print(f"Triggering commit {commit['changeset']} from {date}")
        if args.dry_run:
            print(">>> No trigger on dry run")
        else:
            out = trigger_task(args.group, commit)
            print(">>>", out["status"]["taskId"])
        nb += 1
        dates.append(date.date())
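
The history argument is opened as a file and parsed with json.load; from the fields accessed above, each entry needs at least a changeset hash and a Unix timestamp date. A sketch of a valid payload (values invented):

    [
        {"changeset": "abcdef0123456789abcdef0123456789abcdef01", "date": 1546300800},
        {"changeset": "0123456789abcdef0123456789abcdef01234567", "date": 1546387200}
    ]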
Example 9
import argparse
import os

from taskcluster.utils import slugId

from code_coverage_bot.secrets import secrets
from code_coverage_bot.taskcluster import taskcluster_config

CODECOV_URL = "https://codecov.io/api/gh/marco-c/gecko-dev/commit"
HOOK_GROUP = "project-relman"
HOOK_ID = "code-coverage-{app_channel}"

taskcluster_config.auth(os.environ.get("TASKCLUSTER_CLIENT_ID"),
                        os.environ.get("TASKCLUSTER_ACCESS_TOKEN"))
secrets.load(os.environ["TASKCLUSTER_SECRET"])
queue = taskcluster_config.get_service("queue")


def list_commits(tasks):
    """
    Read the revision from an existing code coverage task
    """
    for task_id in tasks:
        try:
            task = queue.task(task_id)
            env = task["payload"]["env"]
            yield env["REPOSITORY"], env["REVISION"]
        except Exception as e:
            print("Failed to load task {}: {}".format(task_id, e))