Example 1
    def build_tasks(self, parent_task_id: str, env: Optional[Dict[str, str]] = None):
        """Create fuzzing tasks and attach them to a decision task"""
        now = datetime.utcnow()
        preprocess_task_id = None

        preprocess = cast(PoolConfiguration, self.create_preprocess())
        if preprocess is not None:
            assert preprocess.max_run_time is not None
            task = yaml.safe_load(
                FUZZING_TASK.substitute(
                    created=stringDate(now),
                    deadline=stringDate(
                        now + timedelta(seconds=preprocess.max_run_time)
                    ),
                    description=DESCRIPTION.replace("\n", "\\n"),
                    expires=stringDate(fromNow("4 weeks", now)),
                    max_run_time=preprocess.max_run_time,
                    name=f"Fuzzing task {self.task_id} - preprocess",
                    owner_email=OWNER_EMAIL,
                    pool_id=self.pool_id,
                    provisioner=PROVISIONER_ID,
                    scheduler=SCHEDULER_ID,
                    secret=DECISION_TASK_SECRET,
                    task_group=parent_task_id,
                    task_id=self.task_id,
                )
            )
            task["payload"]["env"]["TASKCLUSTER_FUZZING_PREPROCESS"] = "1"
            configure_task(task, preprocess, now, env)
            preprocess_task_id = slugId()
            yield preprocess_task_id, task

        assert self.max_run_time is not None
        assert self.tasks is not None
        for i in range(1, self.tasks + 1):
            task = yaml.safe_load(
                FUZZING_TASK.substitute(
                    created=stringDate(now),
                    deadline=stringDate(now + timedelta(seconds=self.max_run_time)),
                    description=DESCRIPTION.replace("\n", "\\n"),
                    expires=stringDate(fromNow("4 weeks", now)),
                    max_run_time=self.max_run_time,
                    name=f"Fuzzing task {self.task_id} - {i}/{self.tasks}",
                    owner_email=OWNER_EMAIL,
                    pool_id=self.pool_id,
                    provisioner=PROVISIONER_ID,
                    scheduler=SCHEDULER_ID,
                    secret=DECISION_TASK_SECRET,
                    task_group=parent_task_id,
                    task_id=self.task_id,
                )
            )
            if preprocess_task_id is not None:
                task["dependencies"].append(preprocess_task_id)
            configure_task(task, self, now, env)
            yield slugId(), task
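
The generator above only yields (task_id, task) pairs; creating the tasks is left to the caller. As a rough sketch of what that driver could look like (the queue client and pool object are assumptions based on the later examples, not part of this snippet):

def submit_fuzzing_tasks(queue, pool, decision_task_id, env=None):
    """Submit every task yielded by pool.build_tasks() and return the new task ids."""
    created = []
    for task_id, task in pool.build_tasks(decision_task_id, env):
        # task_id is a fresh slugId(); the task body already carries its group and dependencies
        queue.createTask(task_id, task)
        created.append(task_id)
    return created
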
Example 2
 def test_slug_id_nice_stays_nice(self):
     with mock.patch('uuid.uuid4') as p:
         # first bit of uuid unset, should remain unset
         p.return_value = uuid.UUID('3ed97923-7616-4ec8-85ed-4b695f67ac2e')
         expected = b'Ptl5I3YWTsiF7UtpX2esLg'
         actual = subject.slugId()
         self.assertEqual(expected, actual)
Example 3
def test_slug_id_nice_stays_nice():
    with mock.patch('uuid.uuid4') as p:
        # first bit of uuid unset, should remain unset
        p.return_value = uuid.UUID('3ed97923-7616-4ec8-85ed-4b695f67ac2e')
        expected = 'Ptl5I3YWTsiF7UtpX2esLg'
        actual = subject.slugId()
        assert expected == actual
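
Both tests expect the same slug because "nice" slugs clear the most significant bit of the UUID before base64url-encoding it, so the slug can never start with '-'. A self-contained sketch of that encoding, written here as an assumption about the scheme rather than the library's actual code:

import base64
import uuid

def nice_slug(u):
    raw = bytearray(u.bytes)
    raw[0] &= 0x7F  # clear the first bit so the leading character is never '-' or '_'
    return base64.urlsafe_b64encode(bytes(raw))[:-2].decode("ascii")  # drop '==' padding

# The two UUIDs used in the tests differ only in that first bit, so both map to
# the same 22-character slug:
assert nice_slug(uuid.UUID("3ed97923-7616-4ec8-85ed-4b695f67ac2e")) == "Ptl5I3YWTsiF7UtpX2esLg"
assert nice_slug(uuid.UUID("bed97923-7616-4ec8-85ed-4b695f67ac2e")) == "Ptl5I3YWTsiF7UtpX2esLg"
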
Example 4
    def run_task(self, task_definition, ttl=5, extra_env={}):
        """
        Create a new task on Taskcluster
        """
        assert isinstance(task_definition, dict)

        # Update the env in task
        task_definition['payload']['env'].update(extra_env)

        # Get taskcluster queue
        queue = self.taskcluster.get_queue_service()

        # Build task id
        task_id = slugId().decode('utf-8')

        # Set dates
        now = datetime.utcnow()
        task_definition['created'] = now
        task_definition['deadline'] = now + timedelta(seconds=ttl * 3600)
        logger.info('Creating a new task',
                    id=task_id,
                    name=task_definition['metadata']['name'])  # noqa

        # Create a new task
        return queue.createTask(task_id, task_definition)
Example 5
    def build_tasks(self, parent_task_id: str, env: Optional[Dict[str, str]] = None):
        """Create fuzzing tasks and attach them to a decision task"""
        now = datetime.utcnow()

        for pool in self.iterpools():
            assert pool.max_run_time is not None
            assert pool.tasks is not None
            for i in range(1, pool.tasks + 1):
                task = yaml.safe_load(
                    FUZZING_TASK.substitute(
                        created=stringDate(now),
                        deadline=stringDate(now + timedelta(seconds=pool.max_run_time)),
                        description=DESCRIPTION.replace("\n", "\\n"),
                        expires=stringDate(fromNow("4 weeks", now)),
                        max_run_time=pool.max_run_time,
                        name=(
                            f"Fuzzing task {pool.platform}-{pool.pool_id} - "
                            f"{i}/{pool.tasks}"
                        ),
                        owner_email=OWNER_EMAIL,
                        pool_id=pool.pool_id,
                        provisioner=PROVISIONER_ID,
                        scheduler=SCHEDULER_ID,
                        secret=DECISION_TASK_SECRET,
                        task_group=parent_task_id,
                        task_id=self.task_id,
                    )
                )
                configure_task(task, cast(PoolConfiguration, pool), now, env)
                yield slugId(), task
Example 6
def create_task(**kwargs):
    """ Create a TC task.

    NOTE: This code needs to be tested for normal TC tasks to determine
    if the default values would also work for non BBB tasks.
    """
    task_id = kwargs.get('taskId', slugId())

    task_definition = {
        'taskId': task_id,
        # Do not retry the task if it fails to run successfully
        'reruns': kwargs.get('reruns', 0),
        'task': {
            'workerType': kwargs['workerType'],  # mandatory
            'provisionerId': kwargs['provisionerId'],  # mandatory
            'created': kwargs.get('created', fromNow('0d')),
            'deadline': kwargs.get('deadline', fromNow('1d')),
            'expires': kwargs.get('deadline', fromNow('1d')),
            'payload': kwargs.get('payload', {}),
            'metadata': kwargs['metadata'],  # mandatory
            'schedulerId': kwargs.get('schedulerId', 'task-graph-scheduler'),
            'tags': kwargs.get('tags', {}),
            'extra': kwargs.get('extra', {}),
            'routes': kwargs.get('routes', []),
            'priority': kwargs.get('priority', 'normal'),
            'retries': kwargs.get('retries', 5),
            'scopes': kwargs.get('scopes', []),
        }
    }

    if kwargs.get('taskGroupId'):
        task_definition['task']['taskGroupId'] = kwargs.get('taskGroupId', task_id)

    return task_definition
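
A hypothetical call to the helper above, with placeholder values for the mandatory arguments; it only illustrates which keys must be supplied and that the generated taskId is a 22-character slug:

definition = create_task(
    workerType="buildbot-bridge",       # placeholder worker type
    provisionerId="buildbot-bridge",    # placeholder provisioner
    metadata={
        "name": "example task",
        "description": "placeholder",
        "owner": "someone@example.com",
        "source": "https://example.com/placeholder",
    },
    payload={"buildername": "placeholder-builder"},
)
assert len(definition["taskId"]) == 22  # slugId() produces a 22-character slug
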
Example 7
 def _create_push_task(self, service, service_build_tasks):
     push_task = yaml_load(
         PUSH_TASK.substitute(
             clone_url=self.github_event.clone_url,
             commit=self.github_event.commit,
             deadline=stringDate(self.now + DEADLINE),
             docker_secret=self.docker_secret,
             max_run_time=int(MAX_RUN_TIME.total_seconds()),
             now=stringDate(self.now),
             owner_email=OWNER_EMAIL,
             provisioner=PROVISIONER_ID,
             scheduler=SCHEDULER_ID,
             service_name=service.name,
             source_url=SOURCE_URL,
             task_group=self.task_group,
             worker=WORKER_TYPE,
         )
     )
     push_task["dependencies"].append(service_build_tasks[service.name])
     task_id = slugId()
     LOG.info(
         "%s task %s: %s", self._create_str, task_id, push_task["metadata"]["name"]
     )
     if not self.dry_run:
         try:
             Taskcluster.get_service("queue").createTask(task_id, push_task)
         except TaskclusterFailure as exc:  # pragma: no cover
             LOG.error("Error creating push task: %s", exc)
             raise
     return task_id
Example 8
 def test_slug_id_is_always_nice(self):
     with mock.patch('uuid.uuid4') as p:
         # first bit of uuid set, which should get unset
         p.return_value = uuid.UUID('bed97923-7616-4ec8-85ed-4b695f67ac2e')
         expected = b'Ptl5I3YWTsiF7UtpX2esLg'
         actual = subject.slugId()
         self.assertEqual(expected, actual)
Example 9
    def build_tasks(self, parent_task_id, env=None):
        """Create fuzzing tasks and attach them to a decision task"""
        now = datetime.utcnow()

        for pool in self.iterpools():
            for i in range(1, pool.tasks + 1):
                task = yaml.safe_load(
                    FUZZING_TASK.substitute(
                        created=stringDate(now),
                        deadline=stringDate(now + timedelta(
                            seconds=pool.max_run_time)),
                        description=DESCRIPTION.replace("\n", "\\n"),
                        expires=stringDate(fromNow("1 week", now)),
                        max_run_time=pool.max_run_time,
                        name=(f"Fuzzing task {pool.platform}-{pool.pool_id} - "
                              f"{i}/{pool.tasks}"),
                        owner_email=OWNER_EMAIL,
                        pool_id=pool.pool_id,
                        provisioner=PROVISIONER_ID,
                        scheduler=SCHEDULER_ID,
                        secret=DECISION_TASK_SECRET,
                        task_group=parent_task_id,
                        task_id=self.task_id,
                    ))
                task["payload"]["artifacts"].update(
                    pool.artifact_map(stringDate(fromNow("1 week", now))))
                # `container` can be either a string or a dict, so can't template it
                task["payload"]["image"] = pool.container
                task["scopes"] = sorted(chain(pool.scopes, task["scopes"]))
                add_capabilities_for_scopes(task)
                if env is not None:
                    assert set(task["payload"]["env"]).isdisjoint(set(env))
                    task["payload"]["env"].update(env)

                yield slugId(), task
Example 10
def create_task(**kwargs):
    """ Create a TC task.

    NOTE: This code needs to be tested for normal TC tasks to determine
    if the default values would also work for non BBB tasks.
    """
    task_id = kwargs.get('taskId', slugId())

    task_definition = {
        'taskId': task_id,
        # Do not retry the task if it fails to run successfully
        'reruns': kwargs.get('reruns', 0),
        'task': {
            'workerType': kwargs['workerType'],  # mandatory
            'provisionerId': kwargs['provisionerId'],  # mandatory
            'created': kwargs.get('created', fromNow('0d')),
            'deadline': kwargs.get('deadline', fromNow('1d')),
            'expires': kwargs.get('deadline', fromNow('1d')),
            'payload': kwargs.get('payload', {}),
            'metadata': kwargs['metadata'],  # mandatory
            'schedulerId': kwargs.get('schedulerId', 'task-graph-scheduler'),
            'tags': kwargs.get('tags', {}),
            'extra': kwargs.get('extra', {}),
            'routes': kwargs.get('routes', []),
            'priority': kwargs.get('priority', 'normal'),
            'retries': kwargs.get('retries', 5),
            'scopes': kwargs.get('scopes', []),
        }
    }

    if kwargs.get('taskGroupId'):
        task_definition['task']['taskGroupId'] = kwargs.get(
            'taskGroupId', task_id)

    return task_definition
Example 11
def main():
    # CLI args
    parser = argparse.ArgumentParser()
    parser.add_argument("--nb-tasks", type=int, default=5, help="NB of tasks to create")
    parser.add_argument(
        "--unique",
        choices=("day", "week"),
        help="Trigger only one task per day or week",
    )
    parser.add_argument(
        "--group", type=str, default=slugId(), help="Task group to create/update"
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        default=False,
        help="List actions without triggering any new task",
    )
    parser.add_argument(
        "--codecov-token",
        type=str,
        default=os.environ.get("CODECOV_TOKEN"),
        help="Codecov access token",
    )
    args = parser.parse_args()

    # Download revision mapper database
    print("Downloading revision database...")
    download_mapfile()

    # List existing tags & commits
    print("Group", args.group)
    queue = taskcluster.get_service("queue")
    try:
        group = queue.listTaskGroup(args.group)
        commits = [
            task["task"]["payload"]["env"]["REVISION"]
            for task in group["tasks"]
            if task["status"]["state"] not in ("failed", "exception")
        ]
        print(
            "Found {} commits processed in task group {}".format(
                len(commits), args.group
            )
        )
    except Exception as e:
        print("Invalid task group : {}".format(e))
        commits = []

    # Trigger a task for each commit
    for commit in list_commits(args.codecov_token, args.nb_tasks, args.unique, commits):
        print("Triggering commit {mercurial} from {timestamp}".format(**commit))
        if args.dry_run:
            print(">>> No trigger on dry run")
        else:
            out = trigger_task(args.group, commit)
            print(">>>", out["status"]["taskId"])
Example 12
 def test_insert_to_index(self):
   payload = {
     'taskId': utils.slugId(),
     'rank': 1,
     'data': {'test': 'data'},
     'expires': '2015-09-09T19:19:15.879Z'
   }
   result = self.i.insertTask('testing', payload)
   self.assertEqual(payload['expires'], result['expires'])
Example 13
def main():
    # CLI args
    parser = argparse.ArgumentParser()
    parser.add_argument("--nb-tasks",
                        type=int,
                        default=5,
                        help="NB of tasks to create")
    parser.add_argument("--group",
                        type=str,
                        default=slugId(),
                        help="Task group to create/update")
    parser.add_argument(
        "--dry-run",
        action="store_true",
        default=False,
        help="List actions without triggering any new task",
    )
    parser.add_argument("tasks", nargs="+", help="Existing tasks to retrigger")
    args = parser.parse_args()

    # List existing tags & commits
    print("Group", args.group)
    try:
        group = queue.listTaskGroup(args.group)
        commits = set([(
            task["task"]["payload"]["env"]["REPOSITORY"],
            task["task"]["payload"]["env"]["REVISION"],
        ) for task in group["tasks"]
                       if task["status"]["state"] not in ("failed",
                                                          "exception")])
        print("Found {} commits processed in task group {}".format(
            len(commits), args.group))
    except Exception as e:
        print("Invalid task group : {}".format(e))
        commits = set()

    # Trigger a task for each commit
    triggered = 0
    for repository, commit in list_commits(args.tasks):
        if (repository, commit) in commits:
            print("Skipping existing commit {} {}".format(repository, commit))
            continue

        print("Triggering {} : {}".format(repository, commit))
        if args.dry_run:
            print(">>> No trigger on dry run")
        else:
            out = trigger_task(args.group, repository, commit)
            print(">>>", out["status"]["taskId"])
            triggered += 1

        commits.add((repository, commit))
        if triggered >= args.nb_tasks:
            print("Max nb tasks reached !")
            break
Example 14
def _create_task(buildername, repo_name, revision, metadata, requires=None):
    """Return takcluster task to trigger a buildbot builder.

    This function creates a generic task with the minimum amount of
    information required for the buildbot-bridge to consider it valid.
    You can establish a list dependencies to other tasks through the requires field.

    :param buildername: The name of a buildbot builder.
    :type buildername: str
    :param repo_name The name of a repository e.g. mozilla-inbound, alder et al.
    :type repo_name: str
    :param revision: Changeset ID of a revision.
    :type revision: str
    :param metadata: Dictionary with metadata values about the task.
    :type metadata: str
    :param requires: List of taskIds of other tasks which this task depends on.
    :type requires: str
    :returns: TaskCluster graph
    :rtype: dict

    """
    task = {
        'taskId': slugId(),
        'reruns': 0,  # Do not retry the task if it fails to run successfully
        'task': {
            'workerType': 'buildbot-bridge',
            'provisionerId': 'buildbot-bridge',
            # XXX: check if tc client has something more like now
            'created': fromNow('0d'),
            'deadline': fromNow('1d'),
            'payload': {
                'buildername': buildername,
                'sourcestamp': {
                    'branch': repo_name,
                    'revision': revision
                },
                # Needed because of bug 1195751
                'properties': {
                    'product':
                    get_builder_information(buildername)['properties']
                    ['product'],
                    'who':
                    metadata['owner']
                }
            },
            'metadata': dict(metadata.items() + {'name': buildername}.items()),
        }
    }

    if requires:
        task['requires'] = requires

    return task
Example 15
 def _create_svc_test_task(self, service, test, service_build_tasks):
     image = test.image
     deps = []
     if image in service_build_tasks:
         if self.services[image].dirty:
             deps.append(service_build_tasks[image])
             image = {
                 "type": "task-image",
                 "taskId": service_build_tasks[image],
             }
         else:
             image = {
                 "type": "indexed-image",
                 "namespace": (f"project.fuzzing.orion.{image}.{self.push_branch}"),
             }
         image["path"] = f"public/{test.image}.tar.zst"
     test_task = yaml_load(
         TEST_TASK.substitute(
             deadline=stringDate(self.now + DEADLINE),
             max_run_time=int(MAX_RUN_TIME.total_seconds()),
             now=stringDate(self.now),
             owner_email=OWNER_EMAIL,
             provisioner=PROVISIONER_ID,
             scheduler=SCHEDULER_ID,
             service_name=service.name,
             source_url=SOURCE_URL,
             task_group=self.task_group,
             test_name=test.name,
             worker=WORKER_TYPE,
         )
     )
     test_task["payload"]["image"] = image
     test_task["dependencies"].extend(deps)
     service_path = str(service.root.relative_to(self.services.root))
     test.update_task(
         test_task,
         self.github_event.clone_url,
         self.github_event.fetch_ref,
         self.github_event.commit,
         service_path,
     )
     task_id = slugId()
     LOG.info(
         "%s task %s: %s", self._create_str, task_id, test_task["metadata"]["name"]
     )
     if not self.dry_run:
         try:
             Taskcluster.get_service("queue").createTask(task_id, test_task)
         except TaskclusterFailure as exc:  # pragma: no cover
             LOG.error("Error creating test task: %s", exc)
             raise
     return task_id
Example 16
    def build_tasks(self, parent_task_id, env=None):
        """Create fuzzing tasks and attach them to a decision task"""
        now = datetime.utcnow()
        for i in range(1, self.tasks + 1):
            task_id = slugId()
            task = {
                "taskGroupId": parent_task_id,
                "dependencies": [parent_task_id],
                "created": stringDate(now),
                "deadline":
                stringDate(now + timedelta(seconds=self.cycle_time)),
                "expires": stringDate(fromNow("1 month", now)),
                "extra": {},
                "metadata": {
                    "description": DESCRIPTION,
                    "name": f"Fuzzing task {self.id} - {i}/{self.tasks}",
                    "owner": OWNER_EMAIL,
                    "source": "https://github.com/MozillaSecurity/fuzzing-tc",
                },
                "payload": {
                    "artifacts": {
                        "project/fuzzing/private/logs": {
                            "expires": stringDate(fromNow("1 month", now)),
                            "path": "/logs/",
                            "type": "directory",
                        }
                    },
                    "cache": {},
                    "capabilities": {},
                    "env": {
                        "TASKCLUSTER_FUZZING_POOL": self.filename
                    },
                    "features": {
                        "taskclusterProxy": True
                    },
                    "image": self.container,
                    "maxRunTime": self.cycle_time,
                },
                "priority": "high",
                "provisionerId": PROVISIONER_ID,
                "workerType": self.id,
                "retries": 1,
                "routes": [],
                "schedulerId": SCHEDULER_ID,
                "scopes": self.scopes,
                "tags": {},
            }
            if env is not None:
                assert set(task["payload"]["env"]).isdisjoint(set(env))
                task["payload"]["env"].update(env)

            yield task_id, task
Example 17
def _create_task(buildername, repo_name, revision, metadata, requires=None):
    """Return takcluster task to trigger a buildbot builder.

    This function creates a generic task with the minimum amount of
    information required for the buildbot-bridge to consider it valid.
    You can establish a list dependencies to other tasks through the requires field.

    :param buildername: The name of a buildbot builder.
    :type buildername: str
    :param repo_name The name of a repository e.g. mozilla-inbound, alder et al.
    :type repo_name: str
    :param revision: Changeset ID of a revision.
    :type revision: str
    :param metadata: Dictionary with metadata values about the task.
    :type metadata: str
    :param requires: List of taskIds of other tasks which this task depends on.
    :type requires: str
    :returns: TaskCluster graph
    :rtype: dict

    """
    task = {
        'taskId': slugId(),
        'reruns': 0,  # Do not retry the task if it fails to run successfully
        'task': {
            'workerType': 'buildbot-bridge',
            'provisionerId': 'buildbot-bridge',
            # XXX: check if tc client has something more like now
            'created': fromNow('0d'),
            'deadline': fromNow('1d'),
            'payload': {
                'buildername': buildername,
                'sourcestamp': {
                    'branch': repo_name,
                    'revision': revision
                },
                # Needed because of bug 1195751
                'properties': {
                    'product': get_builder_information(buildername)['properties']['product'],
                    'who': metadata['owner']
                }
            },
            'metadata': dict(metadata.items() + {'name': buildername}.items()),
        }
    }

    if requires:
        task['requires'] = requires

    return task
Example 18
    def queue_reduction_task(self, os_name: str, crash_id: int) -> None:
        """Queue a reduction task in Taskcluster.

        Arguments:
            os_name: The OS to schedule the task for.
            crash_id: The CrashManager crash ID to reduce.
        """
        if self.dry_run:
            return None
        dest_queue = TC_QUEUES[os_name]
        my_task_id = os.environ.get("TASK_ID")
        task_id = slugId()
        now = datetime.now(timezone.utc)
        if os_name == "windows":
            image_task_id = self.image_artifact_task(
                "project.fuzzing.orion.grizzly-win.master")
        elif os_name == "macosx":
            image_task_id = self.image_artifact_task(
                "project.fuzzing.orion.grizzly-macos.master")
        else:
            image_task_id = None
        task = yaml_load(REDUCE_TASKS[os_name].substitute(
            crash_id=crash_id,
            created=stringDate(now),
            deadline=stringDate(now + REDUCTION_DEADLINE),
            description=DESCRIPTION,
            expires=stringDate(now + REDUCTION_EXPIRES),
            image_task_id=image_task_id,
            max_run_time=int(REDUCTION_MAX_RUN_TIME.total_seconds()),
            os_name=os_name,
            owner_email=OWNER_EMAIL,
            provisioner=PROVISIONER_ID,
            scheduler=SCHEDULER_ID,
            task_group=my_task_id,
            worker=dest_queue,
        ))
        queue = Taskcluster.get_service("queue")
        LOG.info("Creating task %s: %s", task_id, task["metadata"]["name"])
        try:
            queue.createTask(task_id, task)
        except TaskclusterFailure as exc:
            LOG.error("Error creating task: %s", exc)
            return None
        LOG.info("Marking %d Q4 (in progress)", crash_id)
        CrashManager().update_testcase_quality(crash_id,
                                               Quality.REDUCING.value)
Example 19
def main():
    # CLI args
    parser = argparse.ArgumentParser()
    parser.add_argument('--nb-tasks',
                        type=int,
                        default=5,
                        help='NB of tasks to create')
    parser.add_argument('--unique',
                        choices=('day', 'week'),
                        help='Trigger only one task per day or week')
    parser.add_argument('--group',
                        type=str,
                        default=slugId(),
                        help='Task group to create/update')
    parser.add_argument('--dry-run',
                        action='store_true',
                        default=False,
                        help='List actions without triggering any new task')
    args = parser.parse_args()

    # List existing tags & commits
    print('Group', args.group)
    queue = taskcluster.get_service('queue')
    try:
        group = queue.listTaskGroup(args.group)
        commits = [
            task['task']['payload']['env']['REVISION']
            for task in group['tasks']
            if task['status']['state'] not in ('failed', 'exception')
        ]
        print('Found {} commits processed in task group {}'.format(
            len(commits), args.group))
    except Exception as e:
        print('Invalid task group : {}'.format(e))
        commits = []

    # Trigger a task for each commit
    for commit in list_commits(args.nb_tasks, args.unique, commits):
        print(
            'Triggering commit {mercurial} from {timestamp}'.format(**commit))
        if args.dry_run:
            print('>>> No trigger on dry run')
        else:
            out = trigger_task(args.group, commit)
            print('>>>', out['status']['taskId'])
Example 20
    def __init__(self, parent_id, src, bug_id, **kwargs):
        """

        :param parent_id: ID of parent task
        :param src: Path to src artifact
        :param bug_id: Bug ID
        :param kwargs: Additional options
        """
        self.id = slugId()
        self.parent_id = parent_id
        self.src = src
        self.bug_id = bug_id

        prefix = self.TASK_NAME.lower().replace(" ", "-")
        self.dest = f"{prefix}-{self.bug_id}-{self.parent_id}.json"

        self.dependency = kwargs.get("dep", None)
        self.force_confirm = kwargs.get("force_confirm", False)
Example 21
    async def retry_task(self, group_id, hook_id, task_id):
        """
        Retry a Taskcluster task by:
        - fetching its definition
        - updating its dates & retry count
        - creating a new task
        Do NOT use rerunTask as it's deprecated AND not recommended
        https://docs.taskcluster.net/docs/reference/platform/taskcluster-queue/references/api#rerunTask
        """
        # Fetch task definition
        definition = await self.queue.task(task_id)

        # Update timestamps
        date_format = "%Y-%m-%dT%H:%M:%S.%f%z"
        now = datetime.utcnow()
        created = datetime.strptime(definition["created"], date_format)
        deadline = datetime.strptime(definition["deadline"], date_format)
        expires = datetime.strptime(definition["expires"], date_format)
        definition["created"] = stringDate(now)
        definition["deadline"] = stringDate(now + (deadline - created))
        definition["expires"] = stringDate(now + (expires - created))

        # Decrement retries count
        definition["retries"] -= 1
        if definition["retries"] < 0:
            logger.warn(
                "Will not retry task, no more retries left",
                task_id=task_id,
                group_id=group_id,
                hook_id=hook_id,
            )
            return

        # Trigger a new task with the updated definition
        new_task_id = slugId()
        logger.info("Retry task", old_task=task_id, new_task=new_task_id)
        await self.queue.createTask(new_task_id, definition)

        # Enqueue task to check later
        await self.bus.send(self.queue_name, (group_id, hook_id, new_task_id))

        return new_task_id
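
The timestamp arithmetic above preserves the original task's deadline and expiry windows relative to its creation time, rather than copying the absolute dates. A minimal, self-contained illustration of that shift:

from datetime import datetime, timedelta

created = datetime(2024, 1, 1, 12, 0, 0)
deadline = created + timedelta(hours=2)    # the original task had a 2-hour deadline window
now = datetime(2024, 1, 3, 9, 30, 0)       # time of the retry

new_deadline = now + (deadline - created)  # same relative window, rebased on `now`
assert new_deadline - now == timedelta(hours=2)
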
Example 22
    async def create_task(self, extra_env={}):
        '''
        Create a new task on Taskcluster
        '''
        assert self.hooks is not None
        assert self.queue is not None

        logger.info('Loading task definition',
                    hook=self.hook_id,
                    group=self.group_id)
        try:
            hook_definition = self.hooks.hook(self.group_id, self.hook_id)
        except Exception as e:
            logger.warn('Failed to fetch task definition',
                        hook=self.hook_id,
                        group=self.group_id,
                        err=e)
            return False

        # Update the env in task
        task_definition = copy.deepcopy(hook_definition['task'])
        task_definition['payload']['env'].update(extra_env)

        # Build task id
        task_id = slugId().decode('utf-8')

        # Set dates
        now = datetime.utcnow()
        task_definition['created'] = now
        task_definition['deadline'] = now + self.parse_deadline(
            hook_definition['deadline'])
        logger.info('Creating a new task',
                    id=task_id,
                    name=task_definition['metadata']['name'])  # noqa

        # Create a new task
        self.queue.createTask(task_id, task_definition)

        # Send task to monitoring
        await task_monitoring.add_task(self.group_id, self.hook_id, task_id)

        return task_id
Example 23
    async def retry_task(self, group_id, hook_id, task_id):
        '''
        Retry a Taskcluster task by:
        - fetching its definition
        - updating its dates & retry count
        - creating a new task
        Do NOT use rerunTask as it's deprecated AND not recommended
        https://docs.taskcluster.net/docs/reference/platform/taskcluster-queue/references/api#rerunTask
        '''
        assert self.queue is not None

        # Fetch task definition
        definition = self.queue.task(task_id)

        # Update timestamps
        date_format = '%Y-%m-%dT%H:%M:%S.%f%z'
        now = datetime.utcnow()
        created = datetime.strptime(definition['created'], date_format)
        deadline = datetime.strptime(definition['deadline'], date_format)
        expires = datetime.strptime(definition['expires'], date_format)
        definition['created'] = stringDate(now)
        definition['deadline'] = stringDate(now + (deadline - created))
        definition['expires'] = stringDate(now + (expires - created))

        # Decrement retries count
        definition['retries'] -= 1
        if definition['retries'] < 0:
            logger.warn('Will not retry task, no more retries left',
                        task_id=task_id,
                        group_id=group_id,
                        hook_id=hook_id)
            return

        # Trigger a new task with the updated definition
        new_task_id = slugId()
        logger.info('Retry task', old_task=task_id, new_task=new_task_id)
        self.queue.createTask(new_task_id, definition)

        # Monitor new task
        await self.add_task(group_id, hook_id, new_task_id)

        return new_task_id
Example 24
    def create_task(self, ttl=5, extra_env={}):
        '''
        Create a new task on Taskcluster
        '''
        assert self.queue is not None

        # Update the env in task
        task_definition = copy.deepcopy(self.task_definition)
        task_definition['payload']['env'].update(extra_env)

        # Build task id
        task_id = slugId().decode('utf-8')

        # Set dates
        now = datetime.utcnow()
        task_definition['created'] = now
        task_definition['deadline'] = now + timedelta(seconds=ttl * 3600)
        logger.info('Creating a new task', id=task_id, name=task_definition['metadata']['name'])  # noqa

        # Create a new task
        return self.queue.createTask(task_id, task_definition)
Example 25
    def queue_reduction_task(self, os_name, crash_id):
        """Queue a reduction task in Taskcluster.

        Arguments:
            os_name (str): The OS to schedule the task for.
            crash_id (int): The CrashManager crash ID to reduce.

        Returns:
            None
        """
        if self.dry_run:
            return
        dest_queue = TC_QUEUES[os_name]
        my_task_id = os.environ.get("TASK_ID")
        task_id = slugId()
        now = datetime.utcnow()
        task = yaml_load(
            REDUCE_TASK.substitute(
                task_group=my_task_id,
                now=stringDate(now),
                deadline=stringDate(now + REDUCTION_DEADLINE),
                expires=stringDate(now + REDUCTION_EXPIRES),
                provisioner=PROVISIONER_ID,
                scheduler=SCHEDULER_ID,
                worker=dest_queue,
                max_run_time=int(REDUCTION_MAX_RUN_TIME.total_seconds()),
                description=DESCRIPTION,
                owner_email=OWNER_EMAIL,
                crash_id=crash_id,
                os_name=os_name,
            ))
        queue = Taskcluster.get_service("queue")
        LOG.info("Creating task %s: %s", task_id, task["metadata"]["name"])
        try:
            queue.createTask(task_id, task)
        except TaskclusterFailure as exc:
            LOG.error("Error creating task: %s", exc)
            return
        LOG.info("Marking %d Q4 (in progress)", crash_id)
        CrashManager().update_testcase_quality(crash_id, 4)
Example 26
    async def retry_task(self, group_id, hook_id, task_id):
        '''
        Retry a Taskcluster task by:
        - fetching its definition
        - updating its dates & retry count
        - creating a new task
        Do NOT use rerunTask as it's deprecated AND not recommended
        https://docs.taskcluster.net/docs/reference/platform/taskcluster-queue/references/api#rerunTask
        '''
        assert self.queue is not None

        # Fetch task definition
        definition = self.queue.task(task_id)

        # Update timestamps
        date_format = '%Y-%m-%dT%H:%M:%S.%f%z'
        now = datetime.utcnow()
        created = datetime.strptime(definition['created'], date_format)
        deadline = datetime.strptime(definition['deadline'], date_format)
        expires = datetime.strptime(definition['expires'], date_format)
        definition['created'] = stringDate(now)
        definition['deadline'] = stringDate(now + (deadline - created))
        definition['expires'] = stringDate(now + (expires - created))

        # Decrement retries count
        definition['retries'] -= 1
        if definition['retries'] < 0:
            logger.warn('Will not retry task, no more retries left', task_id=task_id, group_id=group_id, hook_id=hook_id)
            return

        # Trigger a new task with the updated definition
        new_task_id = slugId()
        logger.info('Retry task', old_task=task_id, new_task=new_task_id)
        self.queue.createTask(new_task_id, definition)

        # Monitor new task
        await self.add_task(group_id, hook_id, new_task_id)

        return new_task_id
Example 27
def create_task(repo_name, revision, **kwargs):
    """ Create a TC task.

    NOTE: This code needs to be tested for normal TC tasks to determine
    if the default values would also work for non BBB tasks.
    """
    metadata = _query_metadata(repo_name, revision, name=kwargs.get("metadata_name"))

    task_id = kwargs.get("taskId", slugId())

    task_definition = {
        "taskId": task_id,
        # Do not retry the task if it fails to run successfully
        "reruns": kwargs.get("reruns", 0),
        "task": {
            "workerType": kwargs["workerType"],  # mandatory
            "provisionerId": kwargs["provisionerId"],  # mandatory
            "created": kwargs.get("created", fromNow("0d")),
            "deadline": kwargs.get("deadline", fromNow("1d")),
            "expires": kwargs.get("deadline", fromNow("1d")),
            "payload": kwargs.get("payload", {}),
            "metadata": kwargs.get("metadata", metadata),
            "schedulerId": kwargs.get("schedulerId", "task-graph-scheduler"),
            "tags": kwargs.get("tags", {}),
            "extra": kwargs.get("extra", {}),
            "routes": kwargs.get("routes", []),
            "priority": kwargs.get("priority", "normal"),
            "retries": kwargs.get("retries", 5),
            "scopes": kwargs.get("scopes", []),
        },
    }

    if kwargs.get("taskGroupId"):
        task_definition["task"]["taskGroupId"] = (kwargs.get("taskGroupId", task_id),)

    return task_definition
Example 28
def createTemporaryCredentials(clientId,
                               accessToken,
                               start,
                               expiry,
                               scopes,
                               name=None):
    """ Create a set of temporary credentials

    Callers should not apply any clock skew; clock drift is accounted for by
    auth service.

    clientId: the issuing clientId
    accessToken: the issuer's accessToken
    start: start time of credentials (datetime.datetime)
    expiry: expiration time of credentials, (datetime.datetime)
    scopes: list of scopes granted
    name: credential name (optional)

    Returns a dictionary in the form:
        { 'clientId': str, 'accessToken: str, 'certificate': str}
    """

    for scope in scopes:
        if not isinstance(scope, six.string_types):
            raise exceptions.TaskclusterFailure('Scope must be string')

    # Credentials can only be valid for 31 days.  I hope that
    # this is validated on the server somehow...

    if expiry - start > datetime.timedelta(days=31):
        raise exceptions.TaskclusterFailure('Only 31 days allowed')

    # We multiply times by 1000 because the auth service is JS and as a result
    # uses milliseconds instead of seconds
    cert = dict(
        version=1,
        scopes=scopes,
        start=calendar.timegm(start.utctimetuple()) * 1000,
        expiry=calendar.timegm(expiry.utctimetuple()) * 1000,
        seed=utils.slugId() + utils.slugId(),
    )

    # if this is a named temporary credential, include the issuer in the certificate
    if name:
        cert['issuer'] = utils.toStr(clientId)

    sig = ['version:' + utils.toStr(cert['version'])]
    if name:
        sig.extend([
            'clientId:' + utils.toStr(name),
            'issuer:' + utils.toStr(clientId),
        ])
    sig.extend([
        'seed:' + utils.toStr(cert['seed']), 'start:' +
        utils.toStr(cert['start']), 'expiry:' +
        utils.toStr(cert['expiry']), 'scopes:'
    ] + scopes)
    sigStr = '\n'.join(sig).encode()

    if isinstance(accessToken, six.text_type):
        accessToken = accessToken.encode()
    sig = hmac.new(accessToken, sigStr, hashlib.sha256).digest()

    cert['signature'] = utils.encodeStringForB64Header(sig)

    newToken = hmac.new(accessToken, cert['seed'], hashlib.sha256).digest()
    newToken = utils.makeB64UrlSafe(
        utils.encodeStringForB64Header(newToken)).replace(b'=', b'')

    return {
        'clientId': name or clientId,
        'accessToken': newToken,
        'certificate': utils.dumpJson(cert),
    }
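
A hypothetical call to the function above; the client id, access token, and scope are placeholders. The returned certificate embeds two concatenated slugIds as its signing seed, which is the role slugId() plays here:

import datetime

creds = createTemporaryCredentials(
    clientId="my-permanent-client",                   # placeholder issuer
    accessToken="fake-access-token",                  # placeholder secret
    start=datetime.datetime.utcnow(),
    expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=2),
    scopes=["queue:create-task:proj-example/worker"], # example scope only
)
# creds is {'clientId': ..., 'accessToken': ..., 'certificate': ...}
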
Example 29
def _generate_tasks(repo_name, revision, builders_graph, metadata=None, task_graph_id=None,
                    parent_task_id=None, required_task_ids=[], **kwargs):
    """ Generate a TC json object with tasks based on a graph of graphs of buildernames

    :param repo_name: The name of a repository e.g. mozilla-inbound
    :type repo_name: str
    :param revision: Changeset ID of a revision.
    :type revision: str
    :param builders_graph:
        It is a graph made up of a dictionary where each
        key is a Buildbot buildername. The value for each key is either None
        or another graph of dependent builders.
    :type builders_graph: dict
    :param metadata: Metadata information to set for the tasks.
    :type metadata: json
    :param task_graph_id: TC graph id to which this task belongs to
    :type task_graph_id: str
    :param parent_task_id: Task from which to find artifacts. It is not a dependency.
    :type parent_task_id: str
    :returns: A list of TC tasks
    :rtype: list

    """
    if not type(required_task_ids) == list:
        raise MozciError("required_task_ids must be a list")

    tasks = []

    if type(builders_graph) != dict:
        raise MozciError("The buildbot graph should be a dictionary")

    # Let's iterate through the root builders in this graph
    for builder, dependent_graph in builders_graph.iteritems():
        # Due to bug 1221091 this will be used to know to which task
        # the artifacts will be uploaded to
        upload_to_task_id = slugId()
        properties = {'upload_to_task_id': upload_to_task_id}
        builder_details = get_buildername_metadata(builder)

        # Bug 1274483 - Android multi-locale nightly builds need to upload to two different tasks,
        # thus, it fails when we tell it to upload to the same task twice.
        if builder_details['platform_name'].startswith('android') and \
           builder_details['nightly'] is True and \
           'l10n' not in builder:
            properties = {}

        task = _create_task(
            buildername=builder,
            repo_name=repo_name,
            revision=revision,
            metadata=metadata,
            task_graph_id=task_graph_id,
            parent_task_id=parent_task_id,
            properties=properties,
            requires=required_task_ids,
            **kwargs
        )
        task_id = task['taskId']
        tasks.append(task)

        if dependent_graph:
            # If there are builders this builder triggers let's add them as well
            tasks = tasks + _generate_tasks(
                repo_name=repo_name,
                revision=revision,
                builders_graph=dependent_graph,
                metadata=metadata,
                task_graph_id=task_graph_id,
                # The parent task id is used to find artifacts; only one can be given
                parent_task_id=upload_to_task_id,
                # The required tasks are the one holding this task from running
                required_task_ids=[task_id],
                **kwargs
            )

    return tasks
Example 30
def _generate_tc_tasks_from_builders(builders, repo_name, revision):
    """ Return TC tasks based on a list of builders.

    Input: a list of builders and a revision
    Output: list of TC tasks based on the builders we receive

    :param builders: List of builder names
    :type builders: list
    :param repo_name: repository name
    :type repo_name: str
    :param revision: push revision
    :type revision: str
    :return: TC tasks
    :rtype: list

    """
    tasks = []
    build_builders = {}

    # We need to determine what upstream jobs need to be triggered besides the
    # builders already on our list
    for builder in builders:
        if is_upstream(builder):
            properties = {'upload_to_task_id': slugId()}

            # Bug 1274483 - Android multi-locale nightly builds need to upload to two different
            # tasks, thus, it fails when we tell it to upload to the same task twice.
            builder_details = get_buildername_metadata(builder)
            if builder_details['platform_name'].startswith('android') and \
               builder_details['nightly'] is True and \
               'l10n' not in builder:
                properties = {}

            task = _create_task(
                buildername=builder,
                repo_name=repo_name,
                revision=revision,
                # task_graph_id=task_graph_id,
                properties=properties,
            )
            tasks.append(task)

            # We want to keep track of how many build builders we have
            build_builders[builder] = task

    for builder in builders:
        if is_downstream(builder):
            # For test jobs, determine_trigger_objective()[0] can be 3 things:
            # - the build job, if no build job exists
            # - the test job, if the build job is already completed
            # - None, if the build job is running
            objective, package_url, tests_url = \
                determine_trigger_objective(revision, builder)

            # The build job is already completed, we can trigger the test job
            if objective == builder:
                if objective in build_builders:
                    LOG.warning("We're creating a new build even though there's "
                                "already an existing completed build we could have "
                                "used. We hope you wanted to do this.")
                    task = _create_task(
                        buildername=builder,
                        repo_name=repo_name,
                        revision=revision,
                        # task_graph_id=task_graph_id,
                        parent_task_id=build_builders[objective]['taskId'],
                        properties={'upload_to_task_id': slugId()},
                    )
                    tasks.append(task)
                else:
                    task = _create_task(
                        buildername=builder,
                        repo_name=repo_name,
                        revision=revision,
                        properties={
                            'packageUrl': package_url,
                            'testUrl': tests_url
                        },
                    )
                    tasks.append(task)

            # The build job is running, there is nothing we can do
            elif objective is None:
                LOG.warning("We can add %s builder since the build associated "
                            "is running. This is because it is a Buildbot job.")
                pass

            # We need to trigger the build job and the test job
            else:
                if objective not in build_builders:
                    task = _create_task(
                        buildername=builder,
                        repo_name=repo_name,
                        revision=revision,
                        # task_graph_id=task_graph_id,
                        properties={'upload_to_task_id': slugId()},
                    )
                    tasks.append(task)
                    taskId = task['taskId']
                else:
                    taskId = build_builders[objective]['taskId']

                # Add test job
                task = _create_task(
                    buildername=builder,
                    repo_name=repo_name,
                    revision=revision,
                    # task_graph_id=task_graph_id,
                    parent_task_id=taskId,
                )
                tasks.append(task)

    return tasks
Example 31
def main(options):
    log.info('Loading config from %s' % options.config)
    config = load_config(options.config)

    if config.getboolean('release-runner', 'verbose'):
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
                        level=log_level)
    # Suppress logging of retry(), see bug 925321 for the details
    logging.getLogger("util.retry").setLevel(logging.WARN)

    # Shorthand
    api_root = config.get('api', 'api_root')
    username = config.get('api', 'username')
    password = config.get('api', 'password')
    buildbot_configs = config.get('release-runner', 'buildbot_configs')
    buildbot_configs_branch = config.get('release-runner',
                                         'buildbot_configs_branch')
    sleeptime = config.getint('release-runner', 'sleeptime')
    notify_from = get_config(config, 'release-runner', 'notify_from', None)
    notify_to = get_config(config, 'release-runner', 'notify_to', None)
    docker_worker_key = get_config(config, 'release-runner',
                                   'docker_worker_key', None)
    if isinstance(notify_to, basestring):
        notify_to = [x.strip() for x in notify_to.split(',')]
    smtp_server = get_config(config, 'release-runner', 'smtp_server',
                             'localhost')
    tc_config = {
        "credentials": {
            "clientId": get_config(config, "taskcluster", "client_id", None),
            "accessToken": get_config(config, "taskcluster", "access_token", None),
        }
    }
    configs_workdir = 'buildbot-configs'
    balrog_username = get_config(config, "balrog", "username", None)
    balrog_password = get_config(config, "balrog", "password", None)

    # TODO: replace release sanity with direct checks of en-US and l10n revisions (and other things if needed)

    rr = ReleaseRunner(api_root=api_root, username=username, password=password)
    scheduler = Scheduler(tc_config)
    index = Index(tc_config)

    # Main loop waits for new releases, processes them and exits.
    while True:
        try:
            log.debug('Fetching release requests')
            rr.get_release_requests()
            if rr.new_releases:
                for release in rr.new_releases:
                    log.info('Got a new release request: %s' % release)
                break
            else:
                log.debug('Sleeping for %d seconds before polling again' %
                          sleeptime)
                time.sleep(sleeptime)
        except:
            log.error("Caught exception when polling:", exc_info=True)
            sys.exit(5)

    retry(mercurial, args=(buildbot_configs, configs_workdir), kwargs=dict(branch=buildbot_configs_branch))

    if 'symlinks' in config.sections():
        format_dict = dict(buildbot_configs=configs_workdir)
        for target in config.options('symlinks'):
            symlink = config.get('symlinks', target).format(**format_dict)
            if path.exists(symlink):
                log.warning("Skipping %s -> %s symlink" % (symlink, target))
            else:
                log.info("Adding %s -> %s symlink" % (symlink, target))
                os.symlink(target, symlink)

    # TODO: this won't work for Thunderbird...do we care?
    branch = release["branch"].split("/")[-1]
    branchConfig = readBranchConfig(path.join(configs_workdir, "mozilla"), branch=branch)

    rc = 0
    for release in rr.new_releases:
        try:
            rr.update_status(release, 'Generating task graph')
            l10n_changesets = parsePlainL10nChangesets(rr.get_release_l10n(release["name"]))

            kwargs = {
                "public_key": docker_worker_key,
                "version": release["version"],
                "appVersion": getAppVersion(release["version"]),
                "buildNumber": release["buildNumber"],
                "source_enabled": True,
                "repo_path": release["branch"],
                "revision": release["mozillaRevision"],
                "product": release["product"],
                "partial_updates": getPartials(release),
                "branch": branch,
                "updates_enabled": bool(release["partials"]),
                "enUS_platforms": branchConfig["release_platforms"],
                "l10n_config": get_l10n_config(release, branchConfig, branch, l10n_changesets, index),
                "en_US_config": get_en_US_config(release, branchConfig, branch, index),
                "verifyConfigs": {},
                "balrog_api_root": branchConfig["balrog_api_root"],
                "balrog_username": balrog_username,
                "balrog_password": balrog_password,
                # TODO: staging specific, make them configurable
                "signing_class": "dep-signing",
            }
            verifyConfigTemplate = "{branch}-{product}-{plat}.cfg"
            for plat in branchConfig["release_platforms"]:
                kwargs["verifyConfigs"][plat] = verifyConfigTemplate.format(
                    branch=kwargs['branch'],
                    product=kwargs['product'],
                    plat=plat,
                )

            validate_graph_kwargs(**kwargs)

            graph_id = slugId()
            graph = make_task_graph(**kwargs)

            rr.update_status(release, "Submitting task graph")

            log.info("Task graph generated!")
            import pprint
            log.debug(pprint.pformat(graph, indent=4, width=160))
            print scheduler.createTaskGraph(graph_id, graph)

            rr.mark_as_completed(release)
        except:
            # We explicitly do not raise an error here because there's no
            # reason not to start other releases if creating the Task Graph
            # fails for another one. We _do_ need to set this in order to exit
            # with the right code, though.
            rc = 2
            rr.update_status(release, 'Failed to start release promotion')
            log.exception("Failed to start release promotion for {}: ".format(release))

    if rc != 0:
        sys.exit(rc)
Example 32
def _generate_tc_tasks_from_builders(builders, repo_name, revision):
    """ Return TC tasks based on a list of builders.

    Input: a list of builders and a revision
    Output: list of TC tasks based on the builders we receive

    :param builders: List of builder names
    :type builders: list
    :param repo_name: repository name
    :type repo_name: str
    :param revision: push revision
    :type revision: str
    :return: TC tasks
    :rtype: list

    """
    tasks = []
    build_builders = {}

    # We need to determine what upstream jobs need to be triggered besides the
    # builders already on our list
    for builder in builders:
        if is_upstream(builder):
            task = _create_task(
                buildername=builder,
                repo_name=repo_name,
                revision=revision,
                # task_graph_id=task_graph_id,
                properties={"upload_to_task_id": slugId()},
            )
            tasks.append(task)

            # We want to keep track of how many build builders we have
            build_builders[builder] = task

    for builder in builders:
        if is_downstream(builder):
            # For test jobs, determine_trigger_objective()[0] can be 3 things:
            # - the build job, if no build job exists
            # - the test job, if the build job is already completed
            # - None, if the build job is running
            objective, package_url, tests_url = determine_trigger_objective(revision, builder)

            # The build job is already completed, we can trigger the test job
            if objective == builder:
                if objective in build_builders:
                    LOG.warning(
                        "We're creating a new build even though there's "
                        "already an existing completed build we could have "
                        "used. We hope you wanted to do this."
                    )
                    task = _create_task(
                        buildername=builder,
                        repo_name=repo_name,
                        revision=revision,
                        # task_graph_id=task_graph_id,
                        parent_task_id=build_builders[objective]["taskId"],
                        properties={"upload_to_task_id": slugId()},
                    )
                    tasks.append(task)
                else:
                    task = _create_task(
                        buildername=builder,
                        repo_name=repo_name,
                        revision=revision,
                        properties={"packageUrl": package_url, "testUrl": tests_url},
                    )
                    tasks.append(task)

            # The build job is running, there is nothing we can do
            elif objective is None:
                LOG.warning(
                    "We cannot add the %s builder since its associated build "
                    "is still running. This is because it is a Buildbot job.",
                    builder,
                )

            # We need to trigger the build job and the test job
            else:
                if objective not in build_builders:
                    task = _create_task(
                        buildername=builder,
                        repo_name=repo_name,
                        revision=revision,
                        # task_graph_id=task_graph_id,
                        properties={"upload_to_task_id": slugId()},
                    )
                    tasks.append(task)
                    taskId = task["taskId"]
                else:
                    taskId = build_builders[objective]["taskId"]

                # Add test job
                task = _create_task(
                    buildername=builder,
                    repo_name=repo_name,
                    revision=revision,
                    # task_graph_id=task_graph_id,
                    parent_task_id=taskId,
                )
                tasks.append(task)

    return tasks
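
A minimal usage sketch for the function above. The builder names and revision are hypothetical, and the call assumes the mozci-style helpers it relies on (is_upstream, is_downstream, determine_trigger_objective, _create_task) are importable alongside it:

# Hypothetical inputs; the result is a list of Taskcluster task definitions.
tasks = _generate_tc_tasks_from_builders(
    builders=[
        "Linux x86-64 mozilla-inbound build",
        "Ubuntu VM 12.04 x64 mozilla-inbound opt test mochitest-1",
    ],
    repo_name="mozilla-inbound",
    revision="4f2decfeb9c5",
)
print("Generated %d tasks" % len(tasks))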
Esempio n. 33
0
def trigger_missing(server_address: str, out_dir: str = ".") -> None:
    triggered_revisions_path = os.path.join(out_dir, "triggered_revisions.zst")

    url = f"https://firefox-ci-tc.services.mozilla.com/api/index/v1/task/project.relman.code-coverage.{secrets[secrets.APP_CHANNEL]}.cron.latest/artifacts/public/triggered_revisions.zst"  # noqa
    r = requests.head(url, allow_redirects=True)
    if r.status_code != 404:
        utils.download_file(url, triggered_revisions_path)

    try:
        dctx = zstandard.ZstdDecompressor()
        with open(triggered_revisions_path, "rb") as zf:
            with dctx.stream_reader(zf) as reader:
                with io.TextIOWrapper(reader, encoding="ascii") as f:
                    triggered_revisions = set(f.read().splitlines())
    except FileNotFoundError:
        triggered_revisions = set()

    # Get all mozilla-central revisions from the past year (30 days outside production).
    days = 365 if secrets[secrets.APP_CHANNEL] == "production" else 30
    a_year_ago = datetime.utcnow() - timedelta(days=days)
    with hgmo.HGMO(server_address=server_address) as hgmo_server:
        data = hgmo_server.get_pushes(
            startDate=a_year_ago.strftime("%Y-%m-%d"),
            full=False,
            tipsonly=True)

    revisions = [(push_data["changesets"][0], int(push_data["date"]))
                 for push_data in data["pushes"].values()]

    logger.info(f"{len(revisions)} pushes in the past year")

    assert (secrets[secrets.GOOGLE_CLOUD_STORAGE]
            is not None), "Missing GOOGLE_CLOUD_STORAGE secret"
    bucket = get_bucket(secrets[secrets.GOOGLE_CLOUD_STORAGE])

    missing_revisions = []
    for revision, timestamp in revisions:
        # Skip revisions that have already been triggered. If they are still missing,
        # it means there is a problem that is preventing us from ingesting them.
        if revision in triggered_revisions:
            continue

        # If the revision was already ingested, we don't need to trigger ingestion for it again.
        if uploader.gcp_covdir_exists(bucket, "mozilla-central", revision,
                                      "all", "all"):
            triggered_revisions.add(revision)
            continue

        missing_revisions.append((revision, timestamp))

    logger.info(f"{len(missing_revisions)} missing pushes in the past year")

    yesterday = int(datetime.timestamp(datetime.utcnow() - timedelta(days=1)))

    task_group_id = slugId()
    logger.info(f"Triggering tasks in the {task_group_id} group")
    triggered = 0
    for revision, timestamp in reversed(missing_revisions):
        # If it's older than yesterday, we assume the group finished.
        # If it is newer than yesterday, we load the group and check if all tasks in it finished.
        if timestamp > yesterday:
            decision_task_id = taskcluster.get_decision_task(
                "mozilla-central", revision)
            if decision_task_id is None:
                continue

            group = taskcluster.get_task_details(
                decision_task_id)["taskGroupId"]
            if not all(task["status"]["state"] in taskcluster.FINISHED_STATUSES
                       for task in taskcluster.get_tasks_in_group(group)
                       if taskcluster.is_coverage_task(task["task"])):
                continue

        trigger_task(task_group_id, revision)
        triggered_revisions.add(revision)
        triggered += 1
        if triggered == MAXIMUM_TRIGGERS:
            break

    cctx = zstandard.ZstdCompressor(threads=-1)
    with open(triggered_revisions_path, "wb") as zf:
        with cctx.stream_writer(zf) as compressor:
            with io.TextIOWrapper(compressor, encoding="ascii") as f:
                f.write("\n".join(triggered_revisions))
Esempio n. 34
0
def createTemporaryCredentials(clientId, accessToken, start, expiry, scopes, name=None):
    """ Create a set of temporary credentials

    Callers should not apply any clock skew; clock drift is accounted for by
    auth service.

    clientId: the issuing clientId
    accessToken: the issuer's accessToken
    start: start time of credentials (datetime.datetime)
    expiry: expiration time of credentials, (datetime.datetime)
    scopes: list of scopes granted
    name: credential name (optional)

    Returns a dictionary in the form:
        {'clientId': str, 'accessToken': str, 'certificate': str}
    """

    for scope in scopes:
        if not isinstance(scope, six.string_types):
            raise exceptions.TaskclusterFailure('Scope must be string')

    # Credentials can only be valid for 31 days.  I hope that
    # this is validated on the server somehow...

    if expiry - start > datetime.timedelta(days=31):
        raise exceptions.TaskclusterFailure('Only 31 days allowed')

    # We multiply times by 1000 because the auth service is JS and as a result
    # uses milliseconds instead of seconds
    cert = dict(
        version=1,
        scopes=scopes,
        start=calendar.timegm(start.utctimetuple()) * 1000,
        expiry=calendar.timegm(expiry.utctimetuple()) * 1000,
        seed=utils.slugId().encode('ascii') + utils.slugId().encode('ascii'),
    )

    # if this is a named temporary credential, include the issuer in the certificate
    if name:
        cert['issuer'] = utils.toStr(clientId)

    sig = ['version:' + utils.toStr(cert['version'])]
    if name:
        sig.extend([
            'clientId:' + utils.toStr(name),
            'issuer:' + utils.toStr(clientId),
        ])
    sig.extend([
        'seed:' + utils.toStr(cert['seed']),
        'start:' + utils.toStr(cert['start']),
        'expiry:' + utils.toStr(cert['expiry']),
        'scopes:'
    ] + scopes)
    sigStr = '\n'.join(sig).encode()

    if isinstance(accessToken, six.text_type):
        accessToken = accessToken.encode()
    sig = hmac.new(accessToken, sigStr, hashlib.sha256).digest()

    cert['signature'] = utils.encodeStringForB64Header(sig)

    newToken = hmac.new(accessToken, cert['seed'], hashlib.sha256).digest()
    newToken = utils.makeB64UrlSafe(utils.encodeStringForB64Header(newToken)).replace(b'=', b'')

    return {
        'clientId': name or clientId,
        'accessToken': newToken,
        'certificate': utils.dumpJson(cert),
    }
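
A short usage sketch for the helper above. The client ID, access token and scope are placeholders; the returned dictionary can be used directly as Taskcluster credentials:

import datetime

now = datetime.datetime.utcnow()
temp_creds = createTemporaryCredentials(
    clientId='permanent-client-id',        # placeholder issuer
    accessToken='permanent-access-token',  # placeholder secret
    start=now,
    expiry=now + datetime.timedelta(hours=1),
    scopes=['queue:create-task:aws-provisioner-v1/tutorial'],  # placeholder scope
    name='my-temporary-client',            # optional named credential
)
print(sorted(temp_creds.keys()))  # ['accessToken', 'certificate', 'clientId']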
Esempio n. 35
0
    def create_tasks(self):
        """Create test/build/push tasks in Taskcluster.

        Returns:
            None
        """
        if self.github_event.event_type == "release":
            LOG.warning("Detected release event. Nothing to do!")
            return
        should_push = (
            self.github_event.event_type == "push"
            and self.github_event.branch == self.push_branch
        )
        service_build_tasks = {service: slugId() for service in self.services}
        recipe_test_tasks = {recipe: slugId() for recipe in self.services.recipes}
        test_tasks_created = set()
        build_tasks_created = set()
        push_tasks_created = set()
        if not should_push:
            LOG.info(
                "Not pushing to Docker Hub (event is %s, branch is %s, only push %s)",
                self.github_event.event_type,
                self.github_event.branch,
                self.push_branch,
            )
        to_create = sorted(
            self.services.recipes.values(), key=lambda x: x.name
        ) + sorted(self.services.values(), key=lambda x: x.name)
        while to_create:
            obj = to_create.pop(0)
            is_svc = isinstance(obj, Service)

            if not obj.dirty:
                if is_svc:
                    LOG.info("Service %s doesn't need to be rebuilt", obj.name)
                continue
            dirty_dep_tasks = [
                service_build_tasks[dep]
                for dep in obj.service_deps
                if self.services[dep].dirty
            ]
            if is_svc:
                dirty_test_dep_tasks = [
                    service_build_tasks[test.image]
                    for test in obj.tests
                    if test.image in service_build_tasks
                    and self.services[test.image].dirty
                ]
            else:
                dirty_test_dep_tasks = []
            dirty_recipe_test_tasks = [
                recipe_test_tasks[recipe]
                for recipe in obj.recipe_deps
                if self.services.recipes[recipe].dirty
            ]

            pending_deps = (
                set(dirty_dep_tasks) | set(dirty_test_dep_tasks)
            ) - build_tasks_created
            pending_deps |= set(dirty_recipe_test_tasks) - test_tasks_created
            if pending_deps:
                LOG.debug(
                    "Can't create %s %s tasks before dependencies: %s",
                    type(obj).__name__,
                    obj.name,
                    list(pending_deps),
                )
                to_create.append(obj)
                continue

            if is_svc:
                test_tasks = []
                for test in obj.tests:
                    task_id = self._create_svc_test_task(obj, test, service_build_tasks)
                    test_tasks_created.add(task_id)
                    test_tasks.append(task_id)
                test_tasks.extend(dirty_recipe_test_tasks)

                build_tasks_created.add(
                    self._create_build_task(
                        obj, dirty_dep_tasks, test_tasks, service_build_tasks
                    )
                )
                if should_push:
                    push_tasks_created.add(
                        self._create_push_task(obj, service_build_tasks)
                    )
            else:
                test_tasks_created.add(
                    self._create_recipe_test_task(
                        obj,
                        dirty_dep_tasks + dirty_recipe_test_tasks,
                        recipe_test_tasks,
                    )
                )
        LOG.info(
            "%s %d test tasks, %d build tasks and %d push tasks",
            self._created_str,
            len(test_tasks_created),
            len(build_tasks_created),
            len(push_tasks_created),
        )
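
The method above allocates every build and test task ID up front with slugId(), so a task can list dependencies on IDs that have not been submitted to Taskcluster yet. A minimal standalone sketch of that pattern (the service names are made up, and the task payload is reduced to the dependency wiring):

from taskcluster.utils import slugId

# Pre-allocate IDs before building any task definition.
service_build_tasks = {name: slugId() for name in ("svc-builder", "svc-worker")}

# A trimmed-down test task depending on a build task by its pre-allocated ID.
test_task = {
    "taskId": slugId(),
    "dependencies": [service_build_tasks["svc-builder"]],
}
print(test_task)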
Esempio n. 36
0
def main(options):
    log.info('Loading config from %s' % options.config)
    config = load_config(options.config)

    if config.getboolean('release-runner', 'verbose'):
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
                        level=log_level)
    # Suppress logging of retry(), see bug 925321 for the details
    logging.getLogger("util.retry").setLevel(logging.WARN)

    # Shorthand
    api_root = config.get('api', 'api_root')
    username = config.get('api', 'username')
    password = config.get('api', 'password')
    buildbot_configs = config.get('release-runner', 'buildbot_configs')
    buildbot_configs_branch = config.get('release-runner',
                                         'buildbot_configs_branch')
    sleeptime = config.getint('release-runner', 'sleeptime')
    notify_from = get_config(config, 'release-runner', 'notify_from', None)
    notify_to = get_config(config, 'release-runner', 'notify_to_announce', None)
    docker_worker_key = get_config(config, 'release-runner',
                                   'docker_worker_key', None)
    signing_pvt_key = get_config(config, 'signing', 'pvt_key', None)
    if isinstance(notify_to, basestring):
        notify_to = [x.strip() for x in notify_to.split(',')]
    smtp_server = get_config(config, 'release-runner', 'smtp_server',
                             'localhost')
    tc_config = {
        "credentials": {
            "clientId": get_config(config, "taskcluster", "client_id", None),
            "accessToken": get_config(config, "taskcluster", "access_token", None),
        }
    }
    # Extend tc_config for retries, see Bug 1293744
    # https://github.com/taskcluster/taskcluster-client.py/blob/0.0.24/taskcluster/client.py#L30
    # This is a stopgap until Bug 1259627 is fixed.
    retrying_tc_config = tc_config.copy()
    retrying_tc_config.update({"maxRetries": 12})
    balrog_username = get_config(config, "balrog", "username", None)
    balrog_password = get_config(config, "balrog", "password", None)
    extra_balrog_submitter_params = get_config(config, "balrog", "extra_balrog_submitter_params", None)
    beetmover_aws_access_key_id = get_config(config, "beetmover", "aws_access_key_id", None)
    beetmover_aws_secret_access_key = get_config(config, "beetmover", "aws_secret_access_key", None)
    gpg_key_path = get_config(config, "signing", "gpg_key_path", None)

    # TODO: replace release sanity with direct checks of en-US and l10n revisions (and other things if needed)

    rr = ReleaseRunner(api_root=api_root, username=username, password=password)
    scheduler = Scheduler(retrying_tc_config)
    index = Index(tc_config)
    queue = Queue(tc_config)

    # Main loop waits for new releases, processes them and exits.
    while True:
        try:
            log.debug('Fetching release requests')
            rr.get_release_requests()
            if rr.new_releases:
                new_releases = run_prebuild_sanity_checks(rr)
                break
            else:
                log.debug('Sleeping for %d seconds before polling again' %
                          sleeptime)
                time.sleep(sleeptime)
        except:
            log.error("Caught exception when polling:", exc_info=True)
            sys.exit(5)

    retry(mercurial, args=(buildbot_configs, CONFIGS_WORKDIR), kwargs=dict(branch=buildbot_configs_branch))

    if 'symlinks' in config.sections():
        format_dict = dict(buildbot_configs=CONFIGS_WORKDIR)
        for target in config.options('symlinks'):
            symlink = config.get('symlinks', target).format(**format_dict)
            if path.exists(symlink):
                log.warning("Skipping %s -> %s symlink" % (symlink, target))
            else:
                log.info("Adding %s -> %s symlink" % (symlink, target))
                os.symlink(target, symlink)
    rc = 0
    for release in new_releases:
        branchConfig = get_branch_config(release)
        # candidate releases are split in two graphs and release-runner only handles the first
        # graph of tasks. so parts like postrelease, push_to_releases/mirrors, and mirror dependent
        # channels are handled in the second generated graph outside of release-runner.
        # This is not elegant but it should do the job for now
        release_channels = release['release_channels']
        candidate_release = is_candidate_release(release_channels)
        if candidate_release:
            postrelease_enabled = False
            postrelease_bouncer_aliases_enabled = False
            final_verify_channels = [
                c for c in release_channels if c not in branchConfig.get('mirror_requiring_channels', [])
            ]
            publish_to_balrog_channels = [
                c for c in release_channels if c not in branchConfig.get('mirror_requiring_channels', [])
            ]
            push_to_releases_enabled = False
            postrelease_mark_as_shipped_enabled = False
        else:
            postrelease_enabled = branchConfig['postrelease_version_bump_enabled']
            postrelease_bouncer_aliases_enabled = branchConfig['postrelease_bouncer_aliases_enabled']
            postrelease_mark_as_shipped_enabled = branchConfig['postrelease_mark_as_shipped_enabled']
            final_verify_channels = release_channels
            publish_to_balrog_channels = release_channels
            push_to_releases_enabled = True

        ship_it_product_name = release['product']
        tc_product_name = branchConfig['stage_product'][ship_it_product_name]
        # XXX: Doesn't work with either Fennec or Thunderbird
        platforms = branchConfig['release_platforms']

        graph_id = slugId()
        try:
            if not are_en_us_builds_completed(index, release_name=release['name'], submitted_at=release['submittedAt'],
                                              branch=release['branchShortName'], revision=release['mozillaRevision'],
                                              tc_product_name=tc_product_name, platforms=platforms):
                log.info('Builds are not completed yet, skipping release "%s" for now', release['name'])
                rr.update_status(release, 'Waiting for builds to be completed')
                continue

            log.info('Every build is completed for release: %s', release['name'])

            rr.update_status(release, 'Generating task graph')

            kwargs = {
                "public_key": docker_worker_key,
                "version": release["version"],
                # ESR should not use "esr" suffix here:
                "next_version": bump_version(release["version"].replace("esr", "")),
                "appVersion": getAppVersion(release["version"]),
                "buildNumber": release["buildNumber"],
                "source_enabled": True,
                "checksums_enabled": True,
                "repo_path": release["branch"],
                "revision": release["mozillaRevision"],
                "product": release["product"],
                # if mozharness_revision is not passed, use 'revision'
                "mozharness_changeset": release.get('mh_changeset') or release['mozillaRevision'],
                "partial_updates": release['partial_updates'],
                "branch": release['branchShortName'],
                "updates_enabled": bool(release["partials"]),
                "l10n_config": get_l10n_config(
                    index=index, product=release["product"], branch=release['branchShortName'],
                    revision=release['mozillaRevision'],
                    platforms=branchConfig['platforms'],
                    l10n_platforms=branchConfig['l10n_release_platforms'],
                    l10n_changesets=release['l10n_changesets']
                ),
                "en_US_config": get_en_US_config(
                    index=index, product=release["product"], branch=release['branchShortName'],
                    revision=release['mozillaRevision'],
                    platforms=branchConfig['release_platforms']
                ),
                "verifyConfigs": {},
                "balrog_api_root": branchConfig["balrog_api_root"],
                "funsize_balrog_api_root": branchConfig["funsize_balrog_api_root"],
                "balrog_username": balrog_username,
                "balrog_password": balrog_password,
                "beetmover_aws_access_key_id": beetmover_aws_access_key_id,
                "beetmover_aws_secret_access_key": beetmover_aws_secret_access_key,
                # TODO: staging specific, make them configurable
                "signing_class": "release-signing",
                "bouncer_enabled": branchConfig["bouncer_enabled"],
                "updates_builder_enabled": branchConfig["updates_builder_enabled"],
                "update_verify_enabled": branchConfig["update_verify_enabled"],
                "release_channels": release_channels,
                "final_verify_channels": final_verify_channels,
                "final_verify_platforms": branchConfig['release_platforms'],
                "uptake_monitoring_platforms": branchConfig['release_platforms'],
                "signing_pvt_key": signing_pvt_key,
                "build_tools_repo_path": branchConfig['build_tools_repo_path'],
                "push_to_candidates_enabled": branchConfig['push_to_candidates_enabled'],
                "postrelease_bouncer_aliases_enabled": postrelease_bouncer_aliases_enabled,
                "uptake_monitoring_enabled": branchConfig['uptake_monitoring_enabled'],
                "tuxedo_server_url": branchConfig['tuxedoServerUrl'],
                "postrelease_version_bump_enabled": postrelease_enabled,
                "postrelease_mark_as_shipped_enabled": postrelease_mark_as_shipped_enabled,
                "push_to_releases_enabled": push_to_releases_enabled,
                "push_to_releases_automatic": branchConfig['push_to_releases_automatic'],
                "beetmover_candidates_bucket": branchConfig["beetmover_buckets"][release["product"]],
                "partner_repacks_platforms": branchConfig.get("partner_repacks_platforms", []),
                "l10n_changesets": release['l10n_changesets'],
                "extra_balrog_submitter_params": extra_balrog_submitter_params,
                "publish_to_balrog_channels": publish_to_balrog_channels,
                "snap_enabled": branchConfig.get("snap_enabled", False),
            }

            validate_graph_kwargs(queue, gpg_key_path, **kwargs)
            graph = make_task_graph_strict_kwargs(**kwargs)
            rr.update_status(release, "Submitting task graph")
            log.info("Task graph generated!")
            import pprint
            log.debug(pprint.pformat(graph, indent=4, width=160))
            print(scheduler.createTaskGraph(graph_id, graph))

            rr.mark_as_completed(release)
            l10n_url = rr.release_l10n_api.getL10nFullUrl(release['name'])
            email_release_drivers(smtp_server=smtp_server, from_=notify_from,
                                  to=notify_to, release=release,
                                  task_group_id=graph_id, l10n_url=l10n_url)
        except Exception as exception:
            # We explicitly do not raise an error here because there's no
            # reason not to start other releases if creating the Task Graph
            # fails for another one. We _do_ need to set this in order to exit
            # with the right code, though.
            rc = 2
            rr.mark_as_failed(
                release,
                'Failed to start release promotion (graph ID: %s). Error(s): %s' % (graph_id, exception)
            )
            log.exception('Failed to start release "%s" promotion for graph %s. Error(s): %s',
                          release['name'], graph_id, exception)
            log.debug('Release failed: %s', release)

    if rc != 0:
        sys.exit(rc)

    log.debug('Sleeping for %s seconds before polling again', sleeptime)
    time.sleep(sleeptime)
Esempio n. 37
0
def main():
    # CLI args
    parser = argparse.ArgumentParser()
    parser.add_argument("--nb-tasks",
                        type=int,
                        default=5,
                        help="NB of tasks to create")
    parser.add_argument("--group",
                        type=str,
                        default=slugId(),
                        help="Task group to create/update")
    parser.add_argument(
        "--dry-run",
        action="store_true",
        default=False,
        help="List actions without triggering any new task",
    )
    parser.add_argument("history",
                        type=open,
                        help="JSON payload of /v2/history endpoint")
    args = parser.parse_args()

    # Setup Taskcluster
    taskcluster_config.auth()
    secrets.load(os.environ["TASKCLUSTER_SECRET"])

    # List existing tags & commits
    print("Group", args.group)
    queue = taskcluster_config.get_service("queue")
    try:
        group = queue.listTaskGroup(args.group)
        commits = [
            task["task"]["payload"]["env"]["REVISION"]
            for task in group["tasks"]
            if task["status"]["state"] not in ("failed", "exception")
        ]
        print("Found {} commits processed in task group {}".format(
            len(commits), args.group))
    except Exception as e:
        print("Invalid task group : {}".format(e))
        commits = []

    # Read the history file
    history = json.load(args.history)

    # Load initial dates from our history
    history_dates = {
        item["changeset"]: datetime.fromtimestamp(item["date"]).date()
        for item in history
    }
    dates = [
        history_dates[commit] for commit in commits if commit in history_dates
    ]

    # Trigger a task for each commit
    nb = 0
    for commit in history:
        date = datetime.fromtimestamp(commit["date"])
        if nb >= args.nb_tasks:
            break
        if commit["changeset"] in commits:
            print(
                f"Skipping commit {commit['changeset']} from {date} : already processed"
            )
            continue

        if date.date() in dates:
            print(
                f"Skipping commit {commit['changeset']} from {date} : same day"
            )
            continue

        print(f"Triggering commit {commit['changeset']} from {date}")
        if args.dry_run:
            print(">>> No trigger on dry run")
        else:
            out = trigger_task(args.group, commit)
            print(">>>", out["status"]["taskId"])
        nb += 1
        dates.append(date.date())
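
The loop above skips commits that were already processed in the task group and triggers at most one commit per calendar day. A standalone sketch of that filtering, using hypothetical history entries shaped like the /v2/history payload:

from datetime import datetime

history = [
    {"changeset": "aaa111", "date": 1650000000},
    {"changeset": "bbb222", "date": 1650003600},  # one hour after aaa111
    {"changeset": "ccc333", "date": 1650090000},  # the following day
]
already_processed = {"aaa111"}

# Seed the known dates from the commits that were already processed.
seen_dates = {
    datetime.fromtimestamp(c["date"]).date()
    for c in history
    if c["changeset"] in already_processed
}

to_trigger = []
for commit in history:
    day = datetime.fromtimestamp(commit["date"]).date()
    if commit["changeset"] in already_processed:
        continue  # already done
    if day in seen_dates:
        continue  # another commit from the same day was already picked
    seen_dates.add(day)
    to_trigger.append(commit["changeset"])

print(to_trigger)  # ['ccc333']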
Esempio n. 38
0
def _generate_tasks(repo_name, revision, builders_graph, task_graph_id=None,
                    parent_task_id=None, required_task_ids=[], **kwargs):
    """ Generate a TC json object with tasks based on a graph of graphs of buildernames

    :param repo_name: The name of a repository e.g. mozilla-inbound
    :type repo_name: str
    :param revision: Changeset ID of a revision.
    :type revision: str
    :param builders_graph:
        It is a graph made up of a dictionary where each
        key is a Buildbot buildername. The value for each key is either None
        or another graph of dependent builders.
    :type builders_graph: dict
    :param task_graph_id: TC graph id to which this task belongs to
    :type task_graph_id: str
    :param parent_task_id: Task from which to find artifacts. It is not a dependency.
    :type parent_task_id: str
    :returns: A list of TC tasks
    :rtype: list

    """
    if not type(required_task_ids) == list:
        raise MozciError("required_task_ids must be a list")

    tasks = []

    if type(builders_graph) != dict:
        raise MozciError("The buildbot graph should be a dictionary")

    # Let's iterate through the root builders in this graph
    for builder, dependent_graph in builders_graph.iteritems():
        # Due to bug 1221091 this will be used to know to which task
        # the artifacts will be uploaded to
        upload_to_task_id = slugId()
        task = _create_task(
            buildername=builder,
            repo_name=repo_name,
            revision=revision,
            task_graph_id=task_graph_id,
            parent_task_id=parent_task_id,
            properties={'upload_to_task_id': upload_to_task_id},
            requires=required_task_ids,
            **kwargs
        )
        task_id = task['taskId']
        tasks.append(task)

        if dependent_graph:
            # If there are builders this builder triggers let's add them as well
            tasks = tasks + _generate_tasks(
                repo_name=repo_name,
                revision=revision,
                builders_graph=dependent_graph,
                task_graph_id=task_graph_id,
                # The parent task id is used to find artifacts; only one can be given
                parent_task_id=upload_to_task_id,
                # The required tasks are the one holding this task from running
                required_task_ids=[task_id],
                **kwargs
            )

    return tasks
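
A usage sketch for _generate_tasks above: builders_graph maps each Buildbot build to the (possibly nested) graph of builders it should trigger. The builder names and revision are hypothetical:

builders_graph = {
    # A build builder whose artifacts feed two test builders.
    "Linux x86-64 mozilla-inbound build": {
        "Ubuntu VM 12.04 x64 mozilla-inbound opt test mochitest-1": None,
        "Ubuntu VM 12.04 x64 mozilla-inbound opt test xpcshell": None,
    },
}

tasks = _generate_tasks(
    repo_name="mozilla-inbound",
    revision="4f2decfeb9c5",
    builders_graph=builders_graph,
    task_graph_id=slugId(),
)
print("Generated %d tasks" % len(tasks))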
Esempio n. 39
0
def main(release_runner_config, release_config, tc_config):

    api_root = release_runner_config.get('api', 'api_root')
    username = release_runner_config.get('api', 'username')
    password = release_runner_config.get('api', 'password')

    scheduler = Scheduler(tc_config)
    index = Index(tc_config)

    rr = ReleaseRunner(api_root=api_root, username=username, password=password)
    graph_id = slugId()
    log.info('Generating task graph')
    kwargs = {
        # release-runner.ini
        "signing_pvt_key": release_config['signing_pvt_key'],
        "public_key": release_config['docker_worker_key'],
        "balrog_username": release_config['balrog_username'],
        "balrog_password": release_config['balrog_password'],
        "beetmover_aws_access_key_id": release_config['beetmover_aws_access_key_id'],
        "beetmover_aws_secret_access_key": release_config['beetmover_aws_secret_access_key'],
        "signing_class": "release-signing",  # TODO: stagin specific, make them configurable

        # ship-it items
        "version": release_config["version"],
        "revision": release_config["mozilla_revision"],
        "mozharness_changeset": release_config["mozharness_changeset"] or release_config["mozilla_revision"],
        "buildNumber": release_config["build_number"],
        "l10n_changesets": release_config["l10n_changesets"],

        # was branchConfig items
        "funsize_balrog_api_root": release_config["funsize_balrog_api_root"],
        "balrog_api_root": release_config["balrog_api_root"],
        "build_tools_repo_path": release_config['build_tools_repo_path'],
        "tuxedo_server_url": release_config['tuxedo_server_url'],
        "uptake_monitoring_enabled": release_config['uptake_monitoring_enabled'],
        "beetmover_candidates_bucket": release_config["beetmover_candidates_bucket"],
        "bouncer_enabled": release_config["bouncer_enabled"],
        "updates_builder_enabled": release_config["updates_builder_enabled"],
        "update_verify_enabled": release_config["update_verify_enabled"],
        "push_to_candidates_enabled": release_config['push_to_candidates_enabled'],
        "postrelease_bouncer_aliases_enabled": release_config['postrelease_bouncer_aliases_enabled'],
        "postrelease_version_bump_enabled": release_config['postrelease_version_bump_enabled'],
        "push_to_releases_automatic": release_config['push_to_releases_automatic'],
        "partner_repacks_platforms": release_config["partner_repacks_platforms"],

        "repo_path": release_config["repo_path"],
        "branch": release_config["branch"],
        "product": release_config["product"],
        "release_channels": release_config['channels'],
        "final_verify_channels": release_config['final_verify_channels'],
        "final_verify_platforms": release_config['final_verify_platforms'],
        "uptake_monitoring_platforms": release_config['uptake_monitoring_platforms'],
        "source_enabled": release_config["source_enabled"],
        "checksums_enabled": release_config["checksums_enabled"],
        "updates_enabled": release_config["updates_enabled"],
        "push_to_releases_enabled": release_config["push_to_releases_enabled"],

        "verifyConfigs": {},
        # ESR should not use "esr" suffix here:
        "next_version": bump_version(release_config["version"].replace("esr", "")),
        "appVersion": getAppVersion(release_config["version"]),
        "partial_updates": get_partials(rr, release_config["partials"],
                                        release_config['product']),
        # in release-runner.py world we have a concept of branchConfig and release (shipit) vars
        # todo fix get_en_US_config and en_US_config helper methods to not require both
        "l10n_config": get_l10n_config(
            index=index, product=release_config["product"],
            branch=release_config["branch"],
            revision=release_config["mozilla_revision"],
            platforms=release_config['platforms'],
            l10n_platforms=release_config['l10n_release_platforms'] or {},
            l10n_changesets=release_config["l10n_changesets"]
        ),
        "en_US_config": get_en_US_config(
            index=index, product=release_config["product"],
            branch=release_config["branch"],
            revision=release_config["mozilla_revision"],
            platforms=release_config['platforms']
        ),
        "extra_balrog_submitter_params": release_config['extra_balrog_submitter_params'],
        "publish_to_balrog_channels": release_config["publish_to_balrog_channels"],
        "postrelease_mark_as_shipped_enabled": release_config["postrelease_mark_as_shipped_enabled"],
        # TODO: use [] when snaps_enabled is landed
        "snap_enabled": release_config.get("snap_enabled", False),
    }

    graph = make_task_graph_strict_kwargs(**kwargs)
    log.info("Submitting task graph")
    import pprint
    log.info(pprint.pformat(graph, indent=4, width=160))
    if not options.dry_run:
        print(scheduler.createTaskGraph(graph_id, graph))
Esempio n. 40
0
def _generate_tc_tasks_from_builders(builders, repo_name, revision):
    """ Return TC tasks based on a list of builders.

    Input: a list of builders and a revision
    Output: list of TC tasks base on builders we receive

    :param builders: List of builder names
    :type builders: list
    :param repo_name: push revision
    :type repo_name: str
    :param revision: push revision
    :type revision: str
    :return: TC tasks
    :rtype: dict

    """
    tasks = []
    build_builders = {}

    # We need to determine what upstream jobs need to be triggered besides the
    # builders already on our list
    for builder in builders:
        if is_upstream(builder):
            task = _create_task(
                buildername=builder,
                repo_name=repo_name,
                revision=revision,
                # task_graph_id=task_graph_id,
                properties={'upload_to_task_id': slugId()},
            )
            tasks.append(task)

            # We want to keep track of how many build builders we have
            build_builders[builder] = task

    for builder in builders:
        if is_downstream(builder):
            # For test jobs, determine_trigger_objective()[0] can be 3 things:
            # - the build job, if no build job exists
            # - the test job, if the build job is already completed
            # - None, if the build job is running
            objective, package_url, tests_url = \
                determine_trigger_objective(revision, builder)

            # The build job is already completed, we can trigger the test job
            if objective == builder:
                if objective in build_builders:
                    LOG.warning(
                        "We're creating a new build even though there's "
                        "already an existing completed build we could have "
                        "used. We hope you wanted to do this.")
                    task = _create_task(
                        buildername=builder,
                        repo_name=repo_name,
                        revision=revision,
                        # task_graph_id=task_graph_id,
                        parent_task_id=build_builders[objective]['taskId'],
                        properties={'upload_to_task_id': slugId()},
                    )
                    tasks.append(task)
                else:
                    task = _create_task(
                        buildername=builder,
                        repo_name=repo_name,
                        revision=revision,
                        properties={
                            'packageUrl': package_url,
                            'testUrl': tests_url
                        },
                    )
                    tasks.append(task)

            # The build job is running, there is nothing we can do
            elif objective is None:
                LOG.warning(
                    "We cannot add the %s builder since its associated build "
                    "is still running. This is because it is a Buildbot job.",
                    builder)

            # We need to trigger the build job and the test job
            else:
                if objective not in build_builders:
                    task = _create_task(
                        buildername=builder,
                        repo_name=repo_name,
                        revision=revision,
                        # task_graph_id=task_graph_id,
                        properties={'upload_to_task_id': slugId()},
                    )
                    tasks.append(task)
                    taskId = task['taskId']
                else:
                    taskId = build_builders[objective]['taskId']

                # Add test job
                task = _create_task(
                    buildername=builder,
                    repo_name=repo_name,
                    revision=revision,
                    # task_graph_id=task_graph_id,
                    parent_task_id=taskId,
                )
                tasks.append(task)

    return tasks
Esempio n. 41
0
def _generate_tasks(repo_name,
                    revision,
                    builders_graph,
                    task_graph_id=None,
                    parent_task_id=None,
                    required_task_ids=[],
                    **kwargs):
    """ Generate a TC json object with tasks based on a graph of graphs of buildernames

    :param repo_name: The name of a repository e.g. mozilla-inbound
    :type repo_name: str
    :param revision: Changeset ID of a revision.
    :type revision: str
    :param builders_graph:
        It is a graph made up of a dictionary where each
        key is a Buildbot buildername. The value for each key is either None
        or another graph of dependent builders.
    :type builders_graph: dict
    :param task_graph_id: TC graph id to which this task belongs to
    :type task_graph_id: str
    :param parent_task_id: Task from which to find artifacts. It is not a dependency.
    :type parent_task_id: str
    :returns: A list of TC tasks
    :rtype: list

    """
    if not type(required_task_ids) == list:
        raise MozciError("required_task_ids must be a list")

    tasks = []

    if type(builders_graph) != dict:
        raise MozciError("The buildbot graph should be a dictionary")

    # Let's iterate through the root builders in this graph
    for builder, dependent_graph in builders_graph.iteritems():
        # Due to bug 1221091 this will be used to know to which task
        # the artifacts will be uploaded to
        upload_to_task_id = slugId()
        task = _create_task(
            buildername=builder,
            repo_name=repo_name,
            revision=revision,
            task_graph_id=task_graph_id,
            parent_task_id=parent_task_id,
            properties={'upload_to_task_id': upload_to_task_id},
            requires=required_task_ids,
            **kwargs)
        task_id = task['taskId']
        tasks.append(task)

        if dependent_graph:
            # If there are builders this builder triggers let's add them as well
            tasks = tasks + _generate_tasks(
                repo_name=repo_name,
                revision=revision,
                builders_graph=dependent_graph,
                task_graph_id=task_graph_id,
                # The parent task id is used to find artifacts; only one can be given
                parent_task_id=upload_to_task_id,
                # The required tasks are the one holding this task from running
                required_task_ids=[task_id],
                **kwargs)

    return tasks
Esempio n. 42
0
def main(options):
    log.info('Loading config from %s' % options.config)

    with open(options.config, 'r') as config_file:
        config = yaml.safe_load(config_file)

    if config['release-runner'].get('verbose', False):
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
                        level=log_level)
    # Suppress logging of retry(), see bug 925321 for the details
    logging.getLogger("util.retry").setLevel(logging.WARN)

    api_root = config['api']['api_root']
    username = config['api']['username']
    password = config['api']['password']

    rr_config = config['release-runner']

    buildbot_configs = rr_config['buildbot_configs']
    buildbot_configs_branch = rr_config['buildbot_configs_branch']
    sleeptime = rr_config['sleeptime']
    notify_from = rr_config.get('notify_from')
    notify_to = rr_config.get('notify_to_announce')
    docker_worker_key = rr_config.get('docker_worker_key')
    signing_pvt_key = config['signing'].get('pvt_key')
    if isinstance(notify_to, basestring):
        notify_to = [x.strip() for x in notify_to.split(',')]
    smtp_server = rr_config.get('smtp_server', 'localhost')
    tc_config = {
        "credentials": {
            "clientId": config['taskcluster'].get('client_id'),
            "accessToken": config['taskcluster'].get('access_token'),
        }
    }
    # Extend tc_config for retries, see Bug 1293744
    # https://github.com/taskcluster/taskcluster-client.py/blob/0.0.24/taskcluster/client.py#L30
    # This is a stopgap until Bug 1259627 is fixed.
    retrying_tc_config = tc_config.copy()
    retrying_tc_config.update({"maxRetries": 12})
    balrog_username = config['balrog'].get("username")
    balrog_password = config["balrog"].get("password")
    extra_balrog_submitter_params = config["balrog"].get(
        "extra_balrog_submitter_params", "")
    beetmover_aws_access_key_id = config["beetmover"].get("aws_access_key_id")
    beetmover_aws_secret_access_key = config["beetmover"].get(
        "aws_secret_access_key")
    gpg_key_path = config["signing"].get("gpg_key_path")

    # TODO: replace release sanity with direct checks of en-US and l10n
    # revisions (and other things if needed)

    rr = ReleaseRunner(api_root=api_root, username=username, password=password)
    scheduler = Scheduler(retrying_tc_config)
    index = Index(tc_config)
    queue = Queue(tc_config)

    # Main loop waits for new releases, processes them and exits.
    while True:
        try:
            log.debug('Fetching release requests')
            rr.get_release_requests([r['pattern'] for r in config['releases']])
            if rr.new_releases:
                new_releases = run_prebuild_sanity_checks(
                    rr, config['releases'])
                break
            else:
                log.debug('Sleeping for %d seconds before polling again' %
                          sleeptime)
                time.sleep(sleeptime)
        except:
            log.error("Caught exception when polling:", exc_info=True)
            sys.exit(5)

    retry(mercurial,
          args=(buildbot_configs, CONFIGS_WORKDIR),
          kwargs=dict(branch=buildbot_configs_branch))

    if 'symlinks' in config:
        format_dict = dict(buildbot_configs=CONFIGS_WORKDIR)
        for target in config['symlinks']:
            symlink = config['symlinks'].get(target).format(**format_dict)
            if path.exists(symlink):
                log.warning("Skipping %s -> %s symlink" % (symlink, target))
            else:
                log.info("Adding %s -> %s symlink" % (symlink, target))
                os.symlink(target, symlink)
    rc = 0
    for release in new_releases:
        branchConfig = get_branch_config(release)
        # candidate releases are split in two graphs and release-runner only handles the first
        # graph of tasks. so parts like postrelease, push_to_releases/mirrors, and mirror dependent
        # channels are handled in the second generated graph outside of release-runner.
        # This is not elegant but it should do the job for now
        release_channels = release['release_channels']
        candidate_release = is_candidate_release(release_channels)
        if candidate_release:
            postrelease_enabled = False
            postrelease_bouncer_aliases_enabled = False
            final_verify_channels = [
                c for c in release_channels
                if c not in branchConfig.get('mirror_requiring_channels', [])
            ]
            publish_to_balrog_channels = [
                c for c in release_channels
                if c not in branchConfig.get('mirror_requiring_channels', [])
            ]
            push_to_releases_enabled = False
            postrelease_mark_as_shipped_enabled = False
        else:
            postrelease_enabled = branchConfig[
                'postrelease_version_bump_enabled'][release['product']]
            postrelease_bouncer_aliases_enabled = branchConfig[
                'postrelease_bouncer_aliases_enabled']
            postrelease_mark_as_shipped_enabled = branchConfig[
                'postrelease_mark_as_shipped_enabled']
            final_verify_channels = release_channels
            publish_to_balrog_channels = release_channels
            push_to_releases_enabled = True

        # XXX: Doesn't work with either Fennec or Thunderbird
        platforms = branchConfig['release_platforms']

        try:
            graph_id = slugId()
            done = are_en_us_builds_completed(
                index=index,
                release_name=release['name'],
                submitted_at=release['submittedAt'],
                revision=release['mozillaRevision'],
                platforms=platforms,
                queue=queue,
                tc_task_indexes=branchConfig['tc_indexes'][release['product']])
            if not done:
                log.info(
                    'Builds are not completed yet, skipping release "%s" for now',
                    release['name'])
                rr.update_status(release, 'Waiting for builds to be completed')
                continue

            log.info('Every build is completed for release: %s',
                     release['name'])
            rr.update_status(release, 'Generating task graph')

            kwargs = {
                "public_key":
                docker_worker_key,
                "version":
                release["version"],
                # ESR should not use "esr" suffix here:
                "next_version":
                bump_version(release["version"].replace("esr", "")),
                "appVersion":
                getAppVersion(release["version"]),
                "buildNumber":
                release["buildNumber"],
                "release_eta":
                release.get("release_eta"),
                "source_enabled":
                True,
                "checksums_enabled":
                True,
                "binary_transparency_enabled":
                branchConfig.get("binary_transparency_enabled", False),
                "repo_path":
                release["branch"],
                "revision":
                release["mozillaRevision"],
                "product":
                release["product"],
                "funsize_product":
                get_funsize_product(release["product"]),
                # if mozharness_revision is not passed, use 'revision'
                "mozharness_changeset":
                release.get('mh_changeset') or release['mozillaRevision'],
                "partial_updates":
                release.get('partial_updates', list()),
                "branch":
                release['branchShortName'],
                "updates_enabled":
                bool(release["partials"]),
                "l10n_config":
                get_l10n_config(
                    index=index,
                    product=release["product"],
                    branch=release['branchShortName'],
                    revision=release['mozillaRevision'],
                    platforms=branchConfig['platforms'],
                    l10n_platforms=branchConfig['l10n_release_platforms'],
                    l10n_changesets=release['l10n_changesets'],
                    tc_task_indexes=branchConfig['tc_indexes'][
                        release['product']],
                ),
                "en_US_config":
                get_en_US_config(
                    index=index,
                    product=release["product"],
                    branch=release['branchShortName'],
                    revision=release['mozillaRevision'],
                    platforms=branchConfig['release_platforms'],
                    tc_task_indexes=branchConfig['tc_indexes'][
                        release['product']],
                ),
                "verifyConfigs": {},
                "balrog_api_root":
                branchConfig["balrog_api_root"],
                "funsize_balrog_api_root":
                branchConfig["funsize_balrog_api_root"],
                "balrog_username":
                balrog_username,
                "balrog_password":
                balrog_password,
                "beetmover_aws_access_key_id":
                beetmover_aws_access_key_id,
                "beetmover_aws_secret_access_key":
                beetmover_aws_secret_access_key,
                # TODO: staging specific, make them configurable
                "signing_class":
                branchConfig['signing_class'][release["product"]],
                "accepted_mar_channel_id":
                branchConfig.get('accepted_mar_channel_id',
                                 {}).get(release["product"]),
                "signing_cert":
                branchConfig['signing_cert'][release["product"]],
                "moz_disable_mar_cert_verification":
                branchConfig.get('moz_disable_mar_cert_verification'),
                "root_home_dir":
                branchConfig['root_home_dir'][release["product"]],
                "bouncer_enabled":
                branchConfig["bouncer_enabled"],
                "updates_builder_enabled":
                branchConfig["updates_builder_enabled"],
                "update_verify_enabled":
                branchConfig["update_verify_enabled"],
                "release_channels":
                release_channels,
                "final_verify_channels":
                final_verify_channels,
                "final_verify_platforms":
                branchConfig['release_platforms'],
                "uptake_monitoring_platforms":
                branchConfig['uptake_monitoring_platforms'][
                    release["product"]],
                "signing_pvt_key":
                signing_pvt_key,
                "build_tools_repo_path":
                branchConfig['build_tools_repo_path'],
                "push_to_candidates_enabled":
                branchConfig['push_to_candidates_enabled'],
                # TODO: temporary config enabled during 53 Fennec beta cycle
                "candidates_fennec_enabled":
                branchConfig.get('candidates_fennec_enabled'),
                "stage_product":
                branchConfig['stage_product'][release['product']],
                "postrelease_bouncer_aliases_enabled":
                postrelease_bouncer_aliases_enabled,
                "uptake_monitoring_enabled":
                branchConfig['uptake_monitoring_enabled'],
                "tuxedo_server_url":
                branchConfig['tuxedoServerUrl'],
                "postrelease_version_bump_enabled":
                postrelease_enabled,
                "postrelease_mark_as_shipped_enabled":
                postrelease_mark_as_shipped_enabled,
                "push_to_releases_enabled":
                push_to_releases_enabled,
                "push_to_releases_automatic":
                branchConfig['push_to_releases_automatic'],
                "beetmover_candidates_bucket":
                branchConfig["beetmover_buckets"][release["product"]],
                "partner_repacks_platforms":
                branchConfig.get("partner_repacks_platforms",
                                 {}).get(release["product"], []),
                "eme_free_repacks_platforms":
                branchConfig.get("eme_free_repacks_platforms",
                                 {}).get(release["product"], []),
                "sha1_repacks_platforms":
                branchConfig.get("sha1_repacks_platforms", []),
                "l10n_changesets":
                release['l10n_changesets'],
                "extra_balrog_submitter_params":
                extra_balrog_submitter_params + " --product " +
                release["product"].capitalize(),
                "publish_to_balrog_channels":
                publish_to_balrog_channels,
                "snap_enabled":
                branchConfig.get("snap_enabled",
                                 {}).get(release["product"], False),
                "update_verify_channel":
                branchConfig.get("update_verify_channel",
                                 {}).get(release["product"]),
                "update_verify_requires_cdn_push":
                branchConfig.get("update_verify_requires_cdn_push", False),
            }

            # TODO: en-US validation for multiple tasks
            # validate_graph_kwargs(queue, gpg_key_path, **kwargs)
            graph = make_task_graph_strict_kwargs(**kwargs)
            rr.update_status(release, "Submitting task graph")
            log.info("Task graph generated!")
            import pprint
            log.debug(pprint.pformat(graph, indent=4, width=160))
            print(scheduler.createTaskGraph(graph_id, graph))

            rr.mark_as_completed(release)
            l10n_url = rr.release_l10n_api.getL10nFullUrl(release['name'])
            email_release_drivers(smtp_server=smtp_server,
                                  from_=notify_from,
                                  to=notify_to,
                                  release=release,
                                  task_group_id=graph_id,
                                  l10n_url=l10n_url)
        except Exception as exception:
            # We explicitly do not raise an error here because there's no
            # reason not to start other releases if creating the Task Graph
            # fails for another one. We _do_ need to set this in order to exit
            # with the right code, though.
            rc = 2
            rr.mark_as_failed(
                release,
                'Failed to start release promotion (graph ID: %s). Error(s): %s'
                % (graph_id, exception))
            log.exception(
                'Failed to start release "%s" promotion for graph %s. Error(s): %s',
                release['name'], graph_id, exception)
            log.debug('Release failed: %s', release)

    if rc != 0:
        sys.exit(rc)

    log.debug('Sleeping for %s seconds before polling again', sleeptime)
    time.sleep(sleeptime)
Esempio n. 43
0
 def test_slug_id(self):
   with mock.patch('uuid.uuid4') as p:
     p.return_value = uuid.UUID('bed97923-7616-4ec8-85ed-4b695f67ac2e')
     expected = 'vtl5I3YWTsiF7UtpX2esLg'
     actual = subject.slugId()
     self.assertEqual(expected, actual)
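
For reference, the expected values in these tests follow from how slugId() is constructed: it URL-safe base64-encodes the 16 raw bytes of a uuid4 and drops the trailing '==' padding, giving a 22-character slug. A small standalone sketch of that transformation (the real helper lives in taskcluster.utils):

import base64
import uuid


def slug_of(u):
    # URL-safe base64 of the 16 uuid bytes, minus the '==' padding.
    return base64.urlsafe_b64encode(u.bytes)[:-2].decode('ascii')


print(slug_of(uuid.UUID('bed97923-7616-4ec8-85ed-4b695f67ac2e')))
# -> vtl5I3YWTsiF7UtpX2esLg, matching the expected value in the test above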