Example #1
        async def task(self, task_id):
            now = datetime.utcnow()

            if "retry:" in task_id:
                retry = int(task_id[task_id.index("retry:") + 6])
            else:
                retry = 3

            return {
                "created": stringDate(now),
                "deadline": stringDate(now + timedelta(hours=2)),
                "dependencies": [],
                "expires": stringDate(now + timedelta(hours=24)),
                "payload": {
                    "command": ["/bin/command"],
                    "env": {},
                    "image": "alpine",
                    "maxRunTime": 3600,
                },
                "priority": "lowest",
                "provisionerId": "aws-provisioner-v1",
                "requires": "all-completed",
                "retries": retry,
                "scopes": [],
                "routes": ["index.{}.latest".format(task_id)],
                "taskGroupId": "group-{}".format(task_id),
                "workerType": "niceWorker",
            }
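The coroutine above only builds a task definition; submitting it is a separate step. A minimal sketch using the async Taskcluster client, where the root URL and the "maker" instance are assumptions rather than part of the original:

    from taskcluster.aio import Queue
    from taskcluster.utils import slugId

    async def submit(maker):
        # `maker` is an instance of the class defining `task()` above (hypothetical)
        queue = Queue({"rootUrl": "https://community-tc.services.mozilla.com"})
        task_id = slugId()
        definition = await maker.task(task_id)
        await queue.createTask(task_id, definition)
        return task_id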
Example #2
 def _create_push_task(self, service, service_build_tasks):
     push_task = yaml_load(
         PUSH_TASK.substitute(
             clone_url=self.github_event.clone_url,
             commit=self.github_event.commit,
             deadline=stringDate(self.now + DEADLINE),
             docker_secret=self.docker_secret,
             max_run_time=int(MAX_RUN_TIME.total_seconds()),
             now=stringDate(self.now),
             owner_email=OWNER_EMAIL,
             provisioner=PROVISIONER_ID,
             scheduler=SCHEDULER_ID,
             service_name=service.name,
             source_url=SOURCE_URL,
             task_group=self.task_group,
             worker=WORKER_TYPE,
         )
     )
     push_task["dependencies"].append(service_build_tasks[service.name])
     task_id = slugId()
     LOG.info(
         "%s task %s: %s", self._create_str, task_id, push_task["metadata"]["name"]
     )
     if not self.dry_run:
         try:
             Taskcluster.get_service("queue").createTask(task_id, push_task)
         except TaskclusterFailure as exc:  # pragma: no cover
             LOG.error("Error creating push task: %s", exc)
             raise
     return task_id
Example #3
    def build_tasks(self, parent_task_id, env=None):
        """Create fuzzing tasks and attach them to a decision task"""
        now = datetime.utcnow()

        for pool in self.iterpools():
            for i in range(1, pool.tasks + 1):
                task = yaml.safe_load(
                    FUZZING_TASK.substitute(
                        created=stringDate(now),
                        deadline=stringDate(now + timedelta(
                            seconds=pool.max_run_time)),
                        description=DESCRIPTION.replace("\n", "\\n"),
                        expires=stringDate(fromNow("1 week", now)),
                        max_run_time=pool.max_run_time,
                        name=(f"Fuzzing task {pool.platform}-{pool.pool_id} - "
                              f"{i}/{pool.tasks}"),
                        owner_email=OWNER_EMAIL,
                        pool_id=pool.pool_id,
                        provisioner=PROVISIONER_ID,
                        scheduler=SCHEDULER_ID,
                        secret=DECISION_TASK_SECRET,
                        task_group=parent_task_id,
                        task_id=self.task_id,
                    ))
                task["payload"]["artifacts"].update(
                    pool.artifact_map(stringDate(fromNow("1 week", now))))
                # `container` can be either a string or a dict, so can't template it
                task["payload"]["image"] = pool.container
                task["scopes"] = sorted(chain(pool.scopes, task["scopes"]))
                add_capabilities_for_scopes(task)
                if env is not None:
                    assert set(task["payload"]["env"]).isdisjoint(set(env))
                    task["payload"]["env"].update(env)

                yield slugId(), task
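build_tasks only yields (task_id, definition) pairs; creating the tasks is left to the caller. A hedged consumer sketch, where the helper name and its arguments are assumptions:

    def schedule_fuzzing_tasks(pools, queue, parent_task_id):
        """Hypothetical driver: drain the generator above and submit each task."""
        created = []
        for task_id, definition in pools.build_tasks(parent_task_id):
            queue.createTask(task_id, definition)
            created.append(task_id)
        return created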
Example #4
 def _create_recipe_test_task(self, recipe, dep_tasks, recipe_test_tasks):
     service_path = self.services.root / "services" / "test-recipes"
     dockerfile = service_path / f"Dockerfile-{recipe.file.stem}"
     if not dockerfile.is_file():
         dockerfile = service_path / "Dockerfile"
     test_task = yaml_load(
         RECIPE_TEST_TASK.substitute(
             clone_url=self.github_event.clone_url,
             commit=self.github_event.commit,
             deadline=stringDate(self.now + DEADLINE),
             dockerfile=str(dockerfile.relative_to(self.services.root)),
             max_run_time=int(MAX_RUN_TIME.total_seconds()),
             now=stringDate(self.now),
             owner_email=OWNER_EMAIL,
             provisioner=PROVISIONER_ID,
             recipe_name=recipe.name,
             scheduler=SCHEDULER_ID,
             source_url=SOURCE_URL,
             task_group=self.task_group,
             worker=WORKER_TYPE,
         )
     )
     test_task["dependencies"].extend(dep_tasks)
     task_id = recipe_test_tasks[recipe.name]
     LOG.info(
         "%s task %s: %s", self._create_str, task_id, test_task["metadata"]["name"]
     )
     if not self.dry_run:
         try:
             Taskcluster.get_service("queue").createTask(task_id, test_task)
         except TaskclusterFailure as exc:  # pragma: no cover
             LOG.error("Error creating recipe test task: %s", exc)
             raise
     return task_id
Example #5
    def build_tasks(self, parent_task_id: str, env: Optional[Dict[str, str]] = None):
        """Create fuzzing tasks and attach them to a decision task"""
        now = datetime.utcnow()

        for pool in self.iterpools():
            assert pool.max_run_time is not None
            assert pool.tasks is not None
            for i in range(1, pool.tasks + 1):
                task = yaml.safe_load(
                    FUZZING_TASK.substitute(
                        created=stringDate(now),
                        deadline=stringDate(now + timedelta(seconds=pool.max_run_time)),
                        description=DESCRIPTION.replace("\n", "\\n"),
                        expires=stringDate(fromNow("4 weeks", now)),
                        max_run_time=pool.max_run_time,
                        name=(
                            f"Fuzzing task {pool.platform}-{pool.pool_id} - "
                            f"{i}/{pool.tasks}"
                        ),
                        owner_email=OWNER_EMAIL,
                        pool_id=pool.pool_id,
                        provisioner=PROVISIONER_ID,
                        scheduler=SCHEDULER_ID,
                        secret=DECISION_TASK_SECRET,
                        task_group=parent_task_id,
                        task_id=self.task_id,
                    )
                )
                configure_task(task, cast(PoolConfiguration, pool), now, env)
                yield slugId(), task
Example #6
        def task(self, task_id):
            now = datetime.utcnow()

            # take the single digit immediately following 'retry:'
            if 'retry:' in task_id:
                retry = int(task_id[task_id.index('retry:')+6])
            else:
                retry = 3

            return {
                'created': stringDate(now),
                'deadline': stringDate(now + timedelta(hours=2)),
                'dependencies': [],
                'expires': stringDate(now + timedelta(hours=24)),
                'payload': {
                    'command': ['/bin/command'],
                    'env': {},
                    'image': 'alpine',
                    'maxRunTime': 3600,
                },
                'priority': 'lowest',
                'provisionerId': 'aws-provisioner-v1',
                'requires': 'all-completed',
                'retries': retry,
                'scopes': [],
                'routes': [
                    'index.{}.latest'.format(task_id),
                ],
                'taskGroupId': 'group-{}'.format(task_id),
                'workerType': 'niceWorker'
            }
Example #7
def test_create_03(mocker):
    """test push task creation"""
    taskcluster = mocker.patch("orion_decision.scheduler.Taskcluster",
                               autospec=True)
    queue = taskcluster.get_service.return_value
    now = datetime.utcnow()
    root = FIXTURES / "services03"
    evt = mocker.Mock(spec=GithubEvent())
    evt.repo.path = root
    evt.repo.git = mocker.Mock(return_value="\n".join(
        str(p) for p in root.glob("**/*")))
    evt.commit = "commit"
    evt.branch = "push"
    evt.event_type = "push"
    evt.clone_url = "https://example.com"
    evt.pull_request = None
    sched = Scheduler(evt, now, "group", "secret", "push")
    sched.services["test1"].dirty = True
    sched.create_tasks()
    assert queue.createTask.call_count == 2
    build_task_id, build_task = queue.createTask.call_args_list[0].args
    assert build_task == yaml_load(
        BUILD_TASK.substitute(
            clone_url="https://example.com",
            commit="commit",
            deadline=stringDate(now + DEADLINE),
            dockerfile="test1/Dockerfile",
            expires=stringDate(now + ARTIFACTS_EXPIRE),
            load_deps="0",
            max_run_time=int(MAX_RUN_TIME.total_seconds()),
            now=stringDate(now),
            owner_email=OWNER_EMAIL,
            provisioner=PROVISIONER_ID,
            route="index.project.fuzzing.orion.test1.push",
            scheduler=SCHEDULER_ID,
            service_name="test1",
            source_url=SOURCE_URL,
            task_group="group",
            worker=WORKER_TYPE,
        ))
    _, push_task = queue.createTask.call_args_list[1].args
    push_expected = yaml_load(
        PUSH_TASK.substitute(
            clone_url="https://example.com",
            commit="commit",
            deadline=stringDate(now + DEADLINE),
            docker_secret="secret",
            max_run_time=int(MAX_RUN_TIME.total_seconds()),
            now=stringDate(now),
            owner_email=OWNER_EMAIL,
            provisioner=PROVISIONER_ID,
            scheduler=SCHEDULER_ID,
            service_name="test1",
            source_url=SOURCE_URL,
            task_group="group",
            worker=WORKER_TYPE,
        ))
    push_expected["dependencies"].append(build_task_id)
    assert push_task == push_expected
Example #8
    def index(self, revision, **kwargs):
        """
        Index current task on Taskcluster index
        """
        assert isinstance(revision, Revision)

        if settings.taskcluster.local or self.index_service is None:
            logger.info("Skipping taskcluster indexing", rev=str(revision), **kwargs)
            return

        # Build payload
        payload = revision.as_dict()
        payload.update(kwargs)

        # Always add the indexing
        now = datetime.utcnow()
        payload["indexed"] = stringDate(now)

        # Always add the source and try config
        payload["source"] = "try"
        payload["try_task_id"] = settings.try_task_id
        payload["try_group_id"] = settings.try_group_id

        # Always add the repository we are working on
        # This is mainly used by the frontend to list & filter diffs
        payload["repository"] = revision.target_repository

        # Add restartable flag for monitoring
        payload["monitoring_restart"] = payload["state"] == "error" and payload.get(
            "error_code"
        ) in ("watchdog", "mercurial")

        # Add a sub namespace with the task id to be able to list
        # tasks from the parent namespace
        namespaces = revision.namespaces + [
            "{}.{}".format(namespace, settings.taskcluster.task_id)
            for namespace in revision.namespaces
        ]

        # Build complete namespaces list, with monitoring update
        full_namespaces = [
            TASKCLUSTER_NAMESPACE.format(channel=settings.app_channel, name=name)
            for name in namespaces
        ]

        # Index for all required namespaces
        for namespace in full_namespaces:
            self.index_service.insertTask(
                namespace,
                {
                    "taskId": settings.taskcluster.task_id,
                    "rank": 0,
                    "data": payload,
                    "expires": stringDate(now + timedelta(days=TASKCLUSTER_INDEX_TTL)),
                },
            )
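The namespace handling is worth tracing: every revision namespace is doubled with a task-id-suffixed variant, so individual tasks can be listed under the parent namespace. A sketch with hypothetical values:

    # Hypothetical inputs mirroring the expansion in index() above
    revision_namespaces = ["phabricator.diff-123"]
    task_id = "abcDEF123"
    namespaces = revision_namespaces + [
        "{}.{}".format(namespace, task_id) for namespace in revision_namespaces
    ]
    assert namespaces == ["phabricator.diff-123", "phabricator.diff-123.abcDEF123"]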
Example #9
    def build_tasks(self, parent_task_id, env=None):
        """Create fuzzing tasks and attach them to a decision task"""
        now = datetime.utcnow()
        for i in range(1, self.tasks + 1):
            task_id = slugId()
            task = {
                "taskGroupId": parent_task_id,
                "dependencies": [parent_task_id],
                "created": stringDate(now),
                "deadline":
                stringDate(now + timedelta(seconds=self.cycle_time)),
                "expires": stringDate(fromNow("1 month", now)),
                "extra": {},
                "metadata": {
                    "description": DESCRIPTION,
                    "name": f"Fuzzing task {self.id} - {i}/{self.tasks}",
                    "owner": OWNER_EMAIL,
                    "source": "https://github.com/MozillaSecurity/fuzzing-tc",
                },
                "payload": {
                    "artifacts": {
                        "project/fuzzing/private/logs": {
                            "expires": stringDate(fromNow("1 month", now)),
                            "path": "/logs/",
                            "type": "directory",
                        }
                    },
                    "cache": {},
                    "capabilities": {},
                    "env": {
                        "TASKCLUSTER_FUZZING_POOL": self.filename
                    },
                    "features": {
                        "taskclusterProxy": True
                    },
                    "image": self.container,
                    "maxRunTime": self.cycle_time,
                },
                "priority": "high",
                "provisionerId": PROVISIONER_ID,
                "workerType": self.id,
                "retries": 1,
                "routes": [],
                "schedulerId": SCHEDULER_ID,
                "scopes": self.scopes,
                "tags": {},
            }
            if env is not None:
                assert set(task["payload"]["env"]).isdisjoint(set(env))
                task["payload"]["env"].update(env)

            yield task_id, task
Example #10
 def _create_svc_test_task(self, service, test, service_build_tasks):
     image = test.image
     deps = []
     if image in service_build_tasks:
         if self.services[image].dirty:
             deps.append(service_build_tasks[image])
             image = {
                 "type": "task-image",
                 "taskId": service_build_tasks[image],
             }
         else:
             image = {
                 "type": "indexed-image",
                 "namespace": (f"project.fuzzing.orion.{image}.{self.push_branch}"),
             }
         image["path"] = f"public/{test.image}.tar.zst"
     test_task = yaml_load(
         TEST_TASK.substitute(
             deadline=stringDate(self.now + DEADLINE),
             max_run_time=int(MAX_RUN_TIME.total_seconds()),
             now=stringDate(self.now),
             owner_email=OWNER_EMAIL,
             provisioner=PROVISIONER_ID,
             scheduler=SCHEDULER_ID,
             service_name=service.name,
             source_url=SOURCE_URL,
             task_group=self.task_group,
             test_name=test.name,
             worker=WORKER_TYPE,
         )
     )
     test_task["payload"]["image"] = image
     test_task["dependencies"].extend(deps)
     service_path = str(service.root.relative_to(self.services.root))
     test.update_task(
         test_task,
         self.github_event.clone_url,
         self.github_event.fetch_ref,
         self.github_event.commit,
         service_path,
     )
     task_id = slugId()
     LOG.info(
         "%s task %s: %s", self._create_str, task_id, test_task["metadata"]["name"]
     )
     if not self.dry_run:
         try:
             Taskcluster.get_service("queue").createTask(task_id, test_task)
         except TaskclusterFailure as exc:  # pragma: no cover
             LOG.error("Error creating test task: %s", exc)
             raise
     return task_id
Example #11
    def task(self):
        """Task definition"""
        now = datetime.utcnow()

        dependencies = [self.parent_id]
        if self.dependency is not None:
            dependencies.append(self.dependency)

        return {
            "taskGroupId": self.parent_id,
            "dependencies": dependencies,
            "created": stringDate(now),
            "deadline": stringDate(now + timedelta(seconds=MAX_RUNTIME)),
            "expires": stringDate(fromNow("1 week", now)),
            "provisionerId": "proj-fuzzing",
            "metadata": {
                "description": "",
                "name": f"{self.TASK_NAME} ({self.bug_id})",
                "owner": "*****@*****.**",
                "source": "https://github.com/MozillaSecurity/bugmon",
            },
            "payload": {
                "artifacts": {
                    "project/fuzzing/bugmon": {
                        "path": "/bugmon-artifacts/",
                        "type": "directory",
                    }
                },
                "cache": {},
                "capabilities": {
                    "devices": {
                        "hostSharedMemory": True,
                        "loopbackAudio": True
                    }
                },
                "env": self.env,
                "features": {
                    "taskclusterProxy": True
                },
                "image": "mozillasecurity/bugmon:latest",
                "maxRunTime": MAX_RUNTIME,
            },
            "priority": "high",
            "workerType": self.WORKER_TYPE,
            "retries": 5,
            "routes": ["[email protected]"],
            "schedulerId": "-",
            "scopes": self.scopes,
            "tags": {},
        }
Example #12
    def queue_reduction_task(self, os_name: str, crash_id: int) -> None:
        """Queue a reduction task in Taskcluster.

        Arguments:
            os_name: The OS to schedule the task for.
            crash_id: The CrashManager crash ID to reduce.
        """
        if self.dry_run:
            return None
        dest_queue = TC_QUEUES[os_name]
        my_task_id = os.environ.get("TASK_ID")
        task_id = slugId()
        now = datetime.now(timezone.utc)
        if os_name == "windows":
            image_task_id = self.image_artifact_task(
                "project.fuzzing.orion.grizzly-win.master")
        elif os_name == "macosx":
            image_task_id = self.image_artifact_task(
                "project.fuzzing.orion.grizzly-macos.master")
        else:
            image_task_id = None
        task = yaml_load(REDUCE_TASKS[os_name].substitute(
            crash_id=crash_id,
            created=stringDate(now),
            deadline=stringDate(now + REDUCTION_DEADLINE),
            description=DESCRIPTION,
            expires=stringDate(now + REDUCTION_EXPIRES),
            image_task_id=image_task_id,
            max_run_time=int(REDUCTION_MAX_RUN_TIME.total_seconds()),
            os_name=os_name,
            owner_email=OWNER_EMAIL,
            provisioner=PROVISIONER_ID,
            scheduler=SCHEDULER_ID,
            task_group=my_task_id,
            worker=dest_queue,
        ))
        queue = Taskcluster.get_service("queue")
        LOG.info("Creating task %s: %s", task_id, task["metadata"]["name"])
        try:
            queue.createTask(task_id, task)
        except TaskclusterFailure as exc:
            LOG.error("Error creating task: %s", exc)
            return None
        LOG.info("Marking %d Q4 (in progress)", crash_id)
        CrashManager().update_testcase_quality(crash_id,
                                               Quality.REDUCING.value)
Example #13
 def _create_build_task(
     self, service, dirty_dep_tasks, test_tasks, service_build_tasks
 ):
     if self.github_event.pull_request is not None:
         build_index = (
             f"index.project.fuzzing.orion.{service.name}"
             f".pull_request.{self.github_event.pull_request}"
         )
     else:
         build_index = (
             f"index.project.fuzzing.orion.{service.name}"
             f".{self.github_event.branch}"
         )
     build_task = yaml_load(
         BUILD_TASK.substitute(
             clone_url=self.github_event.clone_url,
             commit=self.github_event.commit,
             deadline=stringDate(self.now + DEADLINE),
             dockerfile=str(service.dockerfile.relative_to(service.context)),
             expires=stringDate(self.now + ARTIFACTS_EXPIRE),
             load_deps="1" if dirty_dep_tasks else "0",
             max_run_time=int(MAX_RUN_TIME.total_seconds()),
             now=stringDate(self.now),
             owner_email=OWNER_EMAIL,
             provisioner=PROVISIONER_ID,
             route=build_index,
             scheduler=SCHEDULER_ID,
             service_name=service.name,
             source_url=SOURCE_URL,
             task_group=self.task_group,
             worker=WORKER_TYPE,
         )
     )
     build_task["dependencies"].extend(dirty_dep_tasks + test_tasks)
     task_id = service_build_tasks[service.name]
     LOG.info(
         "%s task %s: %s", self._create_str, task_id, build_task["metadata"]["name"]
     )
     if not self.dry_run:
         try:
             Taskcluster.get_service("queue").createTask(task_id, build_task)
         except TaskclusterFailure as exc:  # pragma: no cover
             LOG.error("Error creating build task: %s", exc)
             raise
     return task_id
Example #14
def test_naive():
    dateObj = datetime.datetime(year=2000,
                                month=1,
                                day=1,
                                hour=1,
                                minute=1,
                                second=1)
    expected = '2000-01-01T01:01:01Z'
    actual = subject.stringDate(dateObj)
    assert expected == actual
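This test (and the variants below) pins down what stringDate produces: an ISO 8601 UTC timestamp with a trailing Z. A standalone check, assuming taskcluster.utils.stringDate:

    import datetime

    from taskcluster.utils import stringDate

    when = datetime.datetime(2000, 1, 1, 1, 1, 1)
    assert stringDate(when) == "2000-01-01T01:01:01Z"
    # the timedelta offsets used throughout the examples shift the rendered time
    assert stringDate(when + datetime.timedelta(hours=2)) == "2000-01-01T03:01:01Z"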
Example #15
    async def retry_task(self, group_id, hook_id, task_id):
        """
        Retry a Taskcluster task by:
        - fetching its definition
        - updating its dates & retry count
        - creating a new task
        Do NOT use rerunTask as it's deprecated AND not recommended
        https://docs.taskcluster.net/docs/reference/platform/taskcluster-queue/references/api#rerunTask
        """
        # Fetch task definition
        definition = await self.queue.task(task_id)

        # Update timestamps
        date_format = "%Y-%m-%dT%H:%M:%S.%f%z"
        now = datetime.utcnow()
        created = datetime.strptime(definition["created"], date_format)
        deadline = datetime.strptime(definition["deadline"], date_format)
        expires = datetime.strptime(definition["expires"], date_format)
        definition["created"] = stringDate(now)
        definition["deadline"] = stringDate(now + (deadline - created))
        definition["expires"] = stringDate(now + (expires - created))

        # Decrement retries count
        definition["retries"] -= 1
        if definition["retries"] < 0:
            logger.warn(
                "Will not retry task, no more retries left",
                task_id=task_id,
                group_id=group_id,
                hook_id=hook_id,
            )
            return

        # Trigger a new task with the updated definition
        new_task_id = slugId()
        logger.info("Retry task", old_task=task_id, new_task=new_task_id)
        await self.queue.createTask(new_task_id, definition)

        # Enqueue task to check later
        await self.bus.send(self.queue_name, (group_id, hook_id, new_task_id))

        return new_task_id
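Note what the timestamp update does: the new deadline and expires keep the same offsets from created that the original definition had, re-anchored at now. The arithmetic in isolation (stdlib only):

    from datetime import datetime, timedelta

    created = datetime(2021, 1, 1, 12, 0, 0)
    deadline = created + timedelta(hours=2)   # original: created + 2h
    expires = created + timedelta(hours=24)   # original: created + 24h

    now = datetime(2021, 1, 2, 9, 30, 0)      # time of the retry
    new_deadline = now + (deadline - created)
    new_expires = now + (expires - created)
    assert new_deadline - now == timedelta(hours=2)
    assert new_expires - now == timedelta(hours=24)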
Example #16
    async def retry_task(self, group_id, hook_id, task_id):
        '''
        Retry a Taskcluster task by:
        - fetching its definition
        - updating its dates & retry count
        - creating a new task
        Do NOT use rerunTask as it's deprecated AND not recommended
        https://docs.taskcluster.net/docs/reference/platform/taskcluster-queue/references/api#rerunTask
        '''
        assert self.queue is not None

        # Fetch task definition
        definition = self.queue.task(task_id)

        # Update timestamps
        date_format = '%Y-%m-%dT%H:%M:%S.%f%z'
        now = datetime.utcnow()
        created = datetime.strptime(definition['created'], date_format)
        deadline = datetime.strptime(definition['deadline'], date_format)
        expires = datetime.strptime(definition['expires'], date_format)
        definition['created'] = stringDate(now)
        definition['deadline'] = stringDate(now + (deadline - created))
        definition['expires'] = stringDate(now + (expires - created))

        # Decrement retries count
        definition['retries'] -= 1
        if definition['retries'] < 0:
            logger.warn('Will not retry task, no more retries left',
                        task_id=task_id,
                        group_id=group_id,
                        hook_id=hook_id)
            return

        # Trigger a new task with the updated definition
        new_task_id = slugId()
        logger.info('Retry task', old_task=task_id, new_task=new_task_id)
        self.queue.createTask(new_task_id, definition)

        # Monitor new task
        await self.add_task(group_id, hook_id, new_task_id)

        return new_task_id
Example #17
 def test_aware(self):
     dateObj = datetime.datetime(year=2000,
                                 month=1,
                                 day=1,
                                 hour=1,
                                 minute=1,
                                 second=1,
                                 tzinfo=utc)
     expected = '2000-01-01T01:01:01Z'
     actual = subject.stringDate(dateObj)
     self.assertEqual(expected, actual)
Example #18
def configure_task(
    task: Dict[str, Any],
    config: "PoolConfiguration",
    now: datetime,
    env: Optional[Dict[str, str]],
) -> None:
    task["payload"]["artifacts"].update(
        config.artifact_map(stringDate(fromNow("4 weeks", now)))
    )
    task["scopes"] = sorted(chain(config.get_scopes(), task["scopes"]))
    add_capabilities_for_scopes(task)
    add_task_image(task, config)
    if config.platform == "windows":
        task["payload"]["env"]["MSYSTEM"] = "MINGW64"
        task["payload"]["command"] = [
            "set HOME=%CD%",
            "set ARTIFACTS=%CD%",
            "set PATH="
            + ";".join(
                [
                    r"%CD%\msys64\opt\python",
                    r"%CD%\msys64\opt\python\Scripts",
                    r"%CD%\msys64\MINGW64\bin",
                    r"%CD%\msys64\usr\bin",
                    "%PATH%",
                ]
            ),
            "fuzzing-pool-launch",
        ]
        if config.run_as_admin:
            task["payload"].setdefault("osGroups", [])
            task["payload"]["osGroups"].append("Administrators")
            task["payload"]["features"]["runAsAdministrator"] = True
    elif config.platform == "macos":
        task["payload"]["command"] = [
            [
                "/bin/bash",
                "-c",
                "-x",
                'eval "$(homebrew/bin/brew shellenv)" && exec fuzzing-pool-launch',
            ],
        ]

    if config.platform in {"macos", "windows"}:
        # translate artifacts from dict to array for generic-worker
        task["payload"]["artifacts"] = [
            # `... or artifact` because dict.update returns None
            artifact.update({"name": name}) or artifact
            for name, artifact in task["payload"]["artifacts"].items()
        ]
    if env is not None:
        assert set(task["payload"]["env"]).isdisjoint(set(env))
        task["payload"]["env"].update(env)
Example #19
 def test_naive(self):
     dateObj = datetime.datetime(
         year=2000,
         month=1,
         day=1,
         hour=1,
         minute=1,
         second=1
     )
     expected = '2000-01-01T01:01:01Z'
     actual = subject.stringDate(dateObj)
     self.assertEqual(expected, actual)
Example #20
    def queue_reduction_task(self, os_name, crash_id):
        """Queue a reduction task in Taskcluster.

        Arguments:
            os_name (str): The OS to schedule the task for.
            crash_id (int): The CrashManager crash ID to reduce.

        Returns:
            None
        """
        if self.dry_run:
            return
        dest_queue = TC_QUEUES[os_name]
        my_task_id = os.environ.get("TASK_ID")
        task_id = slugId()
        now = datetime.utcnow()
        task = yaml_load(
            REDUCE_TASK.substitute(
                task_group=my_task_id,
                now=stringDate(now),
                deadline=stringDate(now + REDUCTION_DEADLINE),
                expires=stringDate(now + REDUCTION_EXPIRES),
                provisioner=PROVISIONER_ID,
                scheduler=SCHEDULER_ID,
                worker=dest_queue,
                max_run_time=int(REDUCTION_MAX_RUN_TIME.total_seconds()),
                description=DESCRIPTION,
                owner_email=OWNER_EMAIL,
                crash_id=crash_id,
                os_name=os_name,
            ))
        queue = Taskcluster.get_service("queue")
        LOG.info("Creating task %s: %s", task_id, task["metadata"]["name"])
        try:
            queue.createTask(task_id, task)
        except TaskclusterFailure as exc:
            LOG.error("Error creating task: %s", exc)
            return
        LOG.info("Marking %d Q4 (in progress)", crash_id)
        CrashManager().update_testcase_quality(crash_id, 4)
Example #21
    def build_tasks(self, parent_task_id: str, env: Optional[Dict[str, str]] = None):
        """Create fuzzing tasks and attach them to a decision task"""
        now = datetime.utcnow()
        preprocess_task_id = None

        preprocess = cast(PoolConfiguration, self.create_preprocess())
        if preprocess is not None:
            assert preprocess.max_run_time is not None
            task = yaml.safe_load(
                FUZZING_TASK.substitute(
                    created=stringDate(now),
                    deadline=stringDate(
                        now + timedelta(seconds=preprocess.max_run_time)
                    ),
                    description=DESCRIPTION.replace("\n", "\\n"),
                    expires=stringDate(fromNow("4 weeks", now)),
                    max_run_time=preprocess.max_run_time,
                    name=f"Fuzzing task {self.task_id} - preprocess",
                    owner_email=OWNER_EMAIL,
                    pool_id=self.pool_id,
                    provisioner=PROVISIONER_ID,
                    scheduler=SCHEDULER_ID,
                    secret=DECISION_TASK_SECRET,
                    task_group=parent_task_id,
                    task_id=self.task_id,
                )
            )
            task["payload"]["env"]["TASKCLUSTER_FUZZING_PREPROCESS"] = "1"
            configure_task(task, preprocess, now, env)
            preprocess_task_id = slugId()
            yield preprocess_task_id, task

        assert self.max_run_time is not None
        assert self.tasks is not None
        for i in range(1, self.tasks + 1):
            task = yaml.safe_load(
                FUZZING_TASK.substitute(
                    created=stringDate(now),
                    deadline=stringDate(now + timedelta(seconds=self.max_run_time)),
                    description=DESCRIPTION.replace("\n", "\\n"),
                    expires=stringDate(fromNow("4 weeks", now)),
                    max_run_time=self.max_run_time,
                    name=f"Fuzzing task {self.task_id} - {i}/{self.tasks}",
                    owner_email=OWNER_EMAIL,
                    pool_id=self.pool_id,
                    provisioner=PROVISIONER_ID,
                    scheduler=SCHEDULER_ID,
                    secret=DECISION_TASK_SECRET,
                    task_group=parent_task_id,
                    task_id=self.task_id,
                )
            )
            if preprocess_task_id is not None:
                task["dependencies"].append(preprocess_task_id)
            configure_task(task, self, now, env)
            yield slugId(), task
Example #22
def upload_artifact(queue_service, artifact_path, content, content_type, ttl):
    """
    Create an artifact on the current Taskcluster Task in 2 steps:
    1. create the Artifact through the API
    2. upload the file on the storage provider
    """
    task_id = os.environ.get("TASK_ID")
    run_id = os.environ.get("RUN_ID")
    proxy = os.environ.get("TASKCLUSTER_PROXY_URL")
    assert task_id and run_id and proxy, "Can only run in Taskcluster tasks with proxy"
    assert isinstance(content, str)
    assert isinstance(ttl, datetime.timedelta)

    # Create S3 artifact on Taskcluster
    resp = queue_service.createArtifact(
        task_id,
        run_id,
        artifact_path,
        {
            "storageType": "s3",
            "expires": stringDate(datetime.datetime.utcnow() + ttl),
            "contentType": content_type,
        },
    )
    assert resp["storageType"] == "s3", "Not an s3 storage"
    assert "putUrl" in resp, "Missing putUrl"
    assert "contentType" in resp, "Missing contentType"

    # Push the artifact on storage service
    headers = {"Content-Type": resp["contentType"]}
    push = requests.put(url=resp["putUrl"], headers=headers, data=content)
    push.raise_for_status()

    # Build the absolute url
    return "/api/queue/v1/task/{task_id}/runs/{run_id}/artifacts/{path}".format(
        task_id=task_id,
        run_id=run_id,
        path=artifact_path,
    )
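A hedged usage sketch for upload_artifact: it assumes a live task environment (TASK_ID, RUN_ID, and TASKCLUSTER_PROXY_URL set), a configured queue_service client, and an illustrative artifact path:

    import datetime

    # Hypothetical call: publish a small JSON report, kept for one week
    url = upload_artifact(
        queue_service,
        artifact_path="public/report.json",
        content='{"status": "ok"}',
        content_type="application/json",
        ttl=datetime.timedelta(days=7),
    )
    # -> /api/queue/v1/task/<TASK_ID>/runs/<RUN_ID>/artifacts/public/report.json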
Example #23
def test_create_09(mocker):
    """test recipe test task creation"""
    taskcluster = mocker.patch("orion_decision.scheduler.Taskcluster",
                               autospec=True)
    queue = taskcluster.get_service.return_value
    now = datetime.utcnow()
    root = FIXTURES / "services03"
    evt = mocker.Mock(spec=GithubEvent())
    evt.repo.path = root
    evt.repo.git = mocker.Mock(return_value="\n".join(
        str(p) for p in root.glob("**/*")))
    evt.commit = "commit"
    evt.branch = "main"
    evt.clone_url = "https://example.com"
    evt.pull_request = None
    sched = Scheduler(evt, now, "group", "secret", "push")
    sched.services["test5"].dirty = True
    sched.services["test6"].dirty = True
    sched.services.recipes["withdep.sh"].dirty = True
    sched.create_tasks()
    assert queue.createTask.call_count == 3
    task1_id, task1 = queue.createTask.call_args_list[0].args
    assert task1 == yaml_load(
        BUILD_TASK.substitute(
            clone_url="https://example.com",
            commit="commit",
            deadline=stringDate(now + DEADLINE),
            dockerfile="test5/Dockerfile",
            expires=stringDate(now + ARTIFACTS_EXPIRE),
            load_deps="0",
            max_run_time=int(MAX_RUN_TIME.total_seconds()),
            now=stringDate(now),
            owner_email=OWNER_EMAIL,
            provisioner=PROVISIONER_ID,
            route="index.project.fuzzing.orion.test5.main",
            scheduler=SCHEDULER_ID,
            service_name="test5",
            source_url=SOURCE_URL,
            task_group="group",
            worker=WORKER_TYPE,
        ))
    task2_id, task2 = queue.createTask.call_args_list[1].args
    expected2 = yaml_load(
        RECIPE_TEST_TASK.substitute(
            clone_url="https://example.com",
            commit="commit",
            deadline=stringDate(now + DEADLINE),
            dockerfile="services/test-recipes/Dockerfile",
            max_run_time=int(MAX_RUN_TIME.total_seconds()),
            now=stringDate(now),
            owner_email=OWNER_EMAIL,
            provisioner=PROVISIONER_ID,
            recipe_name="withdep.sh",
            scheduler=SCHEDULER_ID,
            source_url=SOURCE_URL,
            task_group="group",
            worker=WORKER_TYPE,
        ))
    expected2["dependencies"].append(task1_id)
    assert task2 == expected2
    _, task3 = queue.createTask.call_args_list[2].args
    expected3 = yaml_load(
        BUILD_TASK.substitute(
            clone_url="https://example.com",
            commit="commit",
            deadline=stringDate(now + DEADLINE),
            dockerfile="test6/Dockerfile",
            expires=stringDate(now + ARTIFACTS_EXPIRE),
            load_deps="0",
            max_run_time=int(MAX_RUN_TIME.total_seconds()),
            now=stringDate(now),
            owner_email=OWNER_EMAIL,
            provisioner=PROVISIONER_ID,
            route="index.project.fuzzing.orion.test6.main",
            scheduler=SCHEDULER_ID,
            service_name="test6",
            source_url=SOURCE_URL,
            task_group="group",
            worker=WORKER_TYPE,
        ))
    expected3["dependencies"].append(task2_id)
    assert task3 == expected3
Example #24
def test_create_08(mocker, ci1_dirty, svc1_dirty, svc2_dirty, expected_image):
    """test "test" tasks creation with dirty ci image"""
    taskcluster = mocker.patch("orion_decision.scheduler.Taskcluster",
                               autospec=True)
    queue = taskcluster.get_service.return_value
    now = datetime.utcnow()
    root = FIXTURES / "services06"
    evt = mocker.Mock(spec=GithubEvent())
    evt.repo.path = root
    evt.repo.git = mocker.Mock(return_value="\n".join(
        str(p) for p in root.glob("**/*")))
    evt.commit = "commit"
    evt.branch = "main"
    evt.fetch_ref = "fetch"
    evt.clone_url = "https://example.com"
    evt.pull_request = None
    sched = Scheduler(evt, now, "group", "secret", "push")
    sched.services["testci1"].dirty = ci1_dirty
    sched.services["svc1"].dirty = svc1_dirty
    sched.services["svc2"].dirty = svc2_dirty
    sched.create_tasks()
    assert queue.createTask.call_count == (3 if ci1_dirty else 2)
    call_idx = 0
    if ci1_dirty:
        task1_id, task1 = queue.createTask.call_args_list[call_idx].args
        call_idx += 1
        assert task1 == yaml_load(
            BUILD_TASK.substitute(
                clone_url="https://example.com",
                commit="commit",
                deadline=stringDate(now + DEADLINE),
                dockerfile="testci1/Dockerfile",
                expires=stringDate(now + ARTIFACTS_EXPIRE),
                load_deps="0",
                max_run_time=int(MAX_RUN_TIME.total_seconds()),
                now=stringDate(now),
                owner_email=OWNER_EMAIL,
                provisioner=PROVISIONER_ID,
                route="index.project.fuzzing.orion.testci1.main",
                scheduler=SCHEDULER_ID,
                service_name="testci1",
                source_url=SOURCE_URL,
                task_group="group",
                worker=WORKER_TYPE,
            ))
    svc = "svc1" if svc1_dirty else "svc2"
    expected2 = yaml_load(
        TEST_TASK.substitute(
            commit="commit",
            commit_url="https://example.com",
            deadline=stringDate(now + DEADLINE),
            dockerfile=f"{svc}/Dockerfile",
            expires=stringDate(now + ARTIFACTS_EXPIRE),
            max_run_time=int(MAX_RUN_TIME.total_seconds()),
            now=stringDate(now),
            owner_email=OWNER_EMAIL,
            provisioner=PROVISIONER_ID,
            route=f"index.project.fuzzing.orion.{svc}.main",
            scheduler=SCHEDULER_ID,
            service_name=svc,
            source_url=SOURCE_URL,
            task_group="group",
            test_name=f"{svc}test",
            worker=WORKER_TYPE,
        ))
    if ci1_dirty:
        expected_image["taskId"] = task1_id
        expected2["dependencies"].append(task1_id)
    expected2["payload"]["image"] = expected_image
    sched.services[svc].tests[0].update_task(expected2, "https://example.com",
                                             "fetch", "commit", svc)
    task2_id, task2 = queue.createTask.call_args_list[call_idx].args
    call_idx += 1
    assert task2 == expected2
    task3_id, task3 = queue.createTask.call_args_list[call_idx].args
    call_idx += 1
    expected3 = yaml_load(
        BUILD_TASK.substitute(
            clone_url="https://example.com",
            commit="commit",
            deadline=stringDate(now + DEADLINE),
            dockerfile=f"{svc}/Dockerfile",
            expires=stringDate(now + ARTIFACTS_EXPIRE),
            load_deps="0",
            max_run_time=int(MAX_RUN_TIME.total_seconds()),
            now=stringDate(now),
            owner_email=OWNER_EMAIL,
            provisioner=PROVISIONER_ID,
            route=f"index.project.fuzzing.orion.{svc}.main",
            scheduler=SCHEDULER_ID,
            service_name=svc,
            source_url=SOURCE_URL,
            task_group="group",
            worker=WORKER_TYPE,
        ))
    expected3["dependencies"].append(task2_id)
    assert task3 == expected3
Example #25
def test_ci_create_02(
    mocker: MockerFixture,
    platform: str,
    matrix_secret: Optional[str],
    job_secret: Optional[str],
) -> None:
    """test single stage CI task creation"""
    taskcluster = mocker.patch("orion_decision.ci_scheduler.Taskcluster",
                               autospec=True)
    queue = mocker.Mock()
    index = mocker.Mock()
    index.findTask.return_value = {"taskId": "msys-task"}
    taskcluster.get_service.side_effect = lambda x: {
        "index": index,
        "queue": queue
    }[x]
    now = datetime.utcnow()
    evt = mocker.Mock(
        branch="dev",
        event_type="push",
        ssh_url="ssh://repo",
        http_url="test://repo",
        fetch_ref="fetchref",
        repo_slug="project/test",
        tag=None,
        commit="commit",
        user="******",
        spec=GithubEvent(),
    )
    mtx = mocker.patch("orion_decision.ci_scheduler.CIMatrix", autospec=True)
    job = MatrixJob(
        name="testjob",
        language="python",
        version="3.7",
        platform=platform,
        env={},
        script=["test"],
    )
    mtx.return_value.jobs = [job]
    secrets = []
    scopes = []
    clone_repo = evt.http_url

    def _create_secret(kind: str) -> CISecret:
        nonlocal clone_repo
        sec: CISecret
        if kind == "env":
            sec = CISecretEnv("project/test/token", "TOKEN")
        elif kind == "deploy":
            clone_repo = evt.ssh_url
            sec = CISecretKey("project/test/key")
        elif kind == "key":
            sec = CISecretKey("project/test/key", hostname="host")
        elif kind == "file":
            sec = CISecretFile("project/test/cfg", "/cfg")
        else:
            assert False, f"unknown secret kind: {kind}"
        scopes.append(f"secrets:get:{sec.secret}")
        return sec

    if job_secret is not None:
        sec = _create_secret(job_secret)
        job.secrets.append(sec)
    if matrix_secret is not None:
        sec = _create_secret(matrix_secret)
        secrets.append(sec)
    mtx.return_value.secrets = secrets
    sched = CIScheduler("test", evt, now, "group", {})
    sched.create_tasks()
    assert queue.createTask.call_count == 1
    _, task = queue.createTask.call_args[0]
    # add matrix secrets to `job`. this is different than how it's done in the
    # scheduler, but will have the same effect (and the scheduler is done with `job`)
    job.secrets.extend(secrets)
    kwds = {
        "ci_job": json_dump(str(job)),
        "clone_repo": clone_repo,
        "deadline": stringDate(now + DEADLINE),
        "fetch_ref": evt.fetch_ref,
        "fetch_rev": evt.commit,
        "http_repo": evt.http_url,
        "max_run_time": int(MAX_RUN_TIME.total_seconds()),
        "name": job.name,
        "now": stringDate(now),
        "project": "test",
        "provisioner": PROVISIONER_ID,
        "scheduler": SCHEDULER_ID,
        "task_group": "group",
        "user": evt.user,
        "worker": WORKER_TYPES[platform],
    }
    if platform == "linux":
        kwds["image"] = job.image
    else:
        assert index.findTask.call_count == 1
        assert job.image in index.findTask.call_args[0][0]
        kwds["msys_task"] = "msys-task"
    expected = yaml_load(TEMPLATES[platform].substitute(**kwds))
    expected["requires"] = "all-resolved"
    expected["scopes"].extend(scopes)
    if matrix_secret is not None or job_secret is not None:
        expected["payload"].setdefault("features", {})
        expected["payload"]["features"]["taskclusterProxy"] = True
    assert set(task["scopes"]) == set(expected["scopes"])
    assert len(task["scopes"]) == len(expected["scopes"])
    task["scopes"] = expected["scopes"]
    assert task == expected
    assert all(sec.secret in task["payload"]["env"]["CI_JOB"]
               for sec in job.secrets)
Example #26
    def create_tasks(self) -> None:
        """Create CI tasks in Taskcluster."""
        job_tasks = {id(job): slugId() for job in self.matrix.jobs}
        prev_stage: List[str] = []
        for stage in sorted({job.stage for job in self.matrix.jobs}):
            this_stage = []
            for job in self.matrix.jobs:
                if job.stage != stage:
                    continue
                task_id = job_tasks[id(job)]
                this_stage.append(task_id)
                has_deploy_key = any(
                    isinstance(sec, CISecretKey) and sec.hostname is None
                    for sec in chain(self.matrix.secrets, job.secrets))
                if has_deploy_key:
                    clone_repo = self.github_event.ssh_url
                else:
                    clone_repo = self.github_event.http_url
                job_ser = job.serialize()
                assert isinstance(job_ser["secrets"], list)
                job_ser["secrets"].extend(secret.serialize()
                                          for secret in self.matrix.secrets)
                # set CI environment vars for compatibility with eg. codecov
                job_ser["env"].update({
                    "CI":
                    "true",
                    "CI_BUILD_ID":
                    self.task_group,
                    "CI_BUILD_URL":
                    f"{TASKCLUSTER_ROOT_URL}/tasks/{task_id}",
                    "CI_JOB_ID":
                    task_id,
                    "VCS_BRANCH_NAME":
                    self.github_event.branch,
                    "VCS_COMMIT_ID":
                    self.github_event.commit,
                    "VCS_PULL_REQUEST":
                    str(self.github_event.pull_request or "false"),
                    "VCS_SLUG":
                    self.github_event.repo_slug,
                    "VCS_TAG":
                    self.github_event.tag or "",
                })
                kwds = {
                    # need to json.dump twice so we get a string literal in the yaml
                    # template. otherwise (since it's yaml) it would be interpreted
                    # as an object.
                    "ci_job": json_dump(json_dump(job_ser)),
                    "clone_repo": clone_repo,
                    "deadline": stringDate(self.now + DEADLINE),
                    "fetch_ref": self.github_event.fetch_ref,
                    "fetch_rev": self.github_event.commit,
                    "http_repo": self.github_event.http_url,
                    "max_run_time": int(MAX_RUN_TIME.total_seconds()),
                    "name": job.name,
                    "now": stringDate(self.now),
                    "project": self.project_name,
                    "provisioner": PROVISIONER_ID,
                    "scheduler": SCHEDULER_ID,
                    "task_group": self.task_group,
                    "user": self.github_event.user,
                    "worker": WORKER_TYPES[job.platform],
                }
                if job.platform == "windows":
                    # need to resolve "image" to a task ID where the MSYS
                    # artifact is
                    idx = Taskcluster.get_service("index")
                    result = idx.findTask(
                        f"project.fuzzing.orion.{job.image}.master")
                    kwds["msys_task"] = result["taskId"]
                elif job.platform == "macos":
                    # need to resolve "image" to a task ID where the Homebrew
                    # artifact is
                    idx = Taskcluster.get_service("index")
                    result = idx.findTask(
                        f"project.fuzzing.orion.{job.image}.master")
                    kwds["homebrew_task"] = result["taskId"]
                else:
                    kwds["image"] = job.image
                task = yaml_load(TEMPLATES[job.platform].substitute(**kwds))
                # if any secrets exist, use the proxy and request scopes
                if job.secrets or self.matrix.secrets:
                    task["payload"].setdefault("features", {})
                    task["payload"]["features"]["taskclusterProxy"] = True
                    for sec in chain(job.secrets, self.matrix.secrets):
                        task["scopes"].append(f"secrets:get:{sec.secret}")
                    # ensure scopes are unique
                    task["scopes"] = list(set(task["scopes"]))
                if not job.require_previous_stage_pass:
                    task["requires"] = "all-resolved"
                task["dependencies"].extend(prev_stage)
                LOG.info("task %s: %s", task_id, task["metadata"]["name"])
                if not self.dry_run:
                    try:
                        Taskcluster.get_service("queue").createTask(
                            task_id, task)
                    except TaskclusterFailure as exc:  # pragma: no cover
                        LOG.error("Error creating CI task: %s", exc)
                        raise

            prev_stage = this_stage
Example #27
 def _create_build_task(
     self, service, dirty_dep_tasks, test_tasks, service_build_tasks
 ):
     if self.github_event.pull_request is not None:
         build_index = (
             f"index.project.fuzzing.orion.{service.name}"
             f".pull_request.{self.github_event.pull_request}"
         )
     else:
         build_index = (
             f"index.project.fuzzing.orion.{service.name}"
             f".{self.github_event.branch}"
         )
     assert self.now is not None
     if isinstance(service, ServiceMsys):
         if service.base.endswith(".exe"):
             if not service.base.endswith(".sfx.exe"):
                 LOG.warning("'base' ends with .exe, but not .sfx.exe: %s", service.base)
             task_template = MSYS_EXE_TASK
         else:
             task_template = MSYS_TASK
         build_task = yaml_load(
             task_template.substitute(
                 clone_url=self.github_event.http_url,
                 commit=self.github_event.commit,
                 deadline=stringDate(self.now + DEADLINE),
                 expires=stringDate(self.now + ARTIFACTS_EXPIRE),
                 max_run_time=int(MAX_RUN_TIME.total_seconds()),
                 msys_base_url=service.base,
                 now=stringDate(self.now),
                 owner_email=OWNER_EMAIL,
                 provisioner=PROVISIONER_ID,
                 route=build_index,
                 scheduler=SCHEDULER_ID,
                 service_name=service.name,
                 setup_sh_path=str(
                     (service.root / "setup.sh").relative_to(service.context)
                 ),
                 source_url=SOURCE_URL,
                 task_group=self.task_group,
                 worker=WORKER_TYPE_MSYS,
             )
         )
     elif isinstance(service, ServiceHomebrew):
         build_task = yaml_load(
             HOMEBREW_TASK.substitute(
                 clone_url=self.github_event.http_url,
                 commit=self.github_event.commit,
                 deadline=stringDate(self.now + DEADLINE),
                 expires=stringDate(self.now + ARTIFACTS_EXPIRE),
                 max_run_time=int(MAX_RUN_TIME.total_seconds()),
                 homebrew_base_url=service.base,
                 now=stringDate(self.now),
                 owner_email=OWNER_EMAIL,
                 provisioner=PROVISIONER_ID,
                 route=build_index,
                 scheduler=SCHEDULER_ID,
                 service_name=service.name,
                 setup_sh_path=str(
                     (service.root / "setup.sh").relative_to(service.context)
                 ),
                 source_url=SOURCE_URL,
                 task_group=self.task_group,
                 worker=WORKER_TYPE_BREW,
             )
         )
     else:
         build_task = yaml_load(
             BUILD_TASK.substitute(
                 clone_url=self.github_event.http_url,
                 commit=self.github_event.commit,
                 deadline=stringDate(self.now + DEADLINE),
                 dockerfile=str(service.dockerfile.relative_to(service.context)),
                 expires=stringDate(self.now + ARTIFACTS_EXPIRE),
                 load_deps="1" if dirty_dep_tasks else "0",
                 max_run_time=int(MAX_RUN_TIME.total_seconds()),
                 now=stringDate(self.now),
                 owner_email=OWNER_EMAIL,
                 provisioner=PROVISIONER_ID,
                 route=build_index,
                 scheduler=SCHEDULER_ID,
                 service_name=service.name,
                 source_url=SOURCE_URL,
                 task_group=self.task_group,
                 worker=WORKER_TYPE,
             )
         )
     build_task["dependencies"].extend(dirty_dep_tasks + test_tasks)
     task_id = service_build_tasks[service.name]
     LOG.info(
         "%s task %s: %s", self._create_str, task_id, build_task["metadata"]["name"]
     )
     if not self.dry_run:
         try:
             Taskcluster.get_service("queue").createTask(task_id, build_task)
         except TaskclusterFailure as exc:  # pragma: no cover
             LOG.error("Error creating build task: %s", exc)
             raise
     return task_id
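
The three templates above (MSYS_TASK, HOMEBREW_TASK, BUILD_TASK) all follow one pattern: a string.Template whose rendered output is YAML, parsed with yaml_load and then patched in place. A minimal sketch of that pattern, using simplified stand-in values rather than the real orion-decision definitions:

from string import Template

from yaml import safe_load as yaml_load

# Stand-in template; the real BUILD_TASK carries the full task definition.
BUILD_TASK = Template(
    """\
provisionerId: $provisioner
workerType: $worker
metadata:
  name: Orion $service_name build
dependencies: []
"""
)

build_task = yaml_load(
    BUILD_TASK.substitute(
        provisioner="proj-fuzzing",
        worker="ci",
        service_name="example",
    )
)
# Lists and other non-scalar values are patched in after parsing,
# just like `dependencies` in the example above.
build_task["dependencies"].append("someTaskId")
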
Example no. 30
    def build_tasks(self, parent_task_id, env=None):
        """Create fuzzing tasks and attach them to a decision task"""
        now = datetime.utcnow()
        preprocess_task_id = None

        preprocess = self.create_preprocess()
        if preprocess is not None:
            task = yaml.safe_load(
                FUZZING_TASK.substitute(
                    created=stringDate(now),
                    deadline=stringDate(now + timedelta(
                        seconds=preprocess.max_run_time)),
                    description=DESCRIPTION.replace("\n", "\\n"),
                    expires=stringDate(fromNow("1 week", now)),
                    max_run_time=preprocess.max_run_time,
                    name=f"Fuzzing task {self.task_id} - preprocess",
                    owner_email=OWNER_EMAIL,
                    pool_id=self.pool_id,
                    provisioner=PROVISIONER_ID,
                    scheduler=SCHEDULER_ID,
                    secret=DECISION_TASK_SECRET,
                    task_group=parent_task_id,
                    task_id=self.task_id,
                ))
            task["payload"]["artifacts"].update(
                preprocess.artifact_map(stringDate(fromNow("1 week", now))))
            task["payload"]["env"]["TASKCLUSTER_FUZZING_PREPROCESS"] = "1"
            # `container` can be either a string or a dict, so can't template it
            task["payload"]["image"] = preprocess.container
            task["scopes"] = sorted(chain(preprocess.scopes, task["scopes"]))
            add_capabilities_for_scopes(task)
            if env is not None:
                assert set(task["payload"]["env"]).isdisjoint(set(env))
                task["payload"]["env"].update(env)

            preprocess_task_id = slugId()
            yield preprocess_task_id, task

        for i in range(1, self.tasks + 1):
            task = yaml.safe_load(
                FUZZING_TASK.substitute(
                    created=stringDate(now),
                    deadline=stringDate(now +
                                        timedelta(seconds=self.max_run_time)),
                    description=DESCRIPTION.replace("\n", "\\n"),
                    expires=stringDate(fromNow("1 week", now)),
                    max_run_time=self.max_run_time,
                    name=f"Fuzzing task {self.task_id} - {i}/{self.tasks}",
                    owner_email=OWNER_EMAIL,
                    pool_id=self.pool_id,
                    provisioner=PROVISIONER_ID,
                    scheduler=SCHEDULER_ID,
                    secret=DECISION_TASK_SECRET,
                    task_group=parent_task_id,
                    task_id=self.task_id,
                ))
            task["payload"]["artifacts"].update(
                self.artifact_map(stringDate(fromNow("1 week", now))))
            # `container` can be either a string or a dict, so can't template it
            task["payload"]["image"] = self.container
            if preprocess_task_id is not None:
                task["dependencies"].append(preprocess_task_id)
            task["scopes"] = sorted(chain(self.scopes, task["scopes"]))
            add_capabilities_for_scopes(task)
            if env is not None:
                assert set(task["payload"]["env"]).isdisjoint(set(env))
                task["payload"]["env"].update(env)

            yield slugId(), task
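
Note that build_tasks is a generator yielding (task_id, task) pairs rather than submitting anything itself. A sketch of how a caller might consume it, assuming the queue wiring shown in the earlier examples (pool and parent_task_id here are placeholders):

from taskcluster.exceptions import TaskclusterFailure

queue = Taskcluster.get_service("queue")
for task_id, task in pool.build_tasks(parent_task_id, env={"EXTRA_VAR": "1"}):
    try:
        queue.createTask(task_id, task)
    except TaskclusterFailure as exc:
        LOG.error("Error creating fuzzing task: %s", exc)
        raise
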
Example no. 31
def test_ci_create_03(mocker: MockerFixture, previous_pass: bool) -> None:
    """test two stage CI task creation"""
    taskcluster = mocker.patch("orion_decision.ci_scheduler.Taskcluster",
                               autospec=True)
    queue = taskcluster.get_service.return_value
    now = datetime.utcnow()
    evt = mocker.Mock(
        branch="dev",
        event_type="push",
        http_url="test://repo",
        fetch_ref="fetchref",
        commit="commit",
        user="******",
        repo_slug="project/test",
        tag=None,
        spec=GithubEvent(),
    )
    mtx = mocker.patch("orion_decision.ci_scheduler.CIMatrix", autospec=True)
    job1 = MatrixJob(
        name="testjob1",
        language="python",
        version="3.7",
        platform="linux",
        env={},
        script=["test"],
    )
    job2 = MatrixJob(
        name="testjob2",
        language="python",
        version="3.7",
        platform=job1.platform,
        env={},
        script=["test"],
        stage=2,
        previous_pass=previous_pass,
    )
    mtx.return_value.jobs = [job1, job2]
    mtx.return_value.secrets = []
    sched = CIScheduler("test", evt, now, "group", {})
    sched.create_tasks()
    assert queue.createTask.call_count == 2
    task1_id, task1 = queue.createTask.call_args_list[0][0]
    kwds = {
        "ci_job": json_dump(str(job1)),
        "clone_repo": evt.http_url,
        "deadline": stringDate(now + DEADLINE),
        "fetch_ref": evt.fetch_ref,
        "fetch_rev": evt.commit,
        "http_repo": evt.http_url,
        "max_run_time": int(MAX_RUN_TIME.total_seconds()),
        "name": job1.name,
        "now": stringDate(now),
        "project": "test",
        "provisioner": PROVISIONER_ID,
        "scheduler": SCHEDULER_ID,
        "task_group": "group",
        "user": evt.user,
        "worker": WORKER_TYPES[job1.platform],
    }
    kwds["image"] = job1.image
    expected = yaml_load(TEMPLATES[job1.platform].substitute(**kwds))
    expected["requires"] = "all-resolved"
    assert task1 == expected

    _, task2 = queue.createTask.call_args_list[1][0]
    kwds["ci_job"] = json_dump(str(job2))
    kwds["image"] = job2.image
    kwds["name"] = job2.name
    kwds["worker"] = WORKER_TYPES[job2.platform]
    expected = yaml_load(TEMPLATES[job2.platform].substitute(**kwds))
    if not previous_pass:
        expected["requires"] = "all-resolved"
    expected["dependencies"].append(task1_id)
    assert task2 == expected
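
The assertions above revolve around the Taskcluster `requires` field: a stage-2 job gated on `previous_pass` keeps the default `all-completed` (run only if every dependency succeeded), while everything else is relaxed to `all-resolved` (run once dependencies resolve, pass or fail). A hypothetical illustration of that rule, consistent with the test's expectations but not taken from the scheduler itself:

def requires_for(job):
    """Pick the Taskcluster `requires` policy for a CI matrix job."""
    if job.stage > 1 and job.previous_pass:
        # run only if the earlier stage actually passed
        return "all-completed"
    # run as soon as dependencies resolve, pass or fail
    return "all-resolved"
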
Example no. 32
    def build_tasks(self, parent_task_id, env=None):
        """Create fuzzing tasks and attach them to a decision task"""
        now = datetime.utcnow()
        deps = [parent_task_id]

        preprocess = self.create_preprocess()
        if preprocess is not None:
            task_id = slugId()
            task = {
                "taskGroupId": parent_task_id,
                "dependencies": [parent_task_id],
                "created": stringDate(now),
                "deadline": stringDate(
                    now + timedelta(seconds=preprocess.max_run_time)
                ),
                "expires": stringDate(fromNow("1 week", now)),
                "extra": {},
                "metadata": {
                    "description": DESCRIPTION,
                    "name": f"Fuzzing task {self.task_id} - preprocess",
                    "owner": OWNER_EMAIL,
                    "source": "https://github.com/MozillaSecurity/fuzzing-tc",
                },
                "payload": {
                    "artifacts": preprocess.artifact_map(
                        stringDate(fromNow("1 week", now))
                    ),
                    "cache": {},
                    "capabilities": {},
                    "env": {
                        "TASKCLUSTER_FUZZING_POOL": self.pool_id,
                        "TASKCLUSTER_SECRET": DECISION_TASK_SECRET,
                        "TASKCLUSTER_FUZZING_PREPROCESS": "1",
                    },
                    "features": {"taskclusterProxy": True},
                    "image": preprocess.container,
                    "maxRunTime": preprocess.max_run_time,
                },
                "priority": "high",
                "provisionerId": PROVISIONER_ID,
                "workerType": self.task_id,
                "retries": 5,
                "routes": [],
                "schedulerId": SCHEDULER_ID,
                "scopes": preprocess.scopes + [f"secrets:get:{DECISION_TASK_SECRET}"],
                "tags": {},
            }
            add_capabilities_for_scopes(task)
            if env is not None:
                assert set(task["payload"]["env"]).isdisjoint(set(env))
                task["payload"]["env"].update(env)
            deps.append(task_id)

            yield task_id, task

        for i in range(1, self.tasks + 1):
            task_id = slugId()
            task = {
                "taskGroupId": parent_task_id,
                "dependencies": deps,
                "created": stringDate(now),
                "deadline": stringDate(now + timedelta(seconds=self.max_run_time)),
                "expires": stringDate(fromNow("1 week", now)),
                "extra": {},
                "metadata": {
                    "description": DESCRIPTION,
                    "name": f"Fuzzing task {self.task_id} - {i}/{self.tasks}",
                    "owner": OWNER_EMAIL,
                    "source": "https://github.com/MozillaSecurity/fuzzing-tc",
                },
                "payload": {
                    "artifacts": self.artifact_map(
                        stringDate(fromNow("1 week", now))
                    ),
                    "cache": {},
                    "capabilities": {},
                    "env": {
                        "TASKCLUSTER_FUZZING_POOL": self.pool_id,
                        "TASKCLUSTER_SECRET": DECISION_TASK_SECRET,
                    },
                    "features": {"taskclusterProxy": True},
                    "image": self.container,
                    "maxRunTime": self.max_run_time,
                },
                "priority": "high",
                "provisionerId": PROVISIONER_ID,
                "workerType": self.task_id,
                "retries": 5,
                "routes": [],
                "schedulerId": SCHEDULER_ID,
                "scopes": self.scopes + [f"secrets:get:{DECISION_TASK_SECRET}"],
                "tags": {},
            }
            add_capabilities_for_scopes(task)
            if env is not None:
                assert set(task["payload"]["env"]).isdisjoint(set(env))
                task["payload"]["env"].update(env)

            yield task_id, task
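
All of these examples lean on the same taskcluster client helpers: stringDate serializes a datetime into the ISO 8601 form the queue expects, fromNow offsets a datetime by a human-readable duration, and slugId generates a random 22-character task id. A quick demonstration:

from datetime import datetime, timedelta

from taskcluster.utils import fromNow, slugId, stringDate

now = datetime.utcnow()
print(stringDate(now))                       # e.g. 2023-01-01T00:00:00.123456Z
print(stringDate(now + timedelta(hours=2)))  # fixed offsets via timedelta
print(stringDate(fromNow("1 week", now)))    # human-readable offsets via fromNow
print(slugId())                              # e.g. N9Fqg4mSSUCRNC_IvJW3pw
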