async def test_fetch_hook_managed_filter(Hooks, make_hook):
    """Only hooks matching the managed patterns land in resources, and every
    hook group referenced by a pattern is listed from the API."""
    managed_patterns = ["Hook=garbage/.*", "Hook=proj.*", "Hook=imbstack/test4.*"]
    resources = Resources([], managed_patterns)

    matching = [
        make_hook(hookGroupId="garbage", hookId="test1"),
        make_hook(hookGroupId="garbage", hookId="test2"),
        make_hook(hookGroupId="project:gecko", hookId="test3"),
    ]
    non_matching = [
        # the imbstack group is still listed even though this hook is filtered out
        make_hook(hookGroupId="imbstack", hookId="test5"),
        make_hook(hookGroupId="notmanaged", hookId="test5"),
    ]
    Hooks.hooks.extend(matching + non_matching)

    await fetch_hooks(resources)

    assert list(resources) == sorted(Hook.from_api(h) for h in matching)
    assert Hooks.listHookCalls == ["garbage", "imbstack", "project:gecko"]
async def update_resources(resources, secret_values):
    """Populate *resources* with every resource derived from the projects config.

    For each project this adds: admin roles, a project-admin role over its
    repos, worker pools (plus their secrets), clients, secrets, hooks, and
    grant-derived roles.  When a project is externally managed, the matching
    resource patterns are also registered via ``resources.manage`` so tc-admin
    will track them without owning their definitions.

    Args:
        resources: tc-admin Resources collection to populate (mutated in place).
        secret_values: rendered secret values, or a falsy value when secrets
            should be added without content (e.g. a dry run without access).
    """
    projects = await Projects.load(loader)
    for project in projects.values():
        # Every declared admin role must use a known prefix; each one is
        # granted the project-admin scope for this project.
        for roleId in project.adminRoles:
            assert any(roleId.startswith(p) for p in ADMIN_ROLE_PREFIXES)
            resources.add(
                Role(
                    roleId=roleId,
                    description="",
                    scopes=["assume:project-admin:{}".format(project.name)],
                ))

        if project.repos:
            # Repo patterns must be wildcards so repo-admin scopes expand.
            for repo in project.repos:
                assert repo.endswith("/*") or repo.endswith(
                    ":*"
                ), "project.repos should end with `/*` or `:*`, got {}".format(
                    repo)
            resources.add(
                Role(
                    roleId="project-admin:{}".format(project.name),
                    description="",
                    scopes=[
                        "assume:repo-admin:{}".format(repo)
                        for repo in project.repos
                    ],
                ))

        if project.workerPools:
            for name, worker_pool in project.workerPools.items():
                worker_pool_id = "proj-{}/{}".format(project.name, name)
                worker_pool["description"] = "Workers for " + project.name
                worker_pool, secret = build_worker_pool(
                    worker_pool_id, worker_pool, secret_values)
                if project.externallyManaged.manage_individual_resources():
                    resources.manage("WorkerPool={}".format(worker_pool_id))
                    if secret:
                        resources.manage(
                            "Secret=worker-pool:{}".format(worker_pool_id))
                resources.add(worker_pool)
                if secret:
                    resources.add(secret)

        if project.clients:
            for name, info in project.clients.items():
                clientId = "project/{}/{}".format(project.name, name)
                if project.externallyManaged.manage_individual_resources():
                    resources.manage("Client={}".format(clientId))
                description = info.get("description", "")
                scopes = info["scopes"]
                resources.add(
                    Client(clientId=clientId,
                           description=description,
                           scopes=scopes))

        if project.secrets:
            for nameSuffix, info in project.secrets.items():
                # `true` means the secret exists but its value is managed
                # elsewhere — skip it entirely.
                if info is True:
                    continue
                name = "project/{}/{}".format(project.name, nameSuffix)
                if project.externallyManaged.manage_individual_resources():
                    resources.manage("Secret={}".format(name))
                if secret_values:
                    resources.add(
                        Secret(name=name, secret=secret_values.render(info)))
                else:
                    # No secret values available; add the secret by name only.
                    resources.add(Secret(name=name))

        if project.hooks:
            for hookId, info in project.hooks.items():
                hookGroupId = "project-{}".format(project.name)
                if project.externallyManaged.manage_individual_resources():
                    # Fixed: was `hookGroupid` (undefined name -> NameError
                    # for externally-managed projects with hooks).
                    resources.manage("Hook={}/{}".format(hookGroupId, hookId))
                assert (
                    "bindings" not in info
                ), "Please add support for bindings to use this feature"
                resources.add(
                    Hook(
                        hookGroupId=hookGroupId,
                        hookId=hookId,
                        name=info.get("name", hookId),
                        description=info.get("description", ""),
                        owner=info["owner"],
                        emailOnError=info.get("emailOnError", False),
                        schedule=info.get("schedule", ()),
                        bindings=info.get("bindings", ()),
                        task=info["task"],
                        triggerSchema=info.get("triggerSchema", {}),
                    ))

        for grant in Grants.from_project(project):
            if project.externallyManaged.manage_individual_resources():
                for role in grant.to:
                    # Patterns are regexes, so literal role IDs must be escaped.
                    resources.manage("Role=" + re.escape(role))
            grant.update_resources(resources)
async def test_fetch_hook(Hooks, make_hook):
    """A single API hook is fetched when everything is managed."""
    fetched = Resources([], [".*"])
    fake_hook = make_hook()
    Hooks.hooks.append(fake_hook)

    await fetch_hooks(fetched)

    assert list(fetched) == [Hook.from_api(fake_hook)]
async def register_hooks(resources):
    """Declare ownership of the project-servo hooks and add each hook
    defined in hooks.yml."""
    resources.manage("Hook=project-servo/.*")
    hook_configs = parse_yaml("hooks.yml")
    for hook_config in hook_configs:
        resources.add(Hook(**hook_config))
def build_resources(self, providers, machine_types, env=None):
    """Build the full tc-admin resources to compare and build the pool.

    Args:
        providers: mapping of cloud name -> provider object; must contain
            ``self.cloud``.
        machine_types: machine catalogue passed to ``get_machine_list``.
        env: optional extra environment variables for the decision task;
            keys must not collide with the built-in payload env.

    Returns:
        list: ``[pool, hook, role]`` — the WorkerPool, its triggering Hook,
        and the Role granting the hook its scopes.
    """
    # Select a cloud provider according to configuration
    assert self.cloud in providers, f"Cloud Provider {self.cloud} not available"
    provider = providers[self.cloud]

    # Union of scopes across all pools (tuple of a set, so order is arbitrary).
    pools = list(self.iterpools())
    all_scopes = tuple(
        set(itertools.chain.from_iterable(pool.scopes for pool in pools)))

    # Build the pool configuration for selected machines
    machines = self.get_machine_list(machine_types)
    config = {
        "minCapacity": 0,
        # Twice the total task count across pools, with a floor of 3.
        "maxCapacity": max(sum(pool.tasks for pool in pools) * 2, 3),
        "launchConfigs": provider.build_launch_configs(self.imageset,
                                                       machines,
                                                       self.disk_size),
        "lifecycle": {
            # give workers 15 minutes to register before assuming they're broken
            "registrationTimeout": parse_time("15m"),
            "reregistrationTimeout": parse_time("12h"),
        },
    }

    # Mandatory scopes to execute the hook
    # or create new tasks
    decision_task_scopes = (
        f"queue:scheduler-id:{SCHEDULER_ID}",
        f"queue:create-task:highest:{PROVISIONER_ID}/{self.task_id}",
        f"secrets:get:{DECISION_TASK_SECRET}",
    )

    # Build the decision task payload that will trigger the new fuzzing tasks
    decision_task = {
        "created": {
            "$fromNow": "0 seconds"
        },
        "deadline": {
            "$fromNow": "1 hour"
        },
        "expires": {
            "$fromNow": "1 week"
        },
        "extra": {},
        "metadata": {
            "description": DESCRIPTION,
            "name": f"Fuzzing decision {self.task_id}",
            "owner": OWNER_EMAIL,
            "source": "https://github.com/MozillaSecurity/fuzzing-tc",
        },
        "payload": {
            "artifacts": {},
            "cache": {},
            "capabilities": {},
            "env": {
                "TASKCLUSTER_SECRET": DECISION_TASK_SECRET
            },
            "features": {
                "taskclusterProxy": True
            },
            # The decision image is resolved through the Taskcluster index.
            "image": {
                "type": "indexed-image",
                "path": "public/fuzzing-tc-decision.tar",
                "namespace": "project.fuzzing.config.master",
            },
            "command": ["fuzzing-decision", self.pool_id],
            "maxRunTime": parse_time("1h"),
        },
        "priority": "high",
        "provisionerId": PROVISIONER_ID,
        "workerType": self.task_id,
        "retries": 5,
        "routes": [],
        "schedulerId": SCHEDULER_ID,
        "scopes": all_scopes + decision_task_scopes,
        "tags": {},
    }
    add_capabilities_for_scopes(decision_task)
    if env is not None:
        # Caller-supplied env must not shadow the built-in payload env.
        assert set(decision_task["payload"]["env"].keys()).isdisjoint(
            set(env.keys()))
        decision_task["payload"]["env"].update(env)

    pool = WorkerPool(
        workerPoolId=f"{WORKER_POOL_PREFIX}/{self.task_id}",
        providerId=PROVIDER_IDS[self.cloud],
        description=DESCRIPTION,
        owner=OWNER_EMAIL,
        emailOnError=True,
        config=config,
    )
    hook = Hook(
        hookGroupId=HOOK_PREFIX,
        hookId=self.task_id,
        name=self.task_id,
        description="Generated Fuzzing hook",
        owner=OWNER_EMAIL,
        emailOnError=True,
        schedule=list(self.cycle_crons()),
        task=decision_task,
        bindings=(),
        triggerSchema={},
    )
    # Role assumed by the hook; mirrors the decision task's scopes.
    role = Role(
        roleId=f"hook-id:{HOOK_PREFIX}/{self.task_id}",
        description=DESCRIPTION,
        scopes=all_scopes + decision_task_scopes,
    )
    return [pool, hook, role]
def build_resources(self, providers, machine_types, env=None):
    """Build the full tc-admin resources to compare and build the pool.

    Args:
        providers: mapping of cloud name -> provider object; must contain
            ``self.cloud``.
        machine_types: machine catalogue passed to ``get_machine_list``.
        env: optional extra environment variables for the decision task;
            keys must not collide with the templated payload env.

    Returns:
        list: ``[pool, hook, role]`` — the WorkerPool, its triggering Hook,
        and the Role granting the hook its scopes.
    """
    # Select a cloud provider according to configuration
    assert self.cloud in providers, f"Cloud Provider {self.cloud} not available"
    provider = providers[self.cloud]

    # Union of scopes across all pools (tuple of a set, so order is arbitrary
    # until sorted below).
    pools = list(self.iterpools())
    all_scopes = tuple(
        set(chain.from_iterable(pool.scopes for pool in pools)))

    # Build the pool configuration for selected machines
    machines = self.get_machine_list(machine_types)
    config = {
        "launchConfigs": provider.build_launch_configs(self.imageset,
                                                       machines,
                                                       self.disk_size),
        "lifecycle": {
            # give workers 15 minutes to register before assuming they're broken
            "registrationTimeout": parse_time("15m"),
            "reregistrationTimeout": parse_time("4d"),
        },
        # Twice the total task count across pools, with a floor of 3.
        "maxCapacity": max(sum(pool.tasks for pool in pools) * 2, 3),
        "minCapacity": 0,
    }

    # Build the decision task payload that will trigger the new fuzzing tasks
    # (DECISION_TASK is presumably a string.Template of YAML — hence the
    # escaped newlines in the description).
    decision_task = yaml.safe_load(
        DECISION_TASK.substitute(
            description=DESCRIPTION.replace("\n", "\\n"),
            max_run_time=parse_time("1h"),
            owner_email=OWNER_EMAIL,
            pool_id=self.pool_id,
            provisioner=PROVISIONER_ID,
            scheduler=SCHEDULER_ID,
            secret=DECISION_TASK_SECRET,
            task_id=self.task_id,
        ))
    # Merge the templated scopes with the pools' scopes, sorted for stable
    # comparison by tc-admin.
    decision_task["scopes"] = sorted(
        chain(decision_task["scopes"], all_scopes))
    add_capabilities_for_scopes(decision_task)
    if env is not None:
        # Caller-supplied env must not shadow the templated payload env.
        assert set(decision_task["payload"]["env"]).isdisjoint(set(env))
        decision_task["payload"]["env"].update(env)

    pool = WorkerPool(
        config=config,
        description=DESCRIPTION.replace("\n", "\\n"),
        emailOnError=True,
        owner=OWNER_EMAIL,
        providerId=PROVIDER_IDS[self.cloud],
        workerPoolId=f"{WORKER_POOL_PREFIX}/{self.task_id}",
    )
    hook = Hook(
        bindings=(),
        description=DESCRIPTION,
        emailOnError=True,
        hookGroupId=HOOK_PREFIX,
        hookId=self.task_id,
        name=self.task_id,
        owner=OWNER_EMAIL,
        schedule=list(self.cycle_crons()),
        task=decision_task,
        triggerSchema={},
    )
    # Role assumed by the hook; mirrors the decision task's scopes.
    role = Role(
        roleId=f"hook-id:{HOOK_PREFIX}/{self.task_id}",
        description=DESCRIPTION,
        scopes=decision_task["scopes"],
    )
    return [pool, hook, role]
def build_resources(self, providers, machine_types, env=None):
    """Build the full tc-admin resources to compare and build the pool.

    Args:
        providers: mapping of cloud name -> provider object; must contain
            ``self.cloud``.
        machine_types: machine catalogue passed to ``get_machine_list``.
        env: optional extra environment variables for the decision task;
            keys must not collide with the templated payload env.

    Returns:
        list: ``[pool, hook, role]`` — the WorkerPool, its triggering Hook,
        and the Role granting the hook its scopes.
    """
    # Select a cloud provider according to configuration
    assert self.cloud in providers, f"Cloud Provider {self.cloud} not available"
    provider = providers[self.cloud]

    # Build the pool configuration for selected machines
    machines = self.get_machine_list(machine_types)
    config = {
        "launchConfigs": provider.build_launch_configs(self.imageset,
                                                       machines,
                                                       self.disk_size),
        "lifecycle": {
            # give workers 15 minutes to register before assuming they're broken
            "registrationTimeout": parse_time("15m"),
            "reregistrationTimeout": parse_time("4d"),
        },
        "maxCapacity": (
            # add +1 to expected size, so if we manually trigger the hook, the new
            # decision can run without also manually cancelling a task
            # * 2 since Taskcluster seems to not reuse workers very quickly in some
            # cases, so we end up with a lot of pending tasks.
            max(1, math.ceil(self.max_run_time / self.cycle_time)) *
            self.tasks * 2 + 1),
        "minCapacity": 0,
    }

    # Build the decision task payload that will trigger the new fuzzing tasks
    # (DECISION_TASK is presumably a string.Template of YAML — hence the
    # escaped newlines in the description).
    decision_task = yaml.safe_load(
        DECISION_TASK.substitute(
            description=DESCRIPTION.replace("\n", "\\n"),
            max_run_time=parse_time("1h"),
            owner_email=OWNER_EMAIL,
            pool_id=self.pool_id,
            provisioner=PROVISIONER_ID,
            scheduler=SCHEDULER_ID,
            secret=DECISION_TASK_SECRET,
            task_id=self.task_id,
        ))
    # Merge the templated scopes with this pool's scopes, sorted for stable
    # comparison by tc-admin.
    decision_task["scopes"] = sorted(
        chain(decision_task["scopes"], self.scopes))
    add_capabilities_for_scopes(decision_task)
    if env is not None:
        # Caller-supplied env must not shadow the templated payload env.
        assert set(decision_task["payload"]["env"]).isdisjoint(set(env))
        decision_task["payload"]["env"].update(env)

    pool = WorkerPool(
        config=config,
        description=DESCRIPTION,
        emailOnError=True,
        owner=OWNER_EMAIL,
        providerId=PROVIDER_IDS[self.cloud],
        workerPoolId=f"{WORKER_POOL_PREFIX}/{self.task_id}",
    )
    hook = Hook(
        bindings=(),
        description=DESCRIPTION,
        emailOnError=True,
        hookGroupId=HOOK_PREFIX,
        hookId=self.task_id,
        name=self.task_id,
        owner=OWNER_EMAIL,
        schedule=list(self.cycle_crons()),
        task=decision_task,
        triggerSchema={},
    )
    # Role assumed by the hook; mirrors the decision task's scopes.
    role = Role(
        description=DESCRIPTION,
        roleId=f"hook-id:{HOOK_PREFIX}/{self.task_id}",
        scopes=decision_task["scopes"],
    )
    return [pool, hook, role]
def build_resources(
    self,
    providers: Dict[str, Provider],
    machine_types: MachineTypes,
    env=None,
) -> Generator[Union[WorkerPool, Hook, Role], None, None]:
    """Build the full tc-admin resources to compare and build the pool.

    Args:
        providers: mapping of cloud name -> Provider; must contain ``self.cloud``.
        machine_types: machine catalogue passed to ``get_machine_list``.
        env: optional extra environment variables for the decision task;
            keys must not collide with the templated payload env.

    Yields:
        The WorkerPool (skipped for static clouds), the triggering Hook,
        and the Role granting the hook its scopes.
    """
    # Select a cloud provider according to configuration
    assert self.cloud in providers, f"Cloud Provider {self.cloud} not available"
    provider = providers[self.cloud]

    # Union of scopes across all pools (tuple of a set, so order is arbitrary
    # until sorted below).
    pools = list(self.iterpools())
    all_scopes = tuple(
        set(
            chain.from_iterable(
                cast(PoolConfiguration, pool).get_scopes() for pool in pools
            )
        )
    )

    # Build the pool configuration for selected machines
    machines = self.get_machine_list(machine_types)
    assert self.imageset is not None
    assert machines is not None
    assert self.disk_size is not None
    assert self.platform is not None
    config: Dict[str, object] = {
        "launchConfigs": provider.build_launch_configs(
            self.imageset, machines, self.disk_size, self.platform
        ),
        # Twice the total task count across pools, with a floor of 3.
        "maxCapacity": max(sum(pool.tasks for pool in pools if pool.tasks) * 2, 3),
        "minCapacity": 0,
    }
    if self.platform == "linux":
        config["lifecycle"] = {
            # give workers 15 minutes to register before assuming they're broken
            "registrationTimeout": parse_time("15m"),
            "reregistrationTimeout": parse_time("4d"),
        }

    # Build the decision task payload that will trigger the new fuzzing tasks
    # (DECISION_TASK is presumably a string.Template of YAML — hence the
    # escaped newlines in the description).
    decision_task = yaml.safe_load(
        DECISION_TASK.substitute(
            description=DESCRIPTION.replace("\n", "\\n"),
            max_run_time=parse_time("1h"),
            owner_email=OWNER_EMAIL,
            pool_id=self.pool_id,
            provisioner=PROVISIONER_ID,
            scheduler=SCHEDULER_ID,
            secret=DECISION_TASK_SECRET,
            task_id=self.task_id,
        )
    )
    # Merge the templated scopes with the pools' scopes, sorted for stable
    # comparison by tc-admin.
    decision_task["scopes"] = sorted(chain(decision_task["scopes"], all_scopes))
    add_capabilities_for_scopes(decision_task)
    if env is not None:
        # Caller-supplied env must not shadow the templated payload env.
        assert set(decision_task["payload"]["env"]).isdisjoint(set(env))
        decision_task["payload"]["env"].update(env)

    assert self.cloud is not None
    # Static clouds have no provisioned worker pool — only the hook and role.
    if self.cloud != "static":
        yield WorkerPool(
            config=config,
            description=DESCRIPTION.replace("\n", "\\n"),
            emailOnError=True,
            owner=OWNER_EMAIL,
            providerId=PROVIDER_IDS[self.cloud],
            workerPoolId=f"{WORKER_POOL_PREFIX}/{self.task_id}",
        )

    self_cycle_crons = self.cycle_crons()
    assert self_cycle_crons is not None
    yield Hook(
        bindings=(),
        description=DESCRIPTION,
        emailOnError=True,
        hookGroupId=HOOK_PREFIX,
        hookId=self.task_id,
        name=self.task_id,
        owner=OWNER_EMAIL,
        # Fixed: use the already-computed (and asserted) value instead of
        # calling self.cycle_crons() a second time and leaving the local unused.
        schedule=list(self_cycle_crons),
        task=decision_task,
        triggerSchema={},
    )
    # Role assumed by the hook; the extra scope lets it create CI tasks.
    yield Role(
        roleId=f"hook-id:{HOOK_PREFIX}/{self.task_id}",
        description=DESCRIPTION,
        scopes=decision_task["scopes"]
        + ["queue:create-task:highest:proj-fuzzing/ci"],
    )