def test_from_resources():
    """A Resolver built via from_resources expands scopes of managed roles."""
    role_list = [
        Role(roleId="role1", description="1", scopes=["one"]),
        Role(roleId="role2", description="2", scopes=["two"]),
    ]
    res = Resolver.from_resources(
        Resources(resources=role_list, managed=["Role=role*"]))
    expanded = res.expandScopes(["assume:role1"])
    assert sorted(expanded) == ["assume:role1", "one"]
async def test_fetch_roles_managed(AuthForRoles, make_role):
    """A role returned by the API that matches a managed pattern is fetched."""
    manage_everything = Resources([], [".*"])
    fetched_role = make_role()
    AuthForRoles.roles.append(fetched_role)
    await fetch_roles(manage_everything)
    assert [Role.from_api(fetched_role)] == list(manage_everything)
async def build_worker_pool(workerPoolId, cfg, secret_values):
    """Build the resources for a single worker pool.

    Returns a (workerpool, secret, role) triple; `secret` and `role` may be
    None when the cloud/implementation produces no secret template or scopes.
    Any error during construction is wrapped in a RuntimeError naming the pool.
    """
    try:
        # Cloud-specific construction first; the cloud builder receives the
        # whole cfg dict plus the resolved image set.
        image_set = await get_image_set(cfg["imageset"])
        wp = CLOUD_FUNCS[cfg["cloud"]](
            secret_values=secret_values,
            image_set=image_set,
            **cfg,
        )
        if wp.supports_worker_config():
            wp.merge_worker_config(
                # The order is important here: earlier entries take precedence
                # over later entries.
                cfg.get("workerConfig", {}),
                image_set.workerConfig,
                WorkerPoolSettings.EXISTING_CONFIG,
            )
        # Wrap the cloud-specific pool with the worker-implementation builder
        # (key is the implementation name with dashes mapped to underscores).
        wp = WORKER_IMPLEMENTATION_FUNCS[
            image_set.workerImplementation.replace("-", "_")](
                secret_values=secret_values,
                wp=wp,
                **cfg,
            )
    except Exception as e:
        raise RuntimeError(
            "Error generating worker pool configuration for {}".format(
                workerPoolId)) from e
    if wp.secret_tpl:
        if secret_values:
            # Render the implementation's secret template with real values.
            secret = Secret(
                name="worker-pool:{}".format(workerPoolId),
                secret=secret_values.render(wp.secret_tpl),
            )
        else:
            # No values available: emit a placeholder secret with no content.
            secret = Secret(name="worker-pool:{}".format(workerPoolId))
    else:
        secret = None
    if wp.scopes:
        # Grant the implementation's scopes via the pool's worker-pool role.
        role = Role(
            roleId="worker-pool:{}".format(workerPoolId),
            description="Scopes for image set `{}` and cloud `{}`.".format(
                image_set.name, cfg["cloud"]),
            scopes=wp.scopes,
        )
    else:
        role = None
    workerpool = WorkerPool(
        workerPoolId=workerPoolId,
        description=cfg.get("description", ""),
        owner=cfg.get("owner", "*****@*****.**"),
        emailOnError=cfg.get("emailOnError", False),
        providerId=wp.provider_id,
        config=wp.config,
    )
    return workerpool, secret, role
async def test_fetch_roles_unmanaged(AuthForRoles, make_role):
    """Roles outside the managed patterns are ignored when fetching."""
    partly_managed = Resources([], ["Role=managed*"])
    managed = make_role(roleId="managed-role")
    unmanaged = make_role(roleId="un-managed-role")
    AuthForRoles.roles.extend([managed, unmanaged])
    await fetch_roles(partly_managed)
    assert [Role.from_api(managed)] == list(partly_managed)
def update_resources(self, resources):
    """Add a Role carrying this grant's scopes for every target role id.

    Target ids that are not yet managed are added to the managed set
    (escaped, so the exact id is managed rather than a pattern).
    """
    granted_scopes = self.grant
    for role_id in self.to:
        resource_id = f"Role={role_id}"
        # Ensure the exact id is covered by the managed patterns.
        if not resources.is_managed(resource_id):
            resources.manage(re.escape(resource_id))
        resources.add(
            Role(roleId=role_id, description="", scopes=granted_scopes))
async def update_resources(resources):
    """Manage and add the wildcard login-identity role.

    The role lets anyone who logs in manage clients namespaced under
    their own identity (`<..>` is expanded per-identity by Taskcluster).
    """
    resources.manage(r"Role=login-identity:.*")
    identity_scopes = [
        'auth:create-client:<..>/*',
        'auth:delete-client:<..>/*',
        'auth:reset-access-token:<..>/*',
        'auth:update-client:<..>/*',
    ]
    resources.add(Role(
        roleId="login-identity:*",
        description=textwrap.dedent("""\
            Scopes for anyone who logs into the service; see
            [login-identities docs](https://docs.taskcluster.net/docs/manual/design/conventions/login-identities)."""),
        scopes=identity_scopes))
async def register_roles(resources):
    """Register roles from roles.yml, excluding externally-managed ids.

    Externally-managed role ids are collected and excluded from the managed
    patterns (via re_not_match) instead of being added as resources.
    """
    externally_managed = []
    roles = []
    for config in parse_yaml("roles.yml"):
        if "externally-managed" in config:
            externally_managed.append(config["roleId"])
        else:
            roles.append(Role(**config))
    # Build one prefix that matches any role id NOT externally managed.
    role_re_prefix = "Role=" + re_not_match(externally_managed)
    for pattern in (
        "repo:github.com/servo/servo:.*",
        "hook-id:project-servo/.*",
        "project:servo:.*",
    ):
        resources.manage(role_re_prefix + pattern)
    for role in roles:
        resources.add(role)
async def update_resources(resources):
    """Add per-project roles, worker pools, clients, and grants.

    For every project: admin roles (asserted to match an allowed prefix),
    an optional project-admin role covering its repos, worker pools (managed
    individually when the project is externally managed), clients, and any
    grants the project declares.
    """
    projects = await Projects.load(loader)
    for project in projects.values():
        for roleId in project.adminRoles:
            # Admin roles must live under one of the known prefixes.
            assert any(roleId.startswith(p) for p in ADMIN_ROLE_PREFIXES)
            resources.add(Role(
                roleId=roleId,
                description="",
                scopes=['assume:project-admin:{}'.format(project.name)]))
        if project.repos:
            resources.add(Role(
                roleId='project-admin:{}'.format(project.name),
                description="",
                scopes=['assume:repo-admin:{}'.format(repo)
                        for repo in project.repos]))
        if project.workerPools:
            for name, worker_pool in project.workerPools.items():
                worker_pool_id = 'proj-{}/{}'.format(project.name, name)
                if project.externallyManaged:
                    resources.manage('WorkerPool={}'.format(worker_pool_id))
                worker_pool['description'] = "Workers for " + project.name
                resources.add(build_worker_pool(worker_pool_id, worker_pool))
        if project.clients:
            for name, info in project.clients.items():
                clientId = 'project/{}/{}'.format(project.name, name)
                if project.externallyManaged:
                    # BUG FIX: was `client_id`, an undefined name, which
                    # raised NameError for externally-managed projects.
                    resources.manage('Client={}'.format(clientId))
                description = info.get('description', '')
                scopes = info['scopes']
                resources.add(Client(
                    clientId=clientId,
                    description=description,
                    scopes=scopes))
        for grant in Grants.from_project(project):
            grant.update_resources(resources)
async def update_resources(resources):
    """Manage project role/worker-pool patterns and add project resources."""
    resources.manage(r"Role=project-admin:.*")
    resources.manage('Role=repo:.*')
    for prefix in ADMIN_ROLE_PREFIXES:
        resources.manage(r'Role={}.*'.format(prefix))
    all_projects = await Projects.load(loader)
    for proj in all_projects.values():
        admin_scope = 'assume:project-admin:{}'.format(proj.name)
        for admin_role_id in proj.adminRoles:
            # Admin roles must live under one of the known prefixes.
            assert any(
                admin_role_id.startswith(p) for p in ADMIN_ROLE_PREFIXES)
            resources.add(
                Role(roleId=admin_role_id,
                     description="",
                     scopes=[admin_scope]))
        if proj.workerPools:
            resources.manage('WorkerPool=proj-{}/.*'.format(proj.name))
            for pool_name, pool_cfg in proj.workerPools.items():
                pool_id = 'proj-{}/{}'.format(proj.name, pool_name)
                pool_cfg['description'] = "Workers for " + proj.name
                resources.add(build_worker_pool(pool_id, pool_cfg))
        for grant in Grants.from_project(proj):
            grant.update_resources(resources)
async def update_resources(resources, secret_values):
    """Add per-project roles, worker pools, clients, secrets, hooks, grants.

    When a project is externally managed only individually-named resources
    are added to the managed set (manage_individual_resources()); otherwise
    broader patterns are assumed to be managed elsewhere.
    """
    projects = await Projects.load(loader)
    for project in projects.values():
        for roleId in project.adminRoles:
            # Admin roles must live under one of the known prefixes.
            assert any(roleId.startswith(p) for p in ADMIN_ROLE_PREFIXES)
            resources.add(
                Role(
                    roleId=roleId,
                    description="",
                    scopes=["assume:project-admin:{}".format(project.name)],
                ))
        if project.repos:
            for repo in project.repos:
                assert repo.endswith("/*") or repo.endswith(
                    ":*"
                ), "project.repos should end with `/*` or `:*`, got {}".format(
                    repo)
            resources.add(
                Role(
                    roleId="project-admin:{}".format(project.name),
                    description="",
                    scopes=[
                        "assume:repo-admin:{}".format(repo)
                        for repo in project.repos
                    ],
                ))
        if project.workerPools:
            for name, worker_pool in project.workerPools.items():
                worker_pool_id = "proj-{}/{}".format(project.name, name)
                worker_pool["description"] = "Workers for " + project.name
                worker_pool, secret = build_worker_pool(
                    worker_pool_id, worker_pool, secret_values)
                if project.externallyManaged.manage_individual_resources():
                    resources.manage("WorkerPool={}".format(worker_pool_id))
                    if secret:
                        resources.manage(
                            "Secret=worker-pool:{}".format(worker_pool_id))
                resources.add(worker_pool)
                if secret:
                    resources.add(secret)
        if project.clients:
            for name, info in project.clients.items():
                clientId = "project/{}/{}".format(project.name, name)
                if project.externallyManaged.manage_individual_resources():
                    resources.manage("Client={}".format(clientId))
                description = info.get("description", "")
                scopes = info["scopes"]
                resources.add(
                    Client(clientId=clientId,
                           description=description,
                           scopes=scopes))
        if project.secrets:
            for nameSuffix, info in project.secrets.items():
                # `true` in the config means "exists, but managed by hand":
                # don't emit a resource for it.
                if info is True:
                    continue
                name = "project/{}/{}".format(project.name, nameSuffix)
                if project.externallyManaged.manage_individual_resources():
                    resources.manage("Secret={}".format(name))
                if secret_values:
                    resources.add(
                        Secret(name=name, secret=secret_values.render(info)))
                else:
                    resources.add(Secret(name=name))
        if project.hooks:
            for hookId, info in project.hooks.items():
                hookGroupId = "project-{}".format(project.name)
                if project.externallyManaged.manage_individual_resources():
                    # BUG FIX: was `hookGroupid` (lowercase `i`), an
                    # undefined name, raising NameError for externally
                    # managed projects with hooks.
                    resources.manage("Hook={}/{}".format(hookGroupId, hookId))
                assert (
                    "bindings" not in info
                ), "Please add support for bindings to use this feature"
                resources.add(
                    Hook(
                        hookGroupId=hookGroupId,
                        hookId=hookId,
                        name=info.get("name", hookId),
                        description=info.get("description", ""),
                        owner=info["owner"],
                        emailOnError=info.get("emailOnError", False),
                        schedule=info.get("schedule", ()),
                        bindings=info.get("bindings", ()),
                        task=info["task"],
                        triggerSchema=info.get("triggerSchema", {}),
                    ))
        for grant in Grants.from_project(project):
            if project.externallyManaged.manage_individual_resources():
                for role in grant.to:
                    resources.manage("Role=" + re.escape(role))
            grant.update_resources(resources)
async def register_roles(resources):
    """Manage the servo role namespaces and add every role from roles.yml."""
    for pattern in (
        "Role=repo:github.com/servo/servo:.*",
        "Role=hook-id:project-servo/.*",
        "Role=project:servo:.*",
    ):
        resources.manage(pattern)
    for config in parse_yaml("roles.yml"):
        resources.add(Role(**config))
def build_resources(self, providers, machine_types, env=None):
    """Build the full tc-admin resources to compare and build the pool

    Returns a [WorkerPool, Hook, Role] list: the fuzzing worker pool, the
    hook whose task is the decision task built inline below, and the role
    granting that hook its scopes. `env` (optional dict) is merged into the
    decision task's environment and must not collide with existing keys.
    """
    # Select a cloud provider according to configuration
    assert self.cloud in providers, f"Cloud Provider {self.cloud} not available"
    provider = providers[self.cloud]
    pools = list(self.iterpools())
    # Union of scopes across all sub-pools (deduplicated via set).
    all_scopes = tuple(
        set(itertools.chain.from_iterable(pool.scopes for pool in pools)))
    # Build the pool configuration for selected machines
    machines = self.get_machine_list(machine_types)
    config = {
        "minCapacity": 0,
        # Twice the total task count, but at least 3 workers.
        "maxCapacity": max(sum(pool.tasks for pool in pools) * 2, 3),
        "launchConfigs": provider.build_launch_configs(self.imageset,
                                                       machines,
                                                       self.disk_size),
        "lifecycle": {
            # give workers 15 minutes to register before assuming they're broken
            "registrationTimeout": parse_time("15m"),
            "reregistrationTimeout": parse_time("12h"),
        },
    }
    # Mandatory scopes to execute the hook
    # or create new tasks
    decision_task_scopes = (
        f"queue:scheduler-id:{SCHEDULER_ID}",
        f"queue:create-task:highest:{PROVISIONER_ID}/{self.task_id}",
        f"secrets:get:{DECISION_TASK_SECRET}",
    )
    # Build the decision task payload that will trigger the new fuzzing tasks
    decision_task = {
        "created": {
            "$fromNow": "0 seconds"
        },
        "deadline": {
            "$fromNow": "1 hour"
        },
        "expires": {
            "$fromNow": "1 week"
        },
        "extra": {},
        "metadata": {
            "description": DESCRIPTION,
            "name": f"Fuzzing decision {self.task_id}",
            "owner": OWNER_EMAIL,
            "source": "https://github.com/MozillaSecurity/fuzzing-tc",
        },
        "payload": {
            "artifacts": {},
            "cache": {},
            "capabilities": {},
            "env": {
                "TASKCLUSTER_SECRET": DECISION_TASK_SECRET
            },
            "features": {
                "taskclusterProxy": True
            },
            "image": {
                "type": "indexed-image",
                "path": "public/fuzzing-tc-decision.tar",
                "namespace": "project.fuzzing.config.master",
            },
            "command": ["fuzzing-decision", self.pool_id],
            "maxRunTime": parse_time("1h"),
        },
        "priority": "high",
        "provisionerId": PROVISIONER_ID,
        "workerType": self.task_id,
        "retries": 5,
        "routes": [],
        "schedulerId": SCHEDULER_ID,
        "scopes": all_scopes + decision_task_scopes,
        "tags": {},
    }
    add_capabilities_for_scopes(decision_task)
    if env is not None:
        # Caller-supplied env must not overwrite the task's own variables.
        assert set(decision_task["payload"]["env"].keys()).isdisjoint(
            set(env.keys()))
        decision_task["payload"]["env"].update(env)
    pool = WorkerPool(
        workerPoolId=f"{WORKER_POOL_PREFIX}/{self.task_id}",
        providerId=PROVIDER_IDS[self.cloud],
        description=DESCRIPTION,
        owner=OWNER_EMAIL,
        emailOnError=True,
        config=config,
    )
    hook = Hook(
        hookGroupId=HOOK_PREFIX,
        hookId=self.task_id,
        name=self.task_id,
        description="Generated Fuzzing hook",
        owner=OWNER_EMAIL,
        emailOnError=True,
        schedule=list(self.cycle_crons()),
        task=decision_task,
        bindings=(),
        triggerSchema={},
    )
    # The hook's role carries the same scopes embedded in the task above.
    role = Role(
        roleId=f"hook-id:{HOOK_PREFIX}/{self.task_id}",
        description=DESCRIPTION,
        scopes=all_scopes + decision_task_scopes,
    )
    return [pool, hook, role]
def build_resources(self, providers, machine_types, env=None):
    """Build the full tc-admin resources to compare and build the pool

    Returns a [WorkerPool, Hook, Role] list. The decision task comes from
    the DECISION_TASK string template rendered through yaml.safe_load; its
    scopes are extended with the union of all sub-pool scopes. `env`
    (optional dict) is merged into the task environment and must not
    collide with existing keys.
    """
    # Select a cloud provider according to configuration
    assert self.cloud in providers, f"Cloud Provider {self.cloud} not available"
    provider = providers[self.cloud]
    pools = list(self.iterpools())
    # Union of scopes across all sub-pools (deduplicated via set).
    all_scopes = tuple(
        set(chain.from_iterable(pool.scopes for pool in pools)))
    # Build the pool configuration for selected machines
    machines = self.get_machine_list(machine_types)
    config = {
        "launchConfigs": provider.build_launch_configs(self.imageset,
                                                       machines,
                                                       self.disk_size),
        "lifecycle": {
            # give workers 15 minutes to register before assuming they're broken
            "registrationTimeout": parse_time("15m"),
            "reregistrationTimeout": parse_time("4d"),
        },
        # Twice the total task count, but at least 3 workers.
        "maxCapacity": max(sum(pool.tasks for pool in pools) * 2, 3),
        "minCapacity": 0,
    }
    # Build the decision task payload that will trigger the new fuzzing tasks
    decision_task = yaml.safe_load(
        DECISION_TASK.substitute(
            # Newlines are escaped so the description survives the YAML round-trip.
            description=DESCRIPTION.replace("\n", "\\n"),
            max_run_time=parse_time("1h"),
            owner_email=OWNER_EMAIL,
            pool_id=self.pool_id,
            provisioner=PROVISIONER_ID,
            scheduler=SCHEDULER_ID,
            secret=DECISION_TASK_SECRET,
            task_id=self.task_id,
        ))
    decision_task["scopes"] = sorted(
        chain(decision_task["scopes"], all_scopes))
    add_capabilities_for_scopes(decision_task)
    if env is not None:
        # Caller-supplied env must not overwrite the task's own variables.
        assert set(decision_task["payload"]["env"]).isdisjoint(set(env))
        decision_task["payload"]["env"].update(env)
    pool = WorkerPool(
        config=config,
        description=DESCRIPTION.replace("\n", "\\n"),
        emailOnError=True,
        owner=OWNER_EMAIL,
        providerId=PROVIDER_IDS[self.cloud],
        workerPoolId=f"{WORKER_POOL_PREFIX}/{self.task_id}",
    )
    hook = Hook(
        bindings=(),
        description=DESCRIPTION,
        emailOnError=True,
        hookGroupId=HOOK_PREFIX,
        hookId=self.task_id,
        name=self.task_id,
        owner=OWNER_EMAIL,
        schedule=list(self.cycle_crons()),
        task=decision_task,
        triggerSchema={},
    )
    # The hook's role carries the same scopes as the decision task.
    role = Role(
        roleId=f"hook-id:{HOOK_PREFIX}/{self.task_id}",
        description=DESCRIPTION,
        scopes=decision_task["scopes"],
    )
    return [pool, hook, role]
def build_resources(self, providers, machine_types, env=None):
    """Build the full tc-admin resources to compare and build the pool

    Returns a [WorkerPool, Hook, Role] list. Unlike the multi-pool variant,
    capacity is derived from this pool's own max_run_time / cycle_time /
    tasks, and the role scopes come from self.scopes. `env` (optional dict)
    is merged into the task environment and must not collide.
    """
    # Select a cloud provider according to configuration
    assert self.cloud in providers, f"Cloud Provider {self.cloud} not available"
    provider = providers[self.cloud]
    # Build the pool configuration for selected machines
    machines = self.get_machine_list(machine_types)
    config = {
        "launchConfigs": provider.build_launch_configs(self.imageset,
                                                       machines,
                                                       self.disk_size),
        "lifecycle": {
            # give workers 15 minutes to register before assuming they're broken
            "registrationTimeout": parse_time("15m"),
            "reregistrationTimeout": parse_time("4d"),
        },
        "maxCapacity": (
            # add +1 to expected size, so if we manually trigger the hook, the new
            # decision can run without also manually cancelling a task
            # * 2 since Taskcluster seems to not reuse workers very quickly in some
            # cases, so we end up with a lot of pending tasks.
            max(1, math.ceil(self.max_run_time / self.cycle_time)) *
            self.tasks * 2 + 1),
        "minCapacity": 0,
    }
    # Build the decision task payload that will trigger the new fuzzing tasks
    decision_task = yaml.safe_load(
        DECISION_TASK.substitute(
            # Newlines are escaped so the description survives the YAML round-trip.
            description=DESCRIPTION.replace("\n", "\\n"),
            max_run_time=parse_time("1h"),
            owner_email=OWNER_EMAIL,
            pool_id=self.pool_id,
            provisioner=PROVISIONER_ID,
            scheduler=SCHEDULER_ID,
            secret=DECISION_TASK_SECRET,
            task_id=self.task_id,
        ))
    decision_task["scopes"] = sorted(
        chain(decision_task["scopes"], self.scopes))
    add_capabilities_for_scopes(decision_task)
    if env is not None:
        # Caller-supplied env must not overwrite the task's own variables.
        assert set(decision_task["payload"]["env"]).isdisjoint(set(env))
        decision_task["payload"]["env"].update(env)
    pool = WorkerPool(
        config=config,
        description=DESCRIPTION,
        emailOnError=True,
        owner=OWNER_EMAIL,
        providerId=PROVIDER_IDS[self.cloud],
        workerPoolId=f"{WORKER_POOL_PREFIX}/{self.task_id}",
    )
    hook = Hook(
        bindings=(),
        description=DESCRIPTION,
        emailOnError=True,
        hookGroupId=HOOK_PREFIX,
        hookId=self.task_id,
        name=self.task_id,
        owner=OWNER_EMAIL,
        schedule=list(self.cycle_crons()),
        task=decision_task,
        triggerSchema={},
    )
    # The hook's role carries the same scopes as the decision task.
    role = Role(
        description=DESCRIPTION,
        roleId=f"hook-id:{HOOK_PREFIX}/{self.task_id}",
        scopes=decision_task["scopes"],
    )
    return [pool, hook, role]
def update_resources(self, resources):
    """Add a Role carrying this grant's scopes for every target role id."""
    for target_role_id in self.to:
        new_role = Role(
            roleId=target_role_id, description='', scopes=self.grant)
        resources.add(new_role)
def build_resources(
    self,
    providers: Dict[str, Provider],
    machine_types: MachineTypes,
    env=None,
) -> Generator[Union[WorkerPool, Hook, Role], None, None]:
    """Build the full tc-admin resources to compare and build the pool

    Yields (in order): a WorkerPool (skipped for the "static" cloud), the
    Hook whose task is the rendered decision task, and the Role granting
    the hook its scopes plus the proj-fuzzing/ci create-task scope. `env`
    (optional dict) is merged into the task environment and must not
    collide with existing keys.
    """
    # Select a cloud provider according to configuration
    assert self.cloud in providers, f"Cloud Provider {self.cloud} not available"
    provider = providers[self.cloud]
    pools = list(self.iterpools())
    # Union of scopes across all sub-pools (deduplicated via set).
    all_scopes = tuple(
        set(
            chain.from_iterable(
                cast(PoolConfiguration, pool).get_scopes() for pool in pools
            )
        )
    )
    # Build the pool configuration for selected machines
    machines = self.get_machine_list(machine_types)
    # These fields are Optional on the config type; narrow them for mypy
    # before use.
    assert self.imageset is not None
    assert machines is not None
    assert self.disk_size is not None
    assert self.platform is not None
    config: Dict[str, object] = {
        "launchConfigs": provider.build_launch_configs(
            self.imageset, machines, self.disk_size, self.platform
        ),
        # Twice the total task count, but at least 3 workers.
        "maxCapacity": max(sum(pool.tasks for pool in pools if pool.tasks) * 2, 3),
        "minCapacity": 0,
    }
    if self.platform == "linux":
        config["lifecycle"] = {
            # give workers 15 minutes to register before assuming they're broken
            "registrationTimeout": parse_time("15m"),
            "reregistrationTimeout": parse_time("4d"),
        }
    # Build the decision task payload that will trigger the new fuzzing tasks
    decision_task = yaml.safe_load(
        DECISION_TASK.substitute(
            # Newlines are escaped so the description survives the YAML round-trip.
            description=DESCRIPTION.replace("\n", "\\n"),
            max_run_time=parse_time("1h"),
            owner_email=OWNER_EMAIL,
            pool_id=self.pool_id,
            provisioner=PROVISIONER_ID,
            scheduler=SCHEDULER_ID,
            secret=DECISION_TASK_SECRET,
            task_id=self.task_id,
        )
    )
    decision_task["scopes"] = sorted(chain(decision_task["scopes"], all_scopes))
    add_capabilities_for_scopes(decision_task)
    if env is not None:
        # Caller-supplied env must not overwrite the task's own variables.
        assert set(decision_task["payload"]["env"]).isdisjoint(set(env))
        decision_task["payload"]["env"].update(env)
    assert self.cloud is not None
    # Static clouds have no provisioned pool; only the hook and role apply.
    if self.cloud != "static":
        yield WorkerPool(
            config=config,
            description=DESCRIPTION.replace("\n", "\\n"),
            emailOnError=True,
            owner=OWNER_EMAIL,
            providerId=PROVIDER_IDS[self.cloud],
            workerPoolId=f"{WORKER_POOL_PREFIX}/{self.task_id}",
        )
    # NOTE(review): cycle_crons() is called twice (once for the None check,
    # once for the schedule) — presumably cheap/deterministic; verify.
    self_cycle_crons = self.cycle_crons()
    assert self_cycle_crons is not None
    yield Hook(
        bindings=(),
        description=DESCRIPTION,
        emailOnError=True,
        hookGroupId=HOOK_PREFIX,
        hookId=self.task_id,
        name=self.task_id,
        owner=OWNER_EMAIL,
        schedule=list(self.cycle_crons()),
        task=decision_task,
        triggerSchema={},
    )
    yield Role(
        roleId=f"hook-id:{HOOK_PREFIX}/{self.task_id}",
        description=DESCRIPTION,
        # CI create-task scope is added on top of the decision task's scopes.
        scopes=decision_task["scopes"]
        + ["queue:create-task:highest:proj-fuzzing/ci"],
    )