def build_worker_pool(workerPoolId, cfg, secret_values):
    try:
        wp = TYPES[cfg["type"]](secret_values=secret_values, **cfg)
    except Exception as e:
        raise RuntimeError(
            "Error generating worker pool configuration for {}".format(
                workerPoolId)) from e

    if secret_values:
        if "secret" in wp:
            secret = Secret(name="worker-pool:{}".format(workerPoolId),
                            secret=wp.pop("secret"))
        else:
            secret = None
    else:
        secret = Secret(name="worker-pool:{}".format(workerPoolId))

    workerpool = WorkerPool(
        workerPoolId=workerPoolId,
        description=cfg.get("description", ""),
        owner=cfg.get("owner", "*****@*****.**"),
        emailOnError=cfg.get("emailOnError", False),
        **wp,
    )

    return workerpool, secret

async def build_worker_pool(workerPoolId, cfg, secret_values):
    try:
        image_set = await get_image_set(cfg["imageset"])
        wp = CLOUD_FUNCS[cfg["cloud"]](
            secret_values=secret_values,
            image_set=image_set,
            **cfg,
        )
        if wp.supports_worker_config():
            wp.merge_worker_config(
                # The order is important here: earlier entries take precedence
                # over later entries.
                cfg.get("workerConfig", {}),
                image_set.workerConfig,
                WorkerPoolSettings.EXISTING_CONFIG,
            )
        wp = WORKER_IMPLEMENTATION_FUNCS[
            image_set.workerImplementation.replace("-", "_")](
                secret_values=secret_values,
                wp=wp,
                **cfg,
            )
    except Exception as e:
        raise RuntimeError(
            "Error generating worker pool configuration for {}".format(
                workerPoolId)) from e

    if wp.secret_tpl:
        if secret_values:
            secret = Secret(
                name="worker-pool:{}".format(workerPoolId),
                secret=secret_values.render(wp.secret_tpl),
            )
        else:
            secret = Secret(name="worker-pool:{}".format(workerPoolId))
    else:
        secret = None

    if wp.scopes:
        role = Role(
            roleId="worker-pool:{}".format(workerPoolId),
            description="Scopes for image set `{}` and cloud `{}`.".format(
                image_set.name, cfg["cloud"]),
            scopes=wp.scopes,
        )
    else:
        role = None

    workerpool = WorkerPool(
        workerPoolId=workerPoolId,
        description=cfg.get("description", ""),
        owner=cfg.get("owner", "*****@*****.**"),
        emailOnError=cfg.get("emailOnError", False),
        providerId=wp.provider_id,
        config=wp.config,
    )

    return workerpool, secret, role

def build_worker_pool(workerPoolId, cfg):
    try:
        wp = TYPES[cfg['type']](**cfg)
    except Exception as e:
        raise RuntimeError('Error generating worker pool configuration for {}'.format(workerPoolId)) from e
    return WorkerPool(
        workerPoolId=workerPoolId,
        description=cfg.get('description', ''),
        owner=cfg.get('owner', '*****@*****.**'),
        emailOnError=cfg.get('emailOnError', False),
        **wp)

def build_worker_pool(workerPoolId, cfg): try: wp = TYPES[cfg["type"]](**cfg) except Exception as e: raise RuntimeError( "Error generating worker pool configuration for {}".format( workerPoolId)) from e return WorkerPool( workerPoolId=workerPoolId, description=cfg.get("description", ""), owner=cfg.get("owner", "*****@*****.**"), emailOnError=cfg.get("emailOnError", False), **wp, )
async def register_worker_pools(resources):
    externally_managed = []
    pools = []
    for name, config in parse_yaml("worker-pools.yml").items():
        kind = config.pop("kind")
        if kind == "externally-managed":
            externally_managed.append(name)
        elif kind == "static":
            pass
        else:
            builder = {
                "aws_windows": aws_windows,
            }[kind]
            pools.append(WorkerPool(
                workerPoolId="proj-servo/" + name,
                description="Servo `%s` workers" % name,
                owner="*****@*****.**",
                emailOnError=False,
                **builder(**config)
            ))
    resources.manage("WorkerPool=proj-servo/%s.*" % re_not_match(externally_managed))
    resources.update(pools)

def build_worker_pool(workerPoolId, cfg, secret_values, image_set):
    try:
        wp = CLOUD_FUNCS[cfg["cloud"]](
            secret_values=secret_values,
            image_set=image_set,
            **cfg,
        )
        wp = WORKER_IMPLEMENTATION_FUNCS[
            image_set.workerImplementation.replace("-", "_")](
                secret_values=secret_values,
                image_set=image_set,
                wp=wp,
                **cfg,
            )
    except Exception as e:
        raise RuntimeError(
            "Error generating worker pool configuration for {}".format(
                workerPoolId)) from e

    if secret_values:
        if "secret" in wp:
            secret = Secret(name="worker-pool:{}".format(workerPoolId),
                            secret=wp.pop("secret"))
        else:
            secret = None
    else:
        secret = Secret(name="worker-pool:{}".format(workerPoolId))

    workerpool = WorkerPool(
        workerPoolId=workerPoolId,
        description=cfg.get("description", ""),
        owner=cfg.get("owner", "*****@*****.**"),
        emailOnError=cfg.get("emailOnError", False),
        **wp,
    )

    return workerpool, secret

def build_resources(self, providers, machine_types, env=None):
    """Build the full tc-admin resources to compare and build the pool"""

    # Select a cloud provider according to configuration
    assert self.cloud in providers, f"Cloud Provider {self.cloud} not available"
    provider = providers[self.cloud]

    pools = list(self.iterpools())
    all_scopes = tuple(
        set(itertools.chain.from_iterable(pool.scopes for pool in pools)))

    # Build the pool configuration for selected machines
    machines = self.get_machine_list(machine_types)
    config = {
        "minCapacity": 0,
        "maxCapacity": max(sum(pool.tasks for pool in pools) * 2, 3),
        "launchConfigs": provider.build_launch_configs(self.imageset, machines,
                                                       self.disk_size),
        "lifecycle": {
            # give workers 15 minutes to register before assuming they're broken
            "registrationTimeout": parse_time("15m"),
            "reregistrationTimeout": parse_time("12h"),
        },
    }

    # Mandatory scopes to execute the hook
    # or create new tasks
    decision_task_scopes = (
        f"queue:scheduler-id:{SCHEDULER_ID}",
        f"queue:create-task:highest:{PROVISIONER_ID}/{self.task_id}",
        f"secrets:get:{DECISION_TASK_SECRET}",
    )

    # Build the decision task payload that will trigger the new fuzzing tasks
    decision_task = {
        "created": {"$fromNow": "0 seconds"},
        "deadline": {"$fromNow": "1 hour"},
        "expires": {"$fromNow": "1 week"},
        "extra": {},
        "metadata": {
            "description": DESCRIPTION,
            "name": f"Fuzzing decision {self.task_id}",
            "owner": OWNER_EMAIL,
            "source": "https://github.com/MozillaSecurity/fuzzing-tc",
        },
        "payload": {
            "artifacts": {},
            "cache": {},
            "capabilities": {},
            "env": {"TASKCLUSTER_SECRET": DECISION_TASK_SECRET},
            "features": {"taskclusterProxy": True},
            "image": {
                "type": "indexed-image",
                "path": "public/fuzzing-tc-decision.tar",
                "namespace": "project.fuzzing.config.master",
            },
            "command": ["fuzzing-decision", self.pool_id],
            "maxRunTime": parse_time("1h"),
        },
        "priority": "high",
        "provisionerId": PROVISIONER_ID,
        "workerType": self.task_id,
        "retries": 5,
        "routes": [],
        "schedulerId": SCHEDULER_ID,
        "scopes": all_scopes + decision_task_scopes,
        "tags": {},
    }

    add_capabilities_for_scopes(decision_task)
    if env is not None:
        assert set(decision_task["payload"]["env"].keys()).isdisjoint(
            set(env.keys()))
        decision_task["payload"]["env"].update(env)

    pool = WorkerPool(
        workerPoolId=f"{WORKER_POOL_PREFIX}/{self.task_id}",
        providerId=PROVIDER_IDS[self.cloud],
        description=DESCRIPTION,
        owner=OWNER_EMAIL,
        emailOnError=True,
        config=config,
    )

    hook = Hook(
        hookGroupId=HOOK_PREFIX,
        hookId=self.task_id,
        name=self.task_id,
        description="Generated Fuzzing hook",
        owner=OWNER_EMAIL,
        emailOnError=True,
        schedule=list(self.cycle_crons()),
        task=decision_task,
        bindings=(),
        triggerSchema={},
    )

    role = Role(
        roleId=f"hook-id:{HOOK_PREFIX}/{self.task_id}",
        description=DESCRIPTION,
        scopes=all_scopes + decision_task_scopes,
    )

    return [pool, hook, role]

def build_resources(self, providers, machine_types, env=None):
    """Build the full tc-admin resources to compare and build the pool"""

    # Select a cloud provider according to configuration
    assert self.cloud in providers, f"Cloud Provider {self.cloud} not available"
    provider = providers[self.cloud]

    pools = list(self.iterpools())
    all_scopes = tuple(
        set(chain.from_iterable(pool.scopes for pool in pools)))

    # Build the pool configuration for selected machines
    machines = self.get_machine_list(machine_types)
    config = {
        "launchConfigs": provider.build_launch_configs(self.imageset, machines,
                                                       self.disk_size),
        "lifecycle": {
            # give workers 15 minutes to register before assuming they're broken
            "registrationTimeout": parse_time("15m"),
            "reregistrationTimeout": parse_time("4d"),
        },
        "maxCapacity": max(sum(pool.tasks for pool in pools) * 2, 3),
        "minCapacity": 0,
    }

    # Build the decision task payload that will trigger the new fuzzing tasks
    decision_task = yaml.safe_load(
        DECISION_TASK.substitute(
            description=DESCRIPTION.replace("\n", "\\n"),
            max_run_time=parse_time("1h"),
            owner_email=OWNER_EMAIL,
            pool_id=self.pool_id,
            provisioner=PROVISIONER_ID,
            scheduler=SCHEDULER_ID,
            secret=DECISION_TASK_SECRET,
            task_id=self.task_id,
        ))
    decision_task["scopes"] = sorted(
        chain(decision_task["scopes"], all_scopes))
    add_capabilities_for_scopes(decision_task)
    if env is not None:
        assert set(decision_task["payload"]["env"]).isdisjoint(set(env))
        decision_task["payload"]["env"].update(env)

    pool = WorkerPool(
        config=config,
        description=DESCRIPTION.replace("\n", "\\n"),
        emailOnError=True,
        owner=OWNER_EMAIL,
        providerId=PROVIDER_IDS[self.cloud],
        workerPoolId=f"{WORKER_POOL_PREFIX}/{self.task_id}",
    )

    hook = Hook(
        bindings=(),
        description=DESCRIPTION,
        emailOnError=True,
        hookGroupId=HOOK_PREFIX,
        hookId=self.task_id,
        name=self.task_id,
        owner=OWNER_EMAIL,
        schedule=list(self.cycle_crons()),
        task=decision_task,
        triggerSchema={},
    )

    role = Role(
        roleId=f"hook-id:{HOOK_PREFIX}/{self.task_id}",
        description=DESCRIPTION,
        scopes=decision_task["scopes"],
    )

    return [pool, hook, role]

def build_resources(self, providers, machine_types, env=None):
    """Build the full tc-admin resources to compare and build the pool"""

    # Select a cloud provider according to configuration
    assert self.cloud in providers, f"Cloud Provider {self.cloud} not available"
    provider = providers[self.cloud]

    # Build the pool configuration for selected machines
    machines = self.get_machine_list(machine_types)
    config = {
        "launchConfigs": provider.build_launch_configs(self.imageset, machines,
                                                       self.disk_size),
        "lifecycle": {
            # give workers 15 minutes to register before assuming they're broken
            "registrationTimeout": parse_time("15m"),
            "reregistrationTimeout": parse_time("4d"),
        },
        "maxCapacity": (
            # add +1 to expected size, so if we manually trigger the hook, the new
            # decision can run without also manually cancelling a task
            # * 2 since Taskcluster seems to not reuse workers very quickly in some
            # cases, so we end up with a lot of pending tasks.
            max(1, math.ceil(self.max_run_time / self.cycle_time)) * self.tasks * 2
            + 1),
        "minCapacity": 0,
    }

    # Build the decision task payload that will trigger the new fuzzing tasks
    decision_task = yaml.safe_load(
        DECISION_TASK.substitute(
            description=DESCRIPTION.replace("\n", "\\n"),
            max_run_time=parse_time("1h"),
            owner_email=OWNER_EMAIL,
            pool_id=self.pool_id,
            provisioner=PROVISIONER_ID,
            scheduler=SCHEDULER_ID,
            secret=DECISION_TASK_SECRET,
            task_id=self.task_id,
        ))
    decision_task["scopes"] = sorted(
        chain(decision_task["scopes"], self.scopes))
    add_capabilities_for_scopes(decision_task)
    if env is not None:
        assert set(decision_task["payload"]["env"]).isdisjoint(set(env))
        decision_task["payload"]["env"].update(env)

    pool = WorkerPool(
        config=config,
        description=DESCRIPTION,
        emailOnError=True,
        owner=OWNER_EMAIL,
        providerId=PROVIDER_IDS[self.cloud],
        workerPoolId=f"{WORKER_POOL_PREFIX}/{self.task_id}",
    )

    hook = Hook(
        bindings=(),
        description=DESCRIPTION,
        emailOnError=True,
        hookGroupId=HOOK_PREFIX,
        hookId=self.task_id,
        name=self.task_id,
        owner=OWNER_EMAIL,
        schedule=list(self.cycle_crons()),
        task=decision_task,
        triggerSchema={},
    )

    role = Role(
        description=DESCRIPTION,
        roleId=f"hook-id:{HOOK_PREFIX}/{self.task_id}",
        scopes=decision_task["scopes"],
    )

    return [pool, hook, role]

def build_resources(
    self,
    providers: Dict[str, Provider],
    machine_types: MachineTypes,
    env=None,
) -> Generator[Union[WorkerPool, Hook, Role], None, None]:
    """Build the full tc-admin resources to compare and build the pool"""

    # Select a cloud provider according to configuration
    assert self.cloud in providers, f"Cloud Provider {self.cloud} not available"
    provider = providers[self.cloud]

    pools = list(self.iterpools())
    all_scopes = tuple(
        set(
            chain.from_iterable(
                cast(PoolConfiguration, pool).get_scopes() for pool in pools
            )
        )
    )

    # Build the pool configuration for selected machines
    machines = self.get_machine_list(machine_types)
    assert self.imageset is not None
    assert machines is not None
    assert self.disk_size is not None
    assert self.platform is not None
    config: Dict[str, object] = {
        "launchConfigs": provider.build_launch_configs(
            self.imageset, machines, self.disk_size, self.platform
        ),
        "maxCapacity": max(sum(pool.tasks for pool in pools if pool.tasks) * 2, 3),
        "minCapacity": 0,
    }
    if self.platform == "linux":
        config["lifecycle"] = {
            # give workers 15 minutes to register before assuming they're broken
            "registrationTimeout": parse_time("15m"),
            "reregistrationTimeout": parse_time("4d"),
        }

    # Build the decision task payload that will trigger the new fuzzing tasks
    decision_task = yaml.safe_load(
        DECISION_TASK.substitute(
            description=DESCRIPTION.replace("\n", "\\n"),
            max_run_time=parse_time("1h"),
            owner_email=OWNER_EMAIL,
            pool_id=self.pool_id,
            provisioner=PROVISIONER_ID,
            scheduler=SCHEDULER_ID,
            secret=DECISION_TASK_SECRET,
            task_id=self.task_id,
        )
    )
    decision_task["scopes"] = sorted(chain(decision_task["scopes"], all_scopes))
    add_capabilities_for_scopes(decision_task)
    if env is not None:
        assert set(decision_task["payload"]["env"]).isdisjoint(set(env))
        decision_task["payload"]["env"].update(env)

    assert self.cloud is not None
    if self.cloud != "static":
        yield WorkerPool(
            config=config,
            description=DESCRIPTION.replace("\n", "\\n"),
            emailOnError=True,
            owner=OWNER_EMAIL,
            providerId=PROVIDER_IDS[self.cloud],
            workerPoolId=f"{WORKER_POOL_PREFIX}/{self.task_id}",
        )

    self_cycle_crons = self.cycle_crons()
    assert self_cycle_crons is not None
    yield Hook(
        bindings=(),
        description=DESCRIPTION,
        emailOnError=True,
        hookGroupId=HOOK_PREFIX,
        hookId=self.task_id,
        name=self.task_id,
        owner=OWNER_EMAIL,
        schedule=list(self.cycle_crons()),
        task=decision_task,
        triggerSchema={},
    )

    yield Role(
        roleId=f"hook-id:{HOOK_PREFIX}/{self.task_id}",
        description=DESCRIPTION,
        scopes=decision_task["scopes"]
        + ["queue:create-task:highest:proj-fuzzing/ci"],
    )

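# Hypothetical usage sketch (not one of the collected snippets): consuming the
# generator variant of build_resources() from a tc-admin configuration step.
# The register_fuzzing_pools() name and the pool_configs iterable are
# assumptions; resources.manage()/resources.update() mirror the calls in the
# Servo snippet above, and WORKER_POOL_PREFIX/HOOK_PREFIX are the constants
# already referenced by the fuzzing snippets.
def register_fuzzing_pools(resources, pool_configs, providers, machine_types):
    # Claim ownership of the generated resource namespaces before updating them.
    resources.manage(f"WorkerPool={WORKER_POOL_PREFIX}/.*")
    resources.manage(f"Hook={HOOK_PREFIX}/.*")
    resources.manage(f"Role=hook-id:{HOOK_PREFIX}/.*")
    for pool in pool_configs:
        resources.update(list(pool.build_resources(providers, machine_types)))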