def schedule_task_eks(triggerName: str, frequency: str, taskConfiguration: dict) -> Any:
    """
    Schedule a recurring task on EKS by creating a Kubernetes CronJob.

    Parameters
    ----------
    triggerName: str
        A unique name of the time trigger that will start this exec
    frequency: str
        A cron string e.g., cron(0/15 * 1/1 * ? *) to define the starting
        times of the execution
    taskConfiguration: dict
        A task definition to execute (same shape as for 'run_python' /
        'run_notebook' tasks — refer to 'run_python').

    Returns
    -------
    dict
        Execution descriptor with keys "ExecutionType" ("eks") and
        "Identifier" (the created CronJob name).
    """
    props = get_properties()
    team_name = props["AWS_ORBIT_TEAM_SPACE"]
    node_type = get_node_type(taskConfiguration)
    # JUPYTERHUB_USER is set in hub-spawned environments; fall back to USERNAME.
    username = (os.environ.get("JUPYTERHUB_USER", os.environ.get("USERNAME"))).split("@")[0]
    cronjob_id = f"orbit-{team_name}-{triggerName}"
    labels = {
        "app": "orbit-runner",
        "orbit/node-type": node_type,
        "username": username,
        "cronjob_id": cronjob_id,
    }
    team_constants: TeamConstants = TeamConstants(username)
    job_spec = _create_eks_job_spec(taskConfiguration, labels=labels, team_constants=team_constants)
    cron_job_template: V1beta1JobTemplateSpec = V1beta1JobTemplateSpec(spec=job_spec)
    cron_job_spec: V1beta1CronJobSpec = V1beta1CronJobSpec(job_template=cron_job_template, schedule=frequency)
    job = V1beta1CronJob(
        api_version="batch/v1beta1",
        kind="CronJob",
        metadata=V1ObjectMeta(name=cronjob_id, labels=labels, namespace=team_name),
        status=V1beta1CronJobStatus(),
        spec=cron_job_spec,
    )
    load_kube_config()
    job_instance: V1beta1CronJob = BatchV1beta1Api().create_namespaced_cron_job(namespace=team_name, body=job)
    metadata: V1ObjectMeta = job_instance.metadata
    # BUG FIX: this debug log (and a duplicate metadata assignment) used to sit
    # after the return statement and was unreachable dead code.
    _logger.debug(f"started job {metadata.name}")
    return {
        "ExecutionType": "eks",
        "Identifier": metadata.name,
    }
def _run_task_eks(taskConfiguration: dict) -> Any:
    """
    Launch a one-off task on EKS by creating a Kubernetes Job in the
    team's namespace.

    Parameters
    ----------
    taskConfiguration: dict
        A task definition to execute.

    Returns
    -------
    Response Payload
    """
    properties = get_properties()
    team = properties["AWS_ORBIT_TEAM_SPACE"]
    # Prefer the hub-provided user name, falling back to USERNAME.
    user = (os.environ.get("JUPYTERHUB_USER", os.environ.get("USERNAME"))).split("@")[0]
    compute_type = get_node_type(taskConfiguration)

    job_labels = {
        "app": "orbit-runner",
        "orbit/node-type": compute_type,
        "username": user,
    }
    if compute_type == "ec2":
        job_labels["orbit/attach-security-group"] = "yes"

    constants: TeamConstants = TeamConstants()
    spec = _create_eks_job_spec(taskConfiguration, labels=job_labels, team_constants=constants)
    load_kube_config()

    # Task-level label overrides apply to the Job metadata (the pod spec was
    # already built above with the base labels).
    compute_overrides = taskConfiguration.get("compute", {})
    if "labels" in compute_overrides:
        job_labels = {**job_labels, **compute_overrides["labels"]}

    job = V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=V1ObjectMeta(
            generate_name=f"orbit-{team}-{compute_type}-runner-",
            labels=job_labels,
            namespace=team,
        ),
        spec=spec,
    )
    created: V1Job = BatchV1Api().create_namespaced_job(namespace=team, body=job)
    job_meta: V1ObjectMeta = created.metadata
    _logger.debug(f"started job {job_meta.name}")
    return {
        "ExecutionType": "eks",
        "Identifier": job_meta.name,
        "NodeType": compute_type,
        "tasks": taskConfiguration["tasks"],
    }
def get(self):
    """
    Handle GET: respond with the team workspace description and team profiles.

    When MOCK is unset or "0", live data is fetched (and, if MOCK == "0",
    written to the mockup file); otherwise data is read back from the mockup
    file instead of hitting live services.
    """
    global DATA
    self.log.info(f"GET - {self.__class__}")
    # BUG FIX: PROFILES_DATA was only assigned on the live-data branch, so the
    # mock-read path below crashed with NameError at self.finish(). Default to
    # an empty profile list — NOTE(review): confirm _dump accepts an empty list.
    PROFILES_DATA = []
    if "MOCK" not in os.environ or os.environ["MOCK"] == "0":
        DATA = get_workspace()
        PROFILES_DATA = TeamConstants().team_profiles()
        # hide some details
        if "Elbs" in DATA:
            del DATA["Elbs"]
        if "Plugins" in DATA:
            del DATA["Plugins"]
        if "MOCK" in os.environ:
            path = f"{Path(__file__).parent.parent.parent}/test/mockup/team.json"
            self.log.info(f"writing mockup data to {path}")
            with open(path, "w") as outfile:
                json.dump(DATA, outfile, indent=4)
    else:
        path = f"{Path(__file__).parent.parent.parent}/test/mockup/team.json"
        with open(path) as f:
            DATA = json.load(f)
    self.finish(self._dump(DATA, PROFILES_DATA))
def _create_eks_job_spec(taskConfiguration: dict, labels: Dict[str, str], team_constants: TeamConstants) -> V1JobSpec:
    """
    Build the Kubernetes V1JobSpec used to run a task pod on EKS.

    Parameters
    ----------
    taskConfiguration: dict
        A task definition to execute. Its "compute" section may override
        volumes, volume mounts, labels, and sudo access.
    labels: Dict[str, str]
        Base labels to attach to the pod.
    team_constants: TeamConstants
        Team-level defaults (volumes, node selector, uid/gid, annotations,
        lifecycle hooks, image pull policy).

    Returns
    -------
    V1JobSpec
        Job spec with restarts disabled, backoff_limit=0, and a 120s
        TTL after the job finishes.

    Raises
    ------
    RuntimeError
        If a profile "kubespawner_override" tries to replace a core pod
        property (anything other than additive keys).
    """
    props = get_properties()
    global __CURRENT_TEAM_MANIFEST__, __CURRENT_ENV_MANIFEST__
    env_name = props["AWS_ORBIT_ENV"]
    team_name = props["AWS_ORBIT_TEAM_SPACE"]
    # Lazily (re)load the module-level manifest caches from SSM.
    if __CURRENT_TEAM_MANIFEST__ is None or __CURRENT_TEAM_MANIFEST__["Name"] != team_name:
        __CURRENT_TEAM_MANIFEST__ = load_team_context_from_ssm(env_name, team_name)
    if __CURRENT_ENV_MANIFEST__ is None:
        __CURRENT_ENV_MANIFEST__ = load_env_context_from_ssm(env_name)

    env = build_env(__CURRENT_ENV_MANIFEST__, env_name, taskConfiguration, team_constants, team_name)
    profile = resolve_profile(taskConfiguration, team_constants)
    image = resolve_image(__CURRENT_TEAM_MANIFEST__, profile)
    node_type = get_node_type(taskConfiguration)
    job_name: str = f'run-{taskConfiguration["task_type"]}'
    volumes = team_constants.volumes()
    volume_mounts = team_constants.volume_mounts()
    grant_sudo = False

    # Profile-level overrides are applied first, then task-level overrides.
    if "kubespawner_override" in profile:
        if "volumes" in profile["kubespawner_override"]:
            volumes.extend(profile["kubespawner_override"]["volumes"])
            _logger.info("profile override is attaching volumes: %s", volumes)
        if "volume_mounts" in profile["kubespawner_override"]:
            volume_mounts.extend(profile["kubespawner_override"]["volume_mounts"])
            _logger.info("profile override is mounting volumes: %s", volume_mounts)
    if "compute" in taskConfiguration:
        if "grant_sudo" in taskConfiguration["compute"]:
            # Accepts a truthy value or the literal string "True".
            if taskConfiguration["compute"]["grant_sudo"] or taskConfiguration["compute"]["grant_sudo"] == "True":
                grant_sudo = True
        if "volumes" in taskConfiguration["compute"]:
            volumes.extend(taskConfiguration["compute"]["volumes"])
            _logger.info("task override is attaching volumes: %s", volumes)
        if "volume_mounts" in taskConfiguration["compute"]:
            volume_mounts.extend(taskConfiguration["compute"]["volume_mounts"])
            _logger.info("task override is mounting volumes: %s", volume_mounts)
        if "labels" in taskConfiguration["compute"]:
            labels = {**labels, **taskConfiguration["compute"]["labels"]}

    node_selector = team_constants.node_selector(node_type)
    _logger.info("volumes:%s", json.dumps(volumes))
    _logger.info("volume_mounts:%s", json.dumps(volume_mounts))

    # FIX: was annotated Dict[str, str]; values include lists, ints and None.
    pod_properties: Dict[str, Any] = dict(
        name=job_name,
        image=image,
        cmd=["bash", "-c", "/home/jovyan/.orbit/bootstrap.sh && python /opt/python-utils/notebook_cli.py"],
        port=22,
        image_pull_policy=team_constants.image_pull_policy(),
        image_pull_secrets=None,
        node_selector=node_selector,
        run_as_uid=team_constants.uid(grant_sudo),
        run_as_gid=team_constants.gid(),
        fs_gid=team_constants.gid(),
        run_privileged=False,
        allow_privilege_escalation=True,
        env=env,
        volumes=volumes,
        volume_mounts=volume_mounts,
        labels=labels,
        annotations=team_constants.annotations(),
        lifecycle_hooks=team_constants.life_cycle_hooks(),
        service_account=team_name,
        logger=_logger,
    )
    if grant_sudo:
        # Run as root so sudo works inside the container.
        pod_properties["uid"] = 0
    if "kubespawner_override" in profile:
        for k, v in profile["kubespawner_override"].items():
            if k in ["image"]:
                # special handling is already done for image
                continue
            if k in pod_properties:
                # BUG FIX: RuntimeError was given %-style args that were never
                # interpolated (the exception carried a tuple, not a message).
                raise RuntimeError(f"Override '{k}' in profile is not allowed")
            _logger.debug("profile overriding pod value %s=%s", k, v)
            pod_properties[k] = v

    pod: V1Pod = make_pod(**pod_properties)
    pod.spec.restart_policy = "Never"
    job_spec = V1JobSpec(
        backoff_limit=0,
        template=pod,
        ttl_seconds_after_finished=120,
    )
    return job_spec