def load_job(self, name: str) -> Any:
    """
    Return the rendered k8s job manifest whose metadata name matches ``name``.

    Raises a TutorError when a manifest carries a non-str name or when no
    matching job exists in jobs.yml.
    """
    rendered = self.render("k8s", "jobs.yml")
    for manifest in serialize.load_all(rendered):
        job_name = manifest["metadata"]["name"]
        if not isinstance(job_name, str):
            raise exceptions.TutorError(
                f"Invalid job name: '{job_name}'. Expected str."
            )
        if job_name == name:
            return manifest
    raise exceptions.TutorError(f"Could not find job '{name}'")
def discover_all(cls) -> None:
    """
    Discover all yaml-based plugins installed in PLUGINS_ROOT.

    Every ``*.yml`` file is parsed and passed to the class constructor.
    Raises a TutorError when a file does not parse to a dict or when the
    constructor fails on a missing key.
    """
    for path in glob(os.path.join(PLUGINS_ROOT, "*.yml")):
        with open(path, encoding="utf-8") as f:
            data = serialize.load(f)
        if not isinstance(data, dict):
            raise exceptions.TutorError(f"Invalid plugin: {path}. Expected dict.")
        try:
            cls(data)
        except KeyError as e:
            # Chain the original KeyError explicitly so the traceback of the
            # failing lookup is preserved for debugging.
            raise exceptions.TutorError(
                f"Invalid plugin: {path}. Missing key: {e.args[0]}"
            ) from e
def _load_tasks(self) -> None:
    """
    Load hooks and check types.

    Reads the plugin's "hooks" attribute and registers the declared
    build-image, remote-image, pre-init and init hooks.
    """
    declared = get_callable_attr(self.obj, "hooks", default={})
    if not isinstance(declared, dict):
        raise exceptions.TutorError(
            f"Invalid hooks in plugin {self.name}. Expected dict, got {declared.__class__}."
        )

    # Build images: hooks = {"build-image": {"myimage": "myimage:latest"}}
    # We assume that the dockerfile is in the build/myimage folder.
    for image, image_tag in declared.get("build-image", {}).items():
        hooks.Filters.IMAGES_BUILD.add_item(
            (image, ("plugins", self.name, "build", image), image_tag, []),
        )

    # Remote images: hooks = {"remote-image": {"myimage": "myimage:latest"}}
    for image, image_tag in declared.get("remote-image", {}).items():
        hooks.Filters.IMAGES_PULL.add_item((image, image_tag))
        hooks.Filters.IMAGES_PUSH.add_item((image, image_tag))

    # Pre-init scripts: hooks = {"pre-init": ["myservice1", "myservice2"]}
    for service in declared.get("pre-init", []):
        hooks.Filters.COMMANDS_PRE_INIT.add_item(
            (service, (self.name, "hooks", service, "pre-init"))
        )

    # Init scripts: hooks = {"init": ["myservice1", "myservice2"]}
    for service in declared.get("init", []):
        hooks.Filters.COMMANDS_INIT.add_item(
            (service, (self.name, "hooks", service, "init"))
        )
def _load_templates_root(self) -> None:
    """
    Register the plugin's templates root and its render targets.

    Does nothing when the plugin declares no "templates" attribute; raises
    a TutorError when the attribute is not a str.
    """
    templates_root = get_callable_attr(self.obj, "templates", default=None)
    if templates_root is None:
        return
    if not isinstance(templates_root, str):
        raise exceptions.TutorError(
            f"Invalid templates in plugin {self.name}. Expected str, got {templates_root.__class__}."
        )

    hooks.Filters.ENV_TEMPLATE_ROOTS.add_item(templates_root)
    # We only add the "apps" and "build" folders and we render them in the
    # "plugins/<plugin name>" folder.
    # Consistency fix: use the typed filter accessor instead of the legacy
    # string-named API, matching how this filter is read elsewhere
    # (hooks.Filters.ENV_TEMPLATE_TARGETS.iterate).
    hooks.Filters.ENV_TEMPLATE_TARGETS.add_items(
        [
            (
                os.path.join(self.name, "apps"),
                "plugins",
            ),
            (
                os.path.join(self.name, "build"),
                "plugins",
            ),
        ],
    )
def __init__(self, data: Config):
    """
    Build a plugin from its raw config dict, validating the "name" entry.
    """
    # Annotation only: declares the attribute's type without assigning a value.
    self.loader: Config
    plugin_name = data["name"]
    if not isinstance(plugin_name, str):
        raise exceptions.TutorError(
            f"Invalid plugin name: '{plugin_name}'. Expected str, got {plugin_name.__class__}"
        )
    super().__init__(plugin_name, data)
def _load_patches(self) -> None:
    """
    Load patches and check the types are right.

    Every patch name and every patch body must be a str; each valid patch
    is registered on the ENV_PATCH filter under its own name.
    """
    declared = get_callable_attr(self.obj, "patches", {})
    if not isinstance(declared, dict):
        raise exceptions.TutorError(
            f"Invalid patches in plugin {self.name}. Expected dict, got {declared.__class__}."
        )
    for patch_name, content in declared.items():
        if not isinstance(patch_name, str):
            raise exceptions.TutorError(
                f"Invalid patch name '{patch_name}' in plugin {self.name}. Expected str, got {patch_name.__class__}."
            )
        if not isinstance(content, str):
            raise exceptions.TutorError(
                f"Invalid patch '{patch_name}' in plugin {self.name}. Expected str, got {content.__class__}."
            )
        hooks.Filters.ENV_PATCH(patch_name).add_item(content)
def _load_command(self) -> None:
    """
    Register the plugin's click command, if it declares one.

    The command is renamed after the plugin before being added to the CLI.
    """
    cli_command = getattr(self.obj, "command", None)
    if cli_command is None:
        # The plugin does not expose a CLI command: nothing to do.
        return
    if not isinstance(cli_command, click.Command):
        raise exceptions.TutorError(
            f"Invalid command in plugin {self.name}. Expected click.Command, got {cli_command.__class__}."
        )
    # We force the command name to the plugin name
    cli_command.name = self.name
    hooks.Filters.CLI_COMMANDS.add_item(cli_command)
def load(name: str) -> None:
    """
    Load a given plugin, thus declaring all its hooks.

    Loading a plugin is done within a context, such that we can remove all hooks when a
    plugin is disabled, or during unit tests.
    """
    if not is_installed(name):
        raise exceptions.TutorError(f"plugin '{name}' is not installed.")
    # Hooks declared below are tagged with both the global PLUGINS context
    # and this plugin's own APP context.
    with hooks.Contexts.PLUGINS.enter(), hooks.Contexts.APP(name).enter():
        hooks.Actions.PLUGIN_LOADED(name).do()
        hooks.Filters.PLUGINS_LOADED.add_item(name)
def _load_config(self) -> None:
    """
    Load config and check types.

    The "config" attribute must be a dict of str names to dicts with str
    keys. Entries from "add" and "defaults" are registered with the
    upper-cased plugin name as prefix; "set" entries override config as-is.
    """
    config = get_callable_attr(self.obj, "config", {})
    if not isinstance(config, dict):
        raise exceptions.TutorError(
            f"Invalid config in plugin {self.name}. Expected dict, got {config.__class__}."
        )
    for name, subconfig in config.items():
        if not isinstance(name, str):
            # Bug fix: report the offending entry's class, not the class of
            # the whole config dict.
            raise exceptions.TutorError(
                f"Invalid config entry '{name}' in plugin {self.name}. Expected str, got {name.__class__}."
            )
        if not isinstance(subconfig, dict):
            # Bug fix: same here — report the sub-entry's class.
            raise exceptions.TutorError(
                f"Invalid config entry '{name}' in plugin {self.name}. Expected str keys, got {subconfig.__class__}."
            )
        for key in subconfig.keys():
            if not isinstance(key, str):
                raise exceptions.TutorError(
                    f"Invalid config entry '{name}.{key}' in plugin {self.name}. Expected str, got {key.__class__}."
                )
    # Config keys in the "add" and "defaults" dicts must be prefixed by
    # the plugin name, in uppercase.
    key_prefix = self.name.upper() + "_"
    hooks.Filters.CONFIG_UNIQUE.add_items(
        [(f"{key_prefix}{key}", value) for key, value in config.get("add", {}).items()],
    )
    hooks.Filters.CONFIG_DEFAULTS.add_items(
        [(f"{key_prefix}{key}", value) for key, value in config.get("defaults", {}).items()],
    )
    hooks.Filters.CONFIG_OVERRIDES.add_items(
        list(config.get("set", {}).items()),
    )
def load(root: str) -> Config:
    """
    Load full configuration.

    This will raise an exception if there is no current configuration in the
    project root. A warning will also be printed if the version from disk
    differs from the package version.
    """
    if not os.path.exists(config_path(root)):
        raise exceptions.TutorError(
            "Project root does not exist. Make sure to generate the initial "
            "configuration with `tutor config save --interactive` or `tutor local "
            "quickstart` prior to running other commands."
        )
    # Warns (per the docstring) when the on-disk env version differs from
    # the installed package version.
    env.check_is_up_to_date(root)
    return load_full(root)
def convert_json2yml(root: str) -> None:
    """
    Older versions of tutor used to have json config files.

    When a legacy config.json exists in ``root``, convert it to the yaml
    config file and remove the json original. Raises a TutorError when both
    files exist, since only one may be present.
    """
    json_path = os.path.join(root, "config.json")
    if not os.path.exists(json_path):
        # No legacy file: nothing to migrate.
        return
    if os.path.exists(config_path(root)):
        raise exceptions.TutorError(
            f"Both config.json and {CONFIG_FILENAME} exist in {root}: only one of these files must exist to continue"
        )
    legacy_config = get_yaml_file(json_path)
    save_config_file(root, legacy_config)
    os.remove(json_path)
    fmt.echo_info(
        f"File config.json detected in {root} and converted to {CONFIG_FILENAME}"
    )
def _delete_plugin_templates(plugin: str, root: str, _config: Config) -> None:
    """
    Delete plugin env files on unload.

    Iterates the template targets registered under the plugin's APP context
    and removes the corresponding rendered folders from the project root.
    """
    template_targets: t.Iterator[t.Tuple[str, str]] = hooks.Filters.ENV_TEMPLATE_TARGETS.iterate(
        context=hooks.Contexts.APP(plugin).name
    )
    for src, dst in template_targets:
        path = pathjoin(root, dst.replace("/", os.sep), src.replace("/", os.sep))
        if not os.path.exists(path):
            continue
        fmt.echo_info(f"    env - removing folder: {path}")
        try:
            shutil.rmtree(path)
        except PermissionError as e:
            raise exceptions.TutorError(
                f"Could not delete file {e.filename} from plugin {plugin} in folder {path}"
            )
def kubectl_exec(config: Config, service: str, command: List[str]) -> int:
    """
    Execute ``command`` interactively in the first pod of ``service``.

    Raises a TutorError when no pod matches the service label selector.
    """
    label_selector = f"app.kubernetes.io/name={service}"
    pods = K8sClients.instance().core_api.list_namespaced_pod(
        namespace=k8s_namespace(config), label_selector=label_selector
    )
    if not pods.items:
        raise exceptions.TutorError(
            f"Could not find an active pod for the {service} service"
        )
    pod_name = pods.items[0].metadata.name

    # Run command
    return utils.kubectl(
        "exec",
        "--stdin",
        "--tty",
        "--namespace",
        k8s_namespace(config),
        pod_name,
        "--",
        *command,
    )
def install(location: str) -> None:
    """
    Install a plugin file into PLUGINS_ROOT.

    ``location`` is either an http(s) URL or a local file path. Files
    without a ".yml" or ".py" extension are saved with a ".py" suffix.
    Raises a TutorError when no file exists at ``location``.
    """
    basename = os.path.basename(location)
    # str.endswith accepts a tuple of suffixes: one call instead of two.
    if not basename.endswith((".yml", ".py")):
        basename += ".py"
    plugin_path = os.path.join(PLUGINS_ROOT, basename)
    if location.startswith("http"):
        # Download file
        # NOTE(review): plugins are executable code; installing from an
        # arbitrary URL implies full trust in that source.
        response = urllib.request.urlopen(location)
        content = response.read().decode()
    elif os.path.isfile(location):
        # Read file
        with open(location, encoding="utf-8") as f:
            content = f.read()
    else:
        raise exceptions.TutorError(f"No plugin found at {location}")
    # Save file. exist_ok=True avoids the check-then-create race of the
    # previous os.path.exists() guard.
    os.makedirs(PLUGINS_ROOT, exist_ok=True)
    with open(plugin_path, "w", newline="\n", encoding="utf-8") as f:
        f.write(content)
    fmt.echo_info(f"Plugin installed at {plugin_path}")
def __render(self, template: jinja2.Template) -> str:
    """
    Render ``template`` against the current configuration.

    Raises a TutorError naming the missing variable when the template
    references an undefined configuration value.
    """
    try:
        return template.render(**self.config)
    except jinja2.exceptions.UndefinedError as e:
        # Chain the jinja2 error explicitly so the template traceback is
        # preserved in the raised TutorError.
        raise exceptions.TutorError(
            f"Missing configuration value: {e.args[0]}"
        ) from e
def run_job(self, service: str, command: str) -> int:
    """
    Run ``command`` inside a dedicated Kubernetes job for ``service``.

    The "<service>-job" manifest from jobs.yml is patched with a unique
    timestamped name and the shell command to run, written back to the
    rendered jobs.yml, applied with kubectl, and then polled until it
    completes.

    Returns 0 on success. Raises a TutorError when the job fails.
    """
    job_name = f"{service}-job"
    job = self.load_job(job_name)
    # Create a unique job name to make it deduplicate jobs and make it easier to
    # find later. Logs of older jobs will remain available for some time.
    job_name += "-" + datetime.now().strftime("%Y%m%d%H%M%S")

    # Wait until all other jobs are completed
    while True:
        active_jobs = self.active_job_names()
        if not active_jobs:
            break
        fmt.echo_info(
            f"Waiting for active jobs to terminate: {' '.join(active_jobs)}"
        )
        sleep(5)

    # Configure job: unique name and matching label so we can select it later.
    job["metadata"]["name"] = job_name
    job["metadata"].setdefault("labels", {})
    job["metadata"]["labels"]["app.kubernetes.io/name"] = job_name
    # Define k8s entrypoint/args
    shell_command = ["sh", "-e", "-c"]
    if job["spec"]["template"]["spec"]["containers"][0].get("command") == []:
        # In some cases, we need to bypass the container entrypoint.
        # Unfortunately, AFAIK, there is no way to do so in K8s manifests. So we mark
        # some jobs with "command: []". For these jobs, the entrypoint becomes "sh -e -c".
        # We do not do this for every job, because some (most) entrypoints are actually useful.
        job["spec"]["template"]["spec"]["containers"][0]["command"] = shell_command
        container_args = [command]
    else:
        container_args = shell_command + [command]
    job["spec"]["template"]["spec"]["containers"][0]["args"] = container_args
    # Fail fast (single retry) and let k8s garbage-collect the job after an hour.
    job["spec"]["backoffLimit"] = 1
    job["spec"]["ttlSecondsAfterFinished"] = 3600
    # Save patched job to "jobs.yml" file
    with open(
        tutor_env.pathjoin(self.root, "k8s", "jobs.yml"), "w", encoding="utf-8"
    ) as job_file:
        serialize.dump(job, job_file)
    # We cannot use the k8s API to create the job: configMap and volume names need
    # to be found with the right suffixes.
    kubectl_apply(
        self.root,
        "--selector",
        f"app.kubernetes.io/name={job_name}",
    )

    message = (
        "Job {job_name} is running. To view the logs from this job, run:\n\n"
        """    kubectl logs --namespace={namespace} --follow $(kubectl get --namespace={namespace} pods """
        """--selector=job-name={job_name} -o=jsonpath="{{.items[0].metadata.name}}")\n\n"""
        "Waiting for job completion..."
    ).format(job_name=job_name, namespace=k8s_namespace(self.config))
    fmt.echo_info(message)

    # Wait for completion
    field_selector = f"metadata.name={job_name}"
    while True:
        namespaced_jobs = K8sClients.instance().batch_api.list_namespaced_job(
            k8s_namespace(self.config), field_selector=field_selector
        )
        if not namespaced_jobs.items:
            # NOTE(review): this `continue` skips the sleep(5) below, so the
            # loop busy-polls while the job is not yet visible — confirm this
            # is intended.
            continue
        job = namespaced_jobs.items[0]
        if not job.status.active:
            if job.status.succeeded:
                fmt.echo_info(f"Job {job_name} successful.")
                break
            if job.status.failed:
                raise exceptions.TutorError(
                    f"Job {job_name} failed. View the job logs to debug this issue."
                )
        sleep(5)
    return 0
def _version(self) -> t.Optional[str]:
    """
    Return the version of the entrypoint plugin's distribution.

    Raises a TutorError when the entrypoint loader has no dist.
    """
    dist = self.loader.dist
    if not dist:
        raise exceptions.TutorError(
            f"Entrypoint plugin '{self.name}' has no dist."
        )
    return dist.version