Example #1
def __init__(self, root: str, config: Config):
    """
    Load docker-compose files from dev/ and local/
    """
    super().__init__(root, config)
    self.project_name = get_typed(self.config, "DEV_PROJECT_NAME", str)
    self.docker_compose_tmp_path = tutor_env.pathjoin(
        self.root, "dev", "docker-compose.tmp.yml"
    )
    self.docker_compose_jobs_tmp_path = tutor_env.pathjoin(
        self.root, "dev", "docker-compose.jobs.tmp.yml"
    )
    self.docker_compose_files += [
        tutor_env.pathjoin(self.root, "local", "docker-compose.yml"),
        tutor_env.pathjoin(self.root, "dev", "docker-compose.yml"),
        self.docker_compose_tmp_path,
        tutor_env.pathjoin(self.root, "local", "docker-compose.override.yml"),
        tutor_env.pathjoin(self.root, "dev", "docker-compose.override.yml"),
    ]
    self.docker_compose_job_files += [
        tutor_env.pathjoin(self.root, "local", "docker-compose.jobs.yml"),
        tutor_env.pathjoin(self.root, "dev", "docker-compose.jobs.yml"),
        self.docker_compose_jobs_tmp_path,
        tutor_env.pathjoin(self.root, "local", "docker-compose.jobs.override.yml"),
        tutor_env.pathjoin(self.root, "dev", "docker-compose.jobs.override.yml"),
    ]
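The order of these lists matters: when docker-compose is given several files, later files override values from earlier ones, so the dev/ files and the *.override.yml files take precedence. A minimal sketch of how such a list typically turns into repeated -f flags (hypothetical paths; the actual invocation happens elsewhere in the class):

# Hypothetical paths; in the class above these are absolute paths under the
# project root. Later -f files override earlier ones when docker-compose
# merges them.
docker_compose_files = [
    "local/docker-compose.yml",
    "dev/docker-compose.yml",
    "dev/docker-compose.override.yml",
]
compose_args = []
for path in docker_compose_files:
    compose_args += ["-f", path]
# compose_args == ["-f", "local/docker-compose.yml",
#                  "-f", "dev/docker-compose.yml",
#                  "-f", "dev/docker-compose.override.yml"]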
Example #2
def build(
    context: Context,
    image_names: t.List[str],
    no_cache: bool,
    build_args: t.List[str],
    add_hosts: t.List[str],
    target: str,
    docker_args: t.List[str],
) -> None:
    config = tutor_config.load(context.root)
    command_args = []
    if no_cache:
        command_args.append("--no-cache")
    for build_arg in build_args:
        command_args += ["--build-arg", build_arg]
    for add_host in add_hosts:
        command_args += ["--add-host", add_host]
    if target:
        command_args += ["--target", target]
    if docker_args:
        command_args += docker_args
    for image in image_names:
        for _name, path, tag, custom_args in find_images_to_build(config, image):
            images.build(
                tutor_env.pathjoin(context.root, *path),
                tag,
                *command_args,
                *custom_args,
            )
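To make the flag assembly concrete, here is the same accumulation with hypothetical CLI inputs; the resulting list is forwarded to images.build() together with any per-image custom_args:

# Hypothetical inputs, to show the shape of the accumulated arguments:
command_args = []
command_args.append("--no-cache")                               # no_cache=True
command_args += ["--build-arg", "EDX_PLATFORM_VERSION=master"]  # build_args
command_args += ["--add-host", "gitlab.local:192.168.1.10"]     # add_hosts
command_args += ["--target", "translations"]                    # target
command_args += ["--progress=plain"]                            # docker_args
# command_args is then passed to images.build() for every matching image.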
Example #3
def delete(context: K8sContext, yes: bool) -> None:
    if not yes:
        click.confirm(
            "Are you sure you want to delete the platform? All data will be removed.",
            abort=True,
        )
    utils.kubectl(
        "delete",
        "-k",
        tutor_env.pathjoin(context.root),
        "--ignore-not-found=true",
        "--wait",
    )
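Given that tutor_env.pathjoin(context.root) with no extra components resolves to the environment directory under the project root (see the pathjoin tests in Examples #5 and #6), the call above is roughly equivalent to the following (illustrative root path):

import subprocess

# Illustrative root path; pathjoin(root) is assumed to resolve to "<root>/env",
# consistent with the pathjoin tests below.
subprocess.check_call(
    ["kubectl", "delete", "-k", "/home/user/tutor/env",
     "--ignore-not-found=true", "--wait"]
)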
Example #4
def run_job(self, service: str, command: str) -> int:
    """
    Run the "{{ service }}-job" service from local/docker-compose.jobs.yml with the
    specified command.
    """
    run_command = []
    for docker_compose_path in self.docker_compose_job_files:
        path = tutor_env.pathjoin(self.root, docker_compose_path)
        if os.path.exists(path):
            run_command += ["-f", path]
    run_command += ["run", "--rm"]
    if not utils.is_a_tty():
        # Disable pseudo-tty allocation when stdin is not a terminal
        run_command += ["-T"]
    job_service_name = f"{service}-job"
    return self.docker_compose(
        *run_command,
        job_service_name,
        "sh",
        "-e",
        "-c",
        command,
    )
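Assuming self.docker_compose prepends the base docker-compose invocation, a run with service="lms" would end up executing something along these lines:

# Illustrative final command line for service="lms", command="echo hi",
# with both job files present and stdin attached to a TTY:
#
#   docker-compose -f local/docker-compose.jobs.yml -f dev/docker-compose.jobs.yml \
#       run --rm lms-job sh -e -c "echo hi"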
Example #5
def test_pathjoin(self):
    self.assertEqual(
        "/tmp/env/target/dummy", env.pathjoin("/tmp", "target", "dummy")
    )
    self.assertEqual("/tmp/env/dummy", env.pathjoin("/tmp", "dummy"))
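These assertions pin down the convention: pathjoin inserts an "env" base directory between the root and the remaining path components. A minimal re-implementation consistent with this test (a sketch, not necessarily the actual implementation):

import os

def pathjoin(root, *parts):
    # Everything rendered by Tutor lives under "<root>/env", per the test above.
    return os.path.join(root, "env", *parts)

assert pathjoin("/tmp", "target", "dummy") == "/tmp/env/target/dummy"
assert pathjoin("/tmp", "dummy") == "/tmp/env/dummy"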
Example #6
def test_pathjoin(self) -> None:
    with temporary_root() as root:
        self.assertEqual(
            os.path.join(env.base_dir(root), "dummy"), env.pathjoin(root, "dummy")
        )
Example #7
def run_job(self, service: str, command: str) -> int:
    job_name = f"{service}-job"
    job = self.load_job(job_name)
    # Make the job name unique so that it does not collide with previous runs
    # and is easier to find later. Logs of older jobs will remain available
    # for some time.
    job_name += "-" + datetime.now().strftime("%Y%m%d%H%M%S")

    # Wait until all other jobs are completed
    while True:
        active_jobs = self.active_job_names()
        if not active_jobs:
            break
        fmt.echo_info(
            f"Waiting for active jobs to terminate: {' '.join(active_jobs)}"
        )
        sleep(5)

    # Configure job
    job["metadata"]["name"] = job_name
    job["metadata"].setdefault("labels", {})
    job["metadata"]["labels"]["app.kubernetes.io/name"] = job_name
    # Define k8s entrypoint/args
    shell_command = ["sh", "-e", "-c"]
    if job["spec"]["template"]["spec"]["containers"][0].get("command") == []:
        # In some cases, we need to bypass the container entrypoint.
        # Unfortunately, AFAIK, there is no way to do so in K8s manifests. So
        # we mark some jobs with "command: []". For these jobs, the entrypoint
        # becomes "sh -e -c". We do not do this for every job, because some
        # (most) entrypoints are actually useful.
        job["spec"]["template"]["spec"]["containers"][0]["command"] = shell_command
        container_args = [command]
    else:
        container_args = shell_command + [command]
    job["spec"]["template"]["spec"]["containers"][0]["args"] = container_args
    job["spec"]["backoffLimit"] = 1
    job["spec"]["ttlSecondsAfterFinished"] = 3600
    # Save the patched job to the "jobs.yml" file
    with open(
        tutor_env.pathjoin(self.root, "k8s", "jobs.yml"), "w", encoding="utf-8"
    ) as job_file:
        serialize.dump(job, job_file)
    # We cannot use the k8s API to create the job: configMap and volume names
    # need to be found with the right suffixes.
    kubectl_apply(
        self.root,
        "--selector",
        f"app.kubernetes.io/name={job_name}",
    )

    message = (
        "Job {job_name} is running. To view the logs from this job, run:\n\n"
        """    kubectl logs --namespace={namespace} --follow $(kubectl get --namespace={namespace} pods """
        """--selector=job-name={job_name} -o=jsonpath="{{.items[0].metadata.name}}")\n\n"""
        "Waiting for job completion..."
    ).format(job_name=job_name, namespace=k8s_namespace(self.config))
    fmt.echo_info(message)

    # Wait for completion
    field_selector = f"metadata.name={job_name}"
    while True:
        namespaced_jobs = K8sClients.instance().batch_api.list_namespaced_job(
            k8s_namespace(self.config), field_selector=field_selector
        )
        if not namespaced_jobs.items:
            # Do not hammer the API while the job is being created
            sleep(5)
            continue
        job = namespaced_jobs.items[0]
        if not job.status.active:
            if job.status.succeeded:
                fmt.echo_info(f"Job {job_name} successful.")
                break
            if job.status.failed:
                raise exceptions.TutorError(
                    f"Job {job_name} failed. View the job logs to debug this issue."
                )
        sleep(5)
    return 0
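For orientation, the patched manifest serialized to k8s/jobs.yml has roughly this shape (illustrative values, except for the fields explicitly set in the code above):

# Illustrative patched job for service "lms", bypassing the container entrypoint:
patched_job = {
    "metadata": {
        "name": "lms-job-20230101120000",
        "labels": {"app.kubernetes.io/name": "lms-job-20230101120000"},
    },
    "spec": {
        "backoffLimit": 1,                # at most one retry before failing
        "ttlSecondsAfterFinished": 3600,  # keep finished jobs around for an hour
        "template": {
            "spec": {
                "containers": [
                    {"command": ["sh", "-e", "-c"], "args": ["echo hi"]},
                ],
            },
        },
    },
}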
Example #8
def kubectl_apply(root: str, *args: str) -> None:
    utils.kubectl("apply", "--kustomize", tutor_env.pathjoin(root), *args)
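This is the helper invoked at the end of Example #7; that call expands to roughly the following (illustrative root and job name):

# Illustrative expansion of the call made in Example #7:
#
#   kubectl apply --kustomize /home/user/tutor/env \
#       --selector app.kubernetes.io/name=lms-job-20230101120000
kubectl_apply(
    "/home/user/tutor", "--selector", "app.kubernetes.io/name=lms-job-20230101120000"
)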