Example 1
    def create_kaniko_task(self,
                           task_template,
                           dockerfile,
                           context,
                           destination,
                           no_push=False):
        """
        A task for building images inside a cluster container using Kaniko.
        If we are testing the workflow locally then we won't be pushing images
        to any registries. This will make it easier for people to try out and
        extend the code.
        """
        kaniko = argo_build_util.deep_copy(task_template)

        # append a tag of the form <base>-<sha[0:8]>
        if ":" not in destination:
            sha = os.getenv("PULL_BASE_SHA", "12341234kanikotest")
            base = os.getenv("PULL_BASE_REF", "master")
            destination += ":%s-%s" % (base, sha[0:8])

        kaniko["name"] = "kaniko-build-push"
        kaniko["container"]["image"] = "gcr.io/kaniko-project/executor:v1.5.0"
        kaniko["container"]["command"] = ["/kaniko/executor"]
        kaniko["container"]["args"] = [
            "--dockerfile=%s" % dockerfile,
            "--context=%s" % context,
            "--destination=%s" % destination
        ]

        # don't push the image to a registry if trying out the produced
        # Argo Workflow yaml locally
        if LOCAL_TESTING == "True" or no_push:
            kaniko["container"]["args"].append("--no-push")

        return kaniko
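
For orientation, a hedged usage sketch (not part of the original module): it assumes a builder instance exposing create_kaniko_task, a bare-bones task_template dict, and that LOCAL_TESTING is a module-level flag read from the environment. The paths and registry below are illustrative stand-ins.

# Hypothetical usage; nothing is pushed because no_push=True.
task_template = {"name": "", "container": {"image": "", "command": [], "args": []}}

kaniko = builder.create_kaniko_task(
    task_template,
    dockerfile="components/example-app/Dockerfile",          # illustrative path
    context="dir:///src/kubeflow/kubeflow/components/example-app/",
    destination="gcr.io/my-project/example-app",             # no ":tag", so one is appended
    no_push=True)

# With PULL_BASE_REF/PULL_BASE_SHA unset, the defaults above yield the tag
# "master-12341234", and "--no-push" is appended to the executor args.
print(kaniko["container"]["args"])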
Example 2
  def _build_step(self, name, workflow, dag_name, task_template,
                  command, dependences):
    """Syntactic sugar to add a step to the workflow"""

    step = argo_build_util.deep_copy(task_template)

    step["name"] = name
    step["container"]["command"] = command

    return argo_build_util.add_task_to_dag(workflow, dag_name, step, dependences)
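
A hedged illustration of how this helper gets called from a build() method elsewhere in this collection; the step name, test file, and dependency below are illustrative, and self.workflow, E2E_DAG_NAME, and task_template are assumed to exist as they do in those methods.

        # Illustrative call from inside a build() method.
        command = ["pytest", "some_test.py", "-s", "--log-cli-level=info"]
        step = self._build_step("some-test",      # unique step name
                                self.workflow,    # workflow being assembled
                                E2E_DAG_NAME,     # DAG the task is attached to
                                task_template,    # template that gets deep-copied
                                command,          # container command override
                                ["checkout"])     # upstream dependencies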
Example 3
    def create_format_python_task(self, task_template, workingDir):
        format_task = argo_build_util.deep_copy(task_template)

        format_task["name"] = "check-python-formatting"
        format_task["container"]["image"] = "python:3.7-slim-buster"

        format_task["container"]["command"] = ["/bin/sh", "-c"]
        format_task["container"]["args"] = ["pip install flake8 && flake8 ."]

        format_task["container"]["workingDir"] = workingDir

        return format_task
Example 4
    def create_format_typescript_task(self, task_template, workingDir):
        format_task = argo_build_util.deep_copy(task_template)

        format_task["name"] = "check-frontend-formatting"
        format_task["container"]["image"] = "node:12.20.1-stretch-slim"

        format_task["container"]["command"] = ["npm"]
        format_task["container"]["args"] = ["run", "format:check"]

        format_task["container"]["workingDir"] = workingDir

        return format_task
Example 5
    def _kustomize_build_task(self, task_template):
        k_build = argo_build_util.deep_copy(task_template)

        k_build["name"] = "kustomize-build-test"
        k_build["container"]["image"] = "k8s.gcr.io/kustomize/kustomize:v4.1.2"
        k_build["container"]["args"] = ["build"]

        manifest_dir = ("%s/components/notebook-controller/config/"
                        "overlays/kubeflow/") % self.src_dir
        k_build["container"]["workingDir"] = manifest_dir

        return k_build
Example 6
    def create_install_modules_task(self, task_template, workingDir):
        install = argo_build_util.deep_copy(task_template)

        install["name"] = "npm-modules-install"
        install["container"]["image"] = "node:12.20.1-stretch-slim"

        install["container"]["command"] = ["npm"]
        install["container"]["args"] = ["ci"]

        install["container"]["workingDir"] = workingDir

        return install
Example 7
    def _kustomize_build_task(self, task_template):
        k_build = argo_build_util.deep_copy(task_template)

        k_build["name"] = "kustomize-build-test"
        k_build["container"]["image"] = "k8s.gcr.io/kustomize/kustomize:v4.1.2"
        k_build["container"]["args"] = ["build"]

        manifest_dir = ("%s/components/crud-web-apps/jupyter/manifests/"
                        "overlays/istio/") % self.src_dir
        k_build["container"]["workingDir"] = manifest_dir

        return k_build
Example 8
    def _create_ui_build_task(self, task_template):
        ui_build = argo_build_util.deep_copy(task_template)

        ui_build["name"] = "build-shared-ui-library"
        ui_build["container"]["image"] = "node:12.20.1-stretch-slim"
        ui_build["container"]["command"] = ["npm"]
        ui_build["container"]["args"] = ["run", "build"]

        ui_dir = ("%s/components/crud-web-apps/common/"
                  "frontend/kubeflow-common-lib/") % self.src_dir
        ui_build["container"]["workingDir"] = ui_dir

        return ui_build
Example 9
    def _create_install_modules_task(self, task_template):
        install = argo_build_util.deep_copy(task_template)

        install["name"] = "npm-modules-install"
        install["container"]["image"] = "node:12.20.1-stretch-slim"
        install["container"]["command"] = ["npm"]
        install["container"]["args"] = ["ci"]

        ui_dir = ("%s/components/crud-web-apps/common/"
                  "frontend/kubeflow-common-lib/") % self.src_dir
        install["container"]["workingDir"] = ui_dir

        return install
Example 10
    def _create_make_dir_task(self, task_template):
        """Create the directory to store the artifacts of each task"""
        # (jlewi)
        # pytest was failing trying to call makedirs. My suspicion is that
        # the two steps ended up trying to create the directory at the
        # same time and clashing. So we create a separate step to do it.
        mkdir_step = argo_build_util.deep_copy(task_template)

        mkdir_step["name"] = self.mkdir_task_name
        mkdir_step["container"]["command"] = ["mkdir", "-p",
                                              self.artifacts_dir]

        return mkdir_step
Example 11
    def _create_exit_handler(self, task_template):
        ui_build = argo_build_util.deep_copy(task_template)

        ui_build["name"] = "rm-node-modules"
        ui_build["container"]["image"] = "node:12.20.1-stretch-slim"
        ui_build["container"]["command"] = ["rm"]
        ui_build["container"]["args"] = ["-r", "node_modules"]

        ui_dir = ("%s/components/crud-web-apps/common/"
                  "frontend/kubeflow-common-lib/") % self.src_dir
        ui_build["container"]["workingDir"] = ui_dir

        return ui_build
Example 12
    def _create_ui_tests_task(self, task_template):
        ui_tests = argo_build_util.deep_copy(task_template)

        img = "browserless/chrome:1.44-chrome-stable"
        ui_tests["name"] = "common-ui-tests"
        ui_tests["container"]["image"] = img
        ui_tests["container"]["command"] = ["npm"]
        ui_tests["container"]["args"] = ["run", "test-ci"]

        ui_dir = ("%s/components/crud-web-apps/common/"
                  "frontend/kubeflow-common-lib/") % self.src_dir
        ui_tests["container"]["workingDir"] = ui_dir

        return ui_tests
Example 13
    def create_kaniko_task(self,
                           task_template,
                           dockerfile,
                           context,
                           destination,
                           no_push=False):
        """
        A task for building images inside a cluster container using Kaniko.
        If we are testing the workflow locally then we won't be pushing images
        to any registries. This will make it easier for people to try out and
        extend the code.
        """
        kaniko = argo_build_util.deep_copy(task_template)
        # for short UUID generation
        alphabet = string.ascii_lowercase + string.digits

        # append a tag: the release version, or <base>-<sha[0:8]>
        if ":" not in destination:
            if self.release:
                with open(
                        os.path.join("/src/kubeflow/kubeflow",
                                     "releasing/version/VERSION")) as f:
                    version = f.read().strip()
                destination += ":%s" % version
            else:
                sha = os.getenv("PULL_BASE_SHA", "12341234kanikotest")
                base = os.getenv("PULL_BASE_REF", "master")
                destination += ":%s-%s" % (base, sha[0:8])

        # add short UUID to step name to ensure it is unique
        random_suffix = ''.join(random.choices(alphabet, k=8))
        kaniko["name"] = "kaniko-build-push-" + random_suffix
        kaniko["container"]["image"] = "gcr.io/kaniko-project/executor:v1.5.0"
        kaniko["container"]["command"] = ["/kaniko/executor"]
        kaniko["container"]["args"] = [
            "--dockerfile=%s" % dockerfile,
            "--context=%s" % context,
            "--destination=%s" % destination
        ]

        # don't push the image to a registry if trying out the produced
        # Argo Workflow yaml locally
        if LOCAL_TESTING == "True" or no_push:
            kaniko["container"]["args"].append("--no-push")

        return kaniko
Example 14
    def _build_step(self, name, workflow, dag_name, task_template, command,
                    dependencies):
        """Syntactic sugar to add a step to the workflow"""

        step = argo_build_util.deep_copy(task_template)

        step["name"] = name
        step["container"]["command"] = command

        argo_build_util.add_task_to_dag(workflow, dag_name, step, dependencies)

        # Return the newly created template; add_task_to_dag makes a copy of the
        # template, so we need to fetch it from the workflow spec.
        for t in workflow["spec"]["templates"]:
            if t["name"] == name:
                return t

        return None
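
The re-fetch matters because callers tweak the returned template afterwards: the dict comes straight out of workflow["spec"]["templates"], so mutations land in the rendered workflow (Example 19 relies on this to set workingDir). A brief hedged sketch, with an illustrative step name and dependency:

        # Sketch only; mirrors how later build() methods use the return value.
        delete_step = self._build_step("kfctl-delete", self.workflow, E2E_DAG_NAME,
                                       task_template,
                                       ["pytest", "kfctl_delete_test.py", "-s"],
                                       ["kubeflow-is-ready"])
        # Mutating the returned dict updates the template stored in the workflow spec.
        delete_step["container"]["workingDir"] = self.kfctl_pytest_dir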
Example 15
    def _create_checkout_task(self, task_template):
        """Checkout the kubeflow/testing and kubeflow/kubeflow code"""
        main_repo = argo_build_util.get_repo_from_prow_env()
        if not main_repo:
            logging.info("Prow environment variables for repo not set")
            main_repo = "kubeflow/testing@HEAD"
        logging.info("Main repository: %s", main_repo)
        repos = [main_repo]

        checkout = argo_build_util.deep_copy(task_template)

        checkout["name"] = "checkout"
        checkout["container"]["command"] = [
            "/usr/local/bin/checkout_repos.sh",
            "--repos=" + ",".join(repos),
            "--src_dir=" + self.src_root_dir,
        ]

        return checkout
Example 16
    def build(self):
        self.workflow = self._build_workflow()
        task_template = self._build_task_template()

        #**************************************************************************
        # Checkout

        # create the checkout step
        main_repo = argo_build_util.get_repo_from_prow_env()
        if not main_repo:
            logging.info("Prow environment variables for repo not set")
            main_repo = MAIN_REPO + "@HEAD"
        logging.info("Main repository: %s", main_repo)
        repos = [main_repo]

        repos.extend(EXTRA_REPOS)

        checkout = argo_build_util.deep_copy(task_template)

        checkout["name"] = "checkout"
        checkout["container"]["command"] = [
            "/usr/local/bin/checkout_repos.sh", "--repos=" + ",".join(repos),
            "--src_dir=" + self.src_root_dir
        ]

        argo_build_util.add_task_to_dag(self.workflow, E2E_DAG_NAME, checkout,
                                        [])

        # Change the working directory for all subsequent steps
        task_template["container"]["workingDir"] = os.path.join(
            self.kfctl_pytest_dir)

        #**************************************************************************
        # Run build_kfctl and deploy kubeflow

        step_name = "kfctl-build-deploy"
        command = [
            "pytest",
            "kfctl_go_test.py",
            # I think -s means stdout/stderr will print out to aid in debugging.
            # Failures still appear to be captured and stored in the junit file.
            "-s",
            "--config_path=" + self.config_path,
            "--build_and_apply=" + str(self.build_and_apply),
            # Increase the log level so that info level log statements show up.
            # TODO(https://github.com/kubeflow/testing/issues/372): If we
            # set a unique artifacts dir for each workflow with the proper
            # prefix that should work.
            "--log-cli-level=info",
            "--junitxml=" + self.artifacts_dir + "/junit_kfctl-build-test" +
            self.config_name + ".xml",
            # TODO(jlewi) Test suite name needs to be unique based on parameters.
            #
            "-o",
            "junit_suite_name=test_kfctl_go_deploy_" + self.config_name,
            "--app_path=" + self.app_dir,
        ]

        dependences = [checkout["name"]]
        build_kfctl = self._build_step(step_name, self.workflow, E2E_DAG_NAME,
                                       task_template, command, dependences)

        #**************************************************************************
        # Wait for Kubeflow to be ready
        step_name = "kubeflow-is-ready"
        command = [
            "pytest",
            "kf_is_ready_test.py",
            # I think -s means stdout/stderr will print out to aid in debugging.
            # Failures still appear to be captured and stored in the junit file.
            "-s",
            # TODO(jlewi): We should update kf_is_ready_test to take the config
            # path and then based on the KfDef spec kf_is_ready_test should
            # figure out what to do.
            "--use_basic_auth={0}".format(self.use_basic_auth),
            # TODO(jlewi): We should be using ISTIO always so can we stop
            # setting this
            "--use_istio=true",
            # Increase the log level so that info level log statements show up.
            "--log-cli-level=info",
            "--junitxml=" + os.path.join(
                self.artifacts_dir,
                "junit_kfctl-is-ready-test-" + self.config_name + ".xml"),
            # Test suite name needs to be unique based on parameters
            "-o",
            "junit_suite_name=test_kf_is_ready_" + self.config_name,
            "--app_path=" + self.app_dir,
        ]

        dependences = [build_kfctl["name"]]
        kf_is_ready = self._build_step(step_name, self.workflow, E2E_DAG_NAME,
                                       task_template, command, dependences)

        #**************************************************************************
        # Wait for endpoint to be ready
        if self.test_endpoint:
            step_name = "endpoint-is-ready"
            command = [
                "pytest",
                "endpoint_ready_test.py",
                # I think -s means stdout/stderr will print out to aid in debugging.
                # Failures still appear to be captured and stored in the junit file.
                "-s",
                # Increase the log level so that info level log statements show up.
                "--log-cli-level=info",
                # Test timeout in seconds.
                "--timeout=1800",
                "--junitxml=" + self.artifacts_dir +
                "/junit_endpoint-is-ready-test-" + self.config_name + ".xml",
                # Test suite name needs to be unique based on parameters
                "-o",
                "junit_suite_name=test_endpoint_is_ready_" + self.config_name,
                "--app_path=" + self.app_dir,
                "--app_name=" + self.app_name,
            ]

            dependencies = [build_kfctl["name"]]
            endpoint_ready = self._build_step(step_name, self.workflow,
                                              E2E_DAG_NAME, task_template,
                                              command, dependencies)

        self._build_tests_dag()

        # Add a task to run the dag
        dependencies = [kf_is_ready["name"]]
        argo_build_util.add_task_only_to_dag(self.workflow, E2E_DAG_NAME,
                                             TESTS_DAG_NAME, TESTS_DAG_NAME,
                                             dependencies)

        #***************************************************************************
        # create_pr_symlink
        #***************************************************************************
        # TODO(jlewi): run_e2e_workflow.py should probably create the PR symlink
        step_name = "create-pr-symlink"
        command = [
            "python",
            "-m",
            "kubeflow.testing.prow_artifacts",
            "--artifacts_dir=" + self.output_dir,
            "create_pr_symlink",
            "--bucket=" + self.bucket,
        ]

        dependences = [checkout["name"]]
        symlink = self._build_step(step_name, self.workflow, E2E_DAG_NAME,
                                   task_template, command, dependences)

        self._build_exit_dag()

        # Set the labels on all templates
        self.workflow = argo_build_util.set_task_template_labels(self.workflow)

        return self.workflow
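
Per the local-testing notes in Example 1, the returned workflow is a plain dict, so it can be dumped to YAML for inspection or a manual argo submit. A minimal sketch, assuming PyYAML and a stand-in class name Builder (the real class name and its constructor arguments are not shown in these excerpts):

import yaml  # PyYAML, assumed to be available

workflow = Builder().build()  # Builder is a placeholder; pass the real constructor args
with open("workflow.yaml", "w") as f:
    yaml.safe_dump(workflow, f, default_flow_style=False)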
Example 17
  def build(self):
    self.workflow = self._build_workflow()
    task_template = self._build_task_template()
    py3_template = argo_build_util.deep_copy(task_template)
    py3_template["container"]["image"] = "gcr.io/kubeflow-ci/test-worker-py3:e9afed1-dirty"

    #**************************************************************************
    # Checkout

    # create the checkout step

    checkout = argo_build_util.deep_copy(task_template)

    # Construct the list of repos to checkout
    list_of_repos = DEFAULT_REPOS
    list_of_repos.append(self.main_repo)
    list_of_repos.extend(self.extra_repos)
    repos = util.combine_repos(list_of_repos)
    repos_str = ','.join(['%s@%s' % (key, value) for (key, value) in repos.items()])


    # If we are using a specific branch (e.g. periodic tests for a release branch)
    # then we need to use depth=all; otherwise checking out the branch will
    # fail. Otherwise we check out with depth=30. We want more than depth=1
    # because the depth determines our ability to find the common ancestor,
    # which affects our ability to determine which files have changed.
    depth = 30
    if os.getenv("BRANCH_NAME"):
      logging.info("BRANCH_NAME=%s; setting detph=all",
                   os.getenv("BRANCH_NAME"))
      depth = "all"

    checkout["name"] = "checkout"
    checkout["container"]["command"] = ["/usr/local/bin/checkout_repos.sh",
                                        "--repos=" + repos_str,
                                        "--depth={0}".format(depth),
                                        "--src_dir=" + self.src_root_dir]

    argo_build_util.add_task_to_dag(self.workflow, E2E_DAG_NAME, checkout, [])

    # Change the working directory for all subsequent steps
    task_template["container"]["workingDir"] = os.path.join(
      self.kfctl_pytest_dir)
    py3_template["container"]["workingDir"] = os.path.join(self.kfctl_pytest_dir)

    #**************************************************************************
    # Run build_kfctl and deploy kubeflow

    step_name = "kfctl-build-deploy"
    command = [
        "pytest",
        "kfctl_go_test.py",
        # I think -s means stdout/stderr will print out to aid in debugging.
        # Failures still appear to be captured and stored in the junit file.
        "-s",
        "--app_name=" + self.app_name,
        "--config_path=" + self.config_path,
        "--values=" + self.values_str,
        "--build_and_apply=" + str(self.build_and_apply),
        # Increase the log level so that info level log statements show up.
        # TODO(https://github.com/kubeflow/testing/issues/372): If we
        # set a unique artifacts dir for each workflow with the proper
        # prefix that should work.
        "--log-cli-level=info",
        "--junitxml=" + self.artifacts_dir + "/junit_kfctl-build-test"
        + self.config_name + ".xml",
        # TODO(jlewi) Test suite name needs to be unique based on parameters.
        #
        "-o", "junit_suite_name=test_kfctl_go_deploy_" + self.config_name,
        "--app_path=" + self.app_dir,
        "--kfctl_repo_path=" + self.src_dir,
        "--self_signed_cert=True",
    ]

    dependences = [checkout["name"]]
    build_kfctl = self._build_step(step_name, self.workflow, E2E_DAG_NAME,
                                   py3_template, command, dependences)

    #**************************************************************************
    # Wait for Kubeflow to be ready
    step_name = "kubeflow-is-ready"
    command = [
           "pytest",
           "kf_is_ready_test.py",
           # I think -s means stdout/stderr will print out to aid in debugging.
           # Failures still appear to be captured and stored in the junit file.
           "-s",
           # TODO(jlewi): We should update kf_is_ready_test to take the config
           # path and then based on the KfDef spec kf_is_ready_test should
           # figure out what to do.
           "--use_basic_auth={0}".format(self.use_basic_auth),
           # TODO(jlewi): We should be using ISTIO always so can we stop
           # setting this
           "--use_istio=true",
           # Increase the log level so that info level log statements show up.
           "--log-cli-level=info",
           "--junitxml=" + os.path.join(self.artifacts_dir,
                                        "junit_kfctl-is-ready-test-" +
                                        self.config_name + ".xml"),
           # Test suite name needs to be unique based on parameters
           "-o", "junit_suite_name=test_kf_is_ready_" + self.config_name,
           "--app_path=" + self.app_dir,
         ]

    dependences = [build_kfctl["name"]]
    kf_is_ready = self._build_step(step_name, self.workflow, E2E_DAG_NAME, task_template,
                                   command, dependences)


    #**************************************************************************
    # Wait for endpoint to be ready
    if self.test_endpoint:
      self._test_endpoint_step_name = "endpoint-is-ready"
      command = ["pytest",
                 "endpoint_ready_test.py",
                 # I think -s means stdout/stderr will print out to aid in debugging.
                 # Failures still appear to be captured and stored in the junit file.
                 "-s",
                 # Increase the log level so that info level log statements show up.
                 "--log-cli-level=info",
                 "--junitxml=" + self.artifacts_dir + "/junit_endpoint-is-ready-test-" + self.config_name + ".xml",
                 # Test suite name needs to be unique based on parameters
                 "-o", "junit_suite_name=test_endpoint_is_ready_" + self.config_name,
                 "--app_path=" + self.app_dir,
                 "--app_name=" + self.app_name,
                 "--use_basic_auth={0}".format(self.use_basic_auth),
              ]

      dependencies = [build_kfctl["name"]]
      endpoint_ready = self._build_step(self._test_endpoint_step_name,
                                        self.workflow, E2E_DAG_NAME, py3_template,
                                        command, dependencies)
      self._test_endpoint_template_name = endpoint_ready["name"]

    #**************************************************************************
    # Do kfctl apply again. This test will be skipped if it's a presubmit.
    step_name = "kfctl-second-apply"
    command = [
           "pytest",
           "kfctl_second_apply.py",
           # I think -s means stdout/stderr will print out to aid in debugging.
           # Failures still appear to be captured and stored in the junit file.
           "-s",
           "--log-cli-level=info",
           "--junitxml=" + os.path.join(self.artifacts_dir,
                                        "junit_kfctl-second-apply-test-" +
                                        self.config_name + ".xml"),
           # Test suite name needs to be unique based on parameters
           "-o", "junit_suite_name=test_kfctl_second_apply_" + self.config_name,
           "--app_path=" + self.app_dir,
           "--kfctl_path=" + self.kfctl_path,
         ]
    if self.test_endpoint:
      dependences = [kf_is_ready["name"], endpoint_ready["name"]]
    else:
      dependences = [kf_is_ready["name"]]

    kf_second_apply = self._build_step(step_name, self.workflow, E2E_DAG_NAME, task_template,
                                       command, dependences)

    self._build_tests_dag()

    # Add a task to run the dag
    dependencies = [kf_is_ready["name"]]
    self._run_tests_step_name = TESTS_DAG_NAME
    run_tests_template_name = TESTS_DAG_NAME
    argo_build_util.add_task_only_to_dag(self.workflow, E2E_DAG_NAME, self._run_tests_step_name,
                                         run_tests_template_name,
                                         dependencies)

    #***************************************************************************
    # create_pr_symlink
    #***************************************************************************
    # TODO(jlewi): run_e2e_workflow.py should probably create the PR symlink
    step_name = "create-pr-symlink"
    command = ["python",
               "-m",
               "kubeflow.testing.prow_artifacts",
               "--artifacts_dir=" + self.output_dir,
               "create_pr_symlink"]

    if self.bucket:
      command.append("--bucket=" + self.bucket)

    dependences = [checkout["name"]]
    symlink = self._build_step(step_name, self.workflow, E2E_DAG_NAME, task_template,
                               command, dependences)

    self._build_exit_dag()


    # Set the labels on all templates
    self.workflow = argo_build_util.set_task_template_labels(self.workflow)

    return self.workflow
Example 18
  def build(self):
    workflow = self._build_workflow()
    task_template = self._build_task_template()

    #**************************************************************************
    # Checkout

    # create the checkout step
    main_repo = argo_build_util.get_repo_from_prow_env()
    if not main_repo:
      logging.info("Prow environment variables for repo not set")
      main_repo = "kubeflow/testing@HEAD"
    logging.info("Main repository: %s", main_repo)
    repos = [main_repo]

    checkout = argo_build_util.deep_copy(task_template)

    checkout["name"] = "checkout"
    checkout["container"]["command"] = ["/usr/local/bin/checkout_repos.sh",
                                        "--repos=" + ",".join(repos),
                                        "--src_dir=" + self.src_root_dir]

    argo_build_util.add_task_to_dag(workflow, E2E_DAG_NAME, checkout, [])

    #**************************************************************************
    # Make dir
    # pytest was failing trying to call makedirs. My suspicion is that
    # the two steps ended up trying to create the directory at the
    # same time and clashing. So we create a separate step to do it.
    mkdir_step = argo_build_util.deep_copy(task_template)

    mkdir_step["name"] = "make-artifacts-dir"
    mkdir_step["container"]["command"] = ["mkdir",
                                          "-p",
                                          self.artifacts_dir]


    argo_build_util.add_task_to_dag(workflow, E2E_DAG_NAME, mkdir_step,
                                    [checkout["name"]])

    #**************************************************************************
    # Run python unittests
    py_tests = argo_build_util.deep_copy(task_template)

    py_tests["name"] = "py-test"
    py_tests["container"]["command"] = ["python",
                                        "-m",
                                        "kubeflow.testing.test_py_checks",
                                        "--artifacts_dir=" + self.artifacts_dir,
                                        # TODO(jlewi): Should we be searching
                                        # the entire py/kubeflow/testing tree?
                                        "--src_dir=" + self.kubeflow_testing_py
                                        + "kubeflow/tests"]


    argo_build_util.add_task_to_dag(workflow, E2E_DAG_NAME, py_tests,
                                    [mkdir_step["name"]])


    #***************************************************************************
    # py lint
    #***************************************************************************
    py_lint = argo_build_util.deep_copy(task_template)

    py_lint["name"] = "py-lint"
    py_lint["container"]["command"] = ["pytest",
                                       "test_py_lint.py",
                                       # I think -s means stdout/stderr will
                                       # print out to aid in debugging.
                                       # Failures still appear to be captured
                                       # and stored in the junit file.
                                       "-s",
                                       "--src_dir=" + self.kubeflow_testing_py,
                                       "--rcfile=" + os.path.join(
                                         self.testing_src_dir, ".pylintrc"),
                                       # Test timeout in seconds.
                                       "--timeout=500",
                                       "--junitxml=" + self.artifacts_dir +
                                       "/junit_py-lint.xml"]

    py_lint_step = argo_build_util.add_task_to_dag(workflow, E2E_DAG_NAME,
                                                   py_lint,
                                                   [mkdir_step["name"]])

    py_lint_step["container"]["workingDir"] = os.path.join(
      self.testing_src_dir, "py/kubeflow/testing")

    #*****************************************************************************
    # create_pr_symlink
    #****************************************************************************
    # TODO(jlewi): run_e2e_workflow.py should probably create the PR symlink
    symlink = argo_build_util.deep_copy(task_template)

    symlink["name"] = "create-pr-symlink"
    symlink["container"]["command"] = ["python",
                                       "-m",
                                       "kubeflow.testing.prow_artifacts",
                                       "--artifacts_dir=" + self.output_dir,
                                       "create_pr_symlink",
                                       ]

    if self.bucket:
      symlink["container"]["command"].append("--bucket=" + self.bucket)

    argo_build_util.add_task_to_dag(workflow, E2E_DAG_NAME, symlink,
                                    [checkout["name"]])

    #*****************************************************************************
    # Exit handler workflow
    #*****************************************************************************
    copy_artifacts = argo_build_util.deep_copy(task_template)

    copy_artifacts["name"] = "copy-artifacts"
    copy_artifacts["container"]["command"] = ["python",
                                              "-m",
                                              "kubeflow.testing.prow_artifacts",
                                              "--artifacts_dir=" +
                                              self.output_dir,
                                              "copy_artifacts"]

    if self.bucket:
      copy_artifacts["container"]["command"].append("--bucket=" + self.bucket)


    argo_build_util.add_task_to_dag(workflow, EXIT_DAG_NAME, copy_artifacts, [])


    # Set the labels on all templates
    workflow = argo_build_util.set_task_template_labels(workflow)

    return workflow
Example 19
    def build(self):
        self.workflow = self._build_workflow()
        task_template = self._build_task_template()
        py3_template = argo_build_util.deep_copy(task_template)
        py3_template["container"][
            "image"] = "527798164940.dkr.ecr.us-west-2.amazonaws.com/aws-kubeflow-ci/test-worker:v1.2-branch"
        default_namespace = "kubeflow"

        #**************************************************************************
        # Checkout
        # create the checkout step

        checkout = argo_build_util.deep_copy(task_template)

        # Construct the list of repos to checkout
        list_of_repos = DEFAULT_REPOS
        list_of_repos.append(self.main_repo)
        list_of_repos.extend(self.extra_repos)
        repos = util.combine_repos(list_of_repos)
        repos_str = ','.join(
            ['%s@%s' % (key, value) for (key, value) in repos.items()])

        # If we are using a specific branch (e.g. periodic tests for a release branch)
        # then we need to use depth=all; otherwise checking out the branch will
        # fail. Otherwise we check out with depth=30. We want more than depth=1
        # because the depth determines our ability to find the common ancestor,
        # which affects our ability to determine which files have changed.
        depth = 30
        if os.getenv("BRANCH_NAME"):
            logging.info("BRANCH_NAME=%s; setting detph=all",
                         os.getenv("BRANCH_NAME"))
            depth = "all"

        checkout["name"] = "checkout"
        checkout["container"]["command"] = [
            "/usr/local/bin/checkout_repos.sh", "--repos=" + repos_str,
            "--depth={0}".format(depth), "--src_dir=" + self.src_root_dir
        ]

        argo_build_util.add_task_to_dag(self.workflow, E2E_DAG_NAME, checkout,
                                        [])

        # Change the working directory for all subsequent steps
        task_template["container"]["workingDir"] = os.path.join(
            self.kfctl_pytest_dir)
        py3_template["container"]["workingDir"] = os.path.join(
            self.kfctl_pytest_dir)

        #***************************************************************************
        # create_pr_symlink
        #***************************************************************************
        # TODO(jlewi): run_e2e_workflow.py should probably create the PR symlink
        step_name = "create-pr-symlink"
        command = [
            "python", "-m",
            "kubeflow.testing.cloudprovider.aws.prow_artifacts",
            "--artifacts_dir=" + self.output_dir, "create_pr_symlink_s3",
            "--bucket=" + self.bucket
        ]

        dependences = [checkout["name"]]
        symlink = self._build_step(step_name, self.workflow, E2E_DAG_NAME,
                                   task_template, command, dependences)

        #**************************************************************************
        # Run build_kfctl

        step_name = "kfctl-build-deploy"
        command = [
            "pytest",
            "kfctl_go_test.py",
            # I think -s means stdout/stderr will print out to aid in debugging.
            # Failures still appear to be captured and stored in the junit file.
            "-s",
            "--config_path=" + self.config_path,
            "--values=" + self.values_str,
            # Increase the log level so that info level log statements show up.
            # TODO(https://github.com/kubeflow/testing/issues/372): If we
            # set a unique artifacts dir for each workflow with the proper
            # prefix that should work.
            "--log-cli-level=info",
            "--junitxml=" + self.artifacts_dir + "/junit_kfctl-build-test" +
            self.config_name + ".xml",
            # TODO(jlewi) Test suite name needs to be unique based on parameters.
            "-o",
            "junit_suite_name=test_kfctl_go_deploy_" + self.config_name,
            "--kfctl_repo_path=" + self.src_dir,
        ]

        dependences = [checkout["name"]]
        build_kfctl = self._build_step(step_name, self.workflow, E2E_DAG_NAME,
                                       py3_template, command, dependences)

        #**************************************************************************
        # Create EKS cluster for E2E test
        step_name = "kfctl-create-cluster"
        command = [
            "pytest",
            "kfctl_create_cluster_test.py",
            # I think -s means stdout/stderr will print out to aid in debugging.
            # Failures still appear to be captured and stored in the junit file.
            "-s",
            "--cluster_name=" + self.cluster_name,
            "--eks_cluster_version=" + str(self.eks_cluster_version),
            # Embedded Script in the ECR Image
            "--cluster_creation_script=" +
            "/usr/local/bin/create-eks-cluster.sh",
            "--values=" + self.values_str,
            # Increase the log level so that info level log statements show up.
            # TODO(https://github.com/kubeflow/testing/issues/372): If we
            # set a unique artifacts dir for each workflow with the proper
            # prefix that should work.
            "--log-cli-level=info",
            "--junitxml=" + self.artifacts_dir + "/junit_kfctl-build-test" +
            self.config_name + ".xml",
            # TODO(jlewi) Test suite name needs to be unique based on parameters.
            "-o",
            "junit_suite_name=test_kfctl_go_deploy_" + self.config_name,
        ]

        dependences = [checkout["name"]]
        create_cluster = self._build_step(step_name, self.workflow,
                                          E2E_DAG_NAME, py3_template, command,
                                          dependences)

        #**************************************************************************
        # Deploy Kubeflow
        step_name = "kfctl-deploy-kubeflow"
        command = [
            "pytest",
            "kfctl_deploy_kubeflow_test.py",
            # I think -s means stdout/stderr will print out to aid in debugging.
            # Failures still appear to be captured and stored in the junit file.
            "-s",
            "--cluster_name=" + self.cluster_name,
            # Embedded Script in the ECR Image
            "--cluster_creation_script=" +
            "/usr/local/bin/create-eks-cluster.sh",
            "--config_path=" + self.config_path,
            "--values=" + self.values_str,
            "--build_and_apply=" + str(self.build_and_apply),
            # Increase the log level so that info level log statements show up.
            # TODO(https://github.com/kubeflow/testing/issues/372): If we
            # set a unique artifacts dir for each workflow with the proper
            # prefix that should work.
            "--log-cli-level=info",
            "--junitxml=" + self.artifacts_dir + "/junit_kfctl-build-test" +
            self.config_name + ".xml",
            # TODO(jlewi) Test suite name needs to be unique based on parameters.
            "-o",
            "junit_suite_name=test_kfctl_go_deploy_" + self.config_name,
            "--app_path=" + self.app_dir,
            "--kfctl_repo_path=" + self.src_dir,
        ]

        dependences = [
            build_kfctl["name"], create_cluster["name"], symlink["name"]
        ]
        deploy_kf = self._build_step(step_name, self.workflow, E2E_DAG_NAME,
                                     py3_template, command, dependences)

        #**************************************************************************
        # Wait for Kubeflow to be ready
        step_name = "kubeflow-is-ready"
        command = [
            "pytest",
            "kf_is_ready_test.py",
            # I think -s means stdout/stderr will print out to aid in debugging.
            # Failures still appear to be captured and stored in the junit file.
            "-s",
            # TODO(jlewi): We should update kf_is_ready_test to take the config
            # path and then based on the KfDef spec kf_is_ready_test should
            # figure out what to do.
            "--use_basic_auth={0}".format(self.use_basic_auth),
            # Increase the log level so that info level log statements show up.
            "--log-cli-level=info",
            "--junitxml=" + os.path.join(
                self.artifacts_dir,
                "junit_kfctl-is-ready-test-" + self.config_name + ".xml"),
            # Test suite name needs to be unique based on parameters
            "-o",
            "junit_suite_name=test_kf_is_ready_" + self.config_name,
            "--app_path=" + self.app_dir,
            "--cluster_name=" + self.cluster_name,
            "--namespace=" + default_namespace,
        ]

        dependences = [deploy_kf["name"]]
        kf_is_ready = self._build_step(step_name, self.workflow, E2E_DAG_NAME,
                                       task_template, command, dependences)

        #**************************************************************************
        # Run functional tests
        dependences = [kf_is_ready["name"]]
        dependences = self._build_tests_dag(dependences=dependences)

        #***********************************************************************
        # Delete Kubeflow
        # Putting Delete Kubeflow here because deletion functionality should be tested outside of the exit DAG
        step_name = "kfctl-delete-wrong-host"
        command = [
            "pytest",
            "kfctl_delete_wrong_cluster.py",
            "-s",
            "--log-cli-level=info",
            "--timeout=1000",
            "--junitxml=" + self.artifacts_dir +
            "/junit_kfctl-go-delete-wrong-cluster-test.xml",
            "--app_path=" + self.app_dir,
            "--kfctl_path=" + self.kfctl_path,
            "--cluster_name=" + self.cluster_name,
        ]

        kfctl_delete_wrong_cluster = self._build_step(step_name, self.workflow,
                                                      E2E_DAG_NAME,
                                                      task_template, command,
                                                      dependences)
        kfctl_delete_wrong_cluster["container"][
            "workingDir"] = self.kfctl_pytest_dir

        step_name = "kfctl-delete"
        command = [
            "pytest",
            "kfctl_delete_test.py",
            "-s",
            "--log-cli-level=info",
            "--timeout=1000",
            "--junitxml=" + self.artifacts_dir +
            "/junit_kfctl-go-delete-test.xml",
            "--app_path=" + self.app_dir,
            "--kfctl_path=" + self.kfctl_path,
            "--cluster_name=" + self.cluster_name,
        ]

        kfctl_delete = self._build_step(step_name, self.workflow, E2E_DAG_NAME,
                                        task_template, command,
                                        ["kfctl-delete-wrong-host"])
        kfctl_delete["container"]["workingDir"] = self.kfctl_pytest_dir

        #***************************************************************************
        # Exit DAG
        #***************************************************************************
        self._build_exit_dag()

        # Set the labels on all templates
        self.workflow = argo_build_util.set_task_template_labels(self.workflow)

        return self.workflow
Example 20
    def build(self):
        self.workflow = self._build_workflow()
        task_template = self._build_task_template()

        # **************************************************************************
        # Checkout

        # create the checkout step
        main_repo = argo_build_util.get_repo_from_prow_env()
        if not main_repo:
            logging.info("Prow environment variables for repo not set")
            main_repo = MAIN_REPO + "@HEAD"
        logging.info("Main repository: %s", main_repo)
        repos = [main_repo]

        repos.extend(EXTRA_REPOS)

        #***************************************************************************
        # Checkout the code
        checkout = argo_build_util.deep_copy(task_template)

        checkout["name"] = "checkout"
        checkout["container"]["command"] = [
            "/usr/local/bin/checkout_repos.sh", "--repos=" + ",".join(repos),
            "--src_dir=" + self.src_root_dir
        ]

        argo_build_util.add_task_to_dag(self.workflow, E2E_DAG_NAME, checkout,
                                        [])

        #***************************************************************************
        # Get credentials for the latest auto-deployed cluster

        credentials = argo_build_util.deep_copy(task_template)

        credentials["name"] = "get-credentials"
        credentials["container"]["command"] = [
            "python3",
            "-m",
            "kubeflow.testing."
            "get_kf_testing_cluster",
            "get-credentials",
        ]

        dependencies = [checkout["name"]]
        argo_build_util.add_task_to_dag(self.workflow, E2E_DAG_NAME,
                                        credentials, dependencies)

        #**************************************************************************
        # Run a dag of tests
        self._build_tests_dag()

        # Add a task to run the dag
        dependencies = [credentials["name"]]
        argo_build_util.add_task_only_to_dag(self.workflow, E2E_DAG_NAME,
                                             TESTS_DAG_NAME, TESTS_DAG_NAME,
                                             dependencies)

        # **************************************************************************
        # create_pr_symlink
        # ***************************************************************************
        # TODO(jlewi): run_e2e_workflow.py should probably create the PR symlink
        step_name = "create-pr-symlink"
        command = [
            "python", "-m", "kubeflow.testing.prow_artifacts",
            "--artifacts_dir=" + self.output_dir, "create_pr_symlink"
        ]

        if self.bucket:
            command.append("--bucket=" + self.bucket)

        dependencies = [checkout["name"]]
        self._build_step(step_name, self.workflow, E2E_DAG_NAME, task_template,
                         command, dependencies)

        self._build_exit_dag()

        # Set the labels on all templates
        self.workflow = argo_build_util.set_task_template_labels(self.workflow)

        return self.workflow