    def build(self):
        self.workflow = super(Builder, self).build()
        task_template = self._build_task_template()

        # Change the workflow_template labels
        self.workflow["metadata"]["labels"][
            "workflow_template"] = TEMPLATE_LABEL

        # Add the dag to upgrade Kubeflow to the workflow
        self._build_upgrade_dag()

        # Add a task to the E2E dag to run the dag to upgrade Kubeflow.
        dependencies = [self._run_tests_step_name]

        if self._test_endpoint_step_name:
            dependencies.append(self._test_endpoint_step_name)

        step_name = UPGRADE_DAG_NAME
        template_name = UPGRADE_DAG_NAME
        argo_build_util.add_task_only_to_dag(self.workflow,
                                             kfctl_e2e_workflow.E2E_DAG_NAME,
                                             step_name, template_name,
                                             dependencies)

        #****************************************************************************
        # Add tests DAG
        #****************************************************************************
        # After running the upgrade, rerun the DAG(s) that validate the deployment is healthy.

        step_name = "test-after-upgrade"
        template_name = kfctl_e2e_workflow.TESTS_DAG_NAME
        dependencies = [UPGRADE_DAG_NAME]
        argo_build_util.add_task_only_to_dag(self.workflow,
                                             kfctl_e2e_workflow.E2E_DAG_NAME,
                                             step_name, template_name,
                                             dependencies)

        # Test the endpoint after upgrade
        if self.test_endpoint:
            dependencies = [UPGRADE_DAG_NAME]
            step_name = "upgraded-endpoint-ready"
            argo_build_util.add_task_only_to_dag(
                self.workflow, kfctl_e2e_workflow.E2E_DAG_NAME, step_name,
                self._test_endpoint_template_name, dependencies)

        # Reset the labels on all templates to pick up the updated workflow template label
        self.workflow = argo_build_util.set_task_template_labels(self.workflow)

        return self.workflow
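# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original examples): each build() above
# returns the Argo Workflow as a plain Python dict, so a caller could
# serialize it to YAML before submitting it with the Argo CLI or the
# Kubernetes API. Assumes PyYAML is installed; "builder" stands in for any
# already-constructed Builder instance (constructor arguments omitted here).
import yaml


def dump_workflow(builder, path):
    """Write the dict returned by builder.build() to a YAML file."""
    workflow = builder.build()
    with open(path, "w") as f:
        yaml.safe_dump(workflow, f, default_flow_style=False)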
Example #2
    def build(self):
        self.workflow = self._build_workflow()
        task_template = self._build_task_template()

        #**************************************************************************
        # Checkout

        # create the checkout step
        main_repo = argo_build_util.get_repo_from_prow_env()
        if not main_repo:
            logging.info("Prow environment variables for repo not set")
            main_repo = MAIN_REPO + "@HEAD"
        logging.info("Main repository: %s", main_repo)
        repos = [main_repo]

        repos.extend(EXTRA_REPOS)

        checkout = argo_build_util.deep_copy(task_template)

        checkout["name"] = "checkout"
        checkout["container"]["command"] = [
            "/usr/local/bin/checkout_repos.sh", "--repos=" + ",".join(repos),
            "--src_dir=" + self.src_root_dir
        ]

        argo_build_util.add_task_to_dag(self.workflow, E2E_DAG_NAME, checkout,
                                        [])

        # Change the working directory for all subsequent steps
        task_template["container"]["workingDir"] = os.path.join(
            self.kfctl_pytest_dir)

        #**************************************************************************
        # Run build_kfctl and deploy kubeflow

        step_name = "kfctl-build-deploy"
        command = [
            "pytest",
            "kfctl_go_test.py",
            # I think -s means stdout/stderr will print out to aid in debugging.
            # Failures still appear to be captured and stored in the junit file.
            "-s",
            "--config_path=" + self.config_path,
            "--build_and_apply=" + str(self.build_and_apply),
            # Increase the log level so that info level log statements show up.
            # TODO(https://github.com/kubeflow/testing/issues/372): If we
            # set a unique artifacts dir for each workflow with the proper
            # prefix that should work.
            "--log-cli-level=info",
            "--junitxml=" + self.artifacts_dir + "/junit_kfctl-build-test" +
            self.config_name + ".xml",
            # TODO(jlewi) Test suite name needs to be unique based on parameters.
            #
            "-o",
            "junit_suite_name=test_kfctl_go_deploy_" + self.config_name,
            "--app_path=" + self.app_dir,
        ]

        dependencies = [checkout["name"]]
        build_kfctl = self._build_step(step_name, self.workflow, E2E_DAG_NAME,
                                       task_template, command, dependencies)

        #**************************************************************************
        # Wait for Kubeflow to be ready
        step_name = "kubeflow-is-ready"
        command = [
            "pytest",
            "kf_is_ready_test.py",
            # I think -s means stdout/stderr will print out to aid in debugging.
            # Failures still appear to be captured and stored in the junit file.
            "-s",
            # TODO(jlewi): We should update kf_is_ready_test to take the config
            # path and then based on the KfDef spec kf_is_ready_test should
            # figure out what to do.
            "--use_basic_auth={0}".format(self.use_basic_auth),
            # TODO(jlewi): We should be using ISTIO always so can we stop
            # setting this
            "--use_istio=true",
            # Increase the log level so that info level log statements show up.
            "--log-cli-level=info",
            "--junitxml=" + os.path.join(
                self.artifacts_dir,
                "junit_kfctl-is-ready-test-" + self.config_name + ".xml"),
            # Test suite name needs to be unique based on parameters
            "-o",
            "junit_suite_name=test_kf_is_ready_" + self.config_name,
            "--app_path=" + self.app_dir,
        ]

        dependencies = [build_kfctl["name"]]
        kf_is_ready = self._build_step(step_name, self.workflow, E2E_DAG_NAME,
                                       task_template, command, dependencies)

        #**************************************************************************
        # Wait for endpoint to be ready
        if self.test_endpoint:
            step_name = "endpoint-is-ready"
            command = [
                "pytest",
                "endpoint_ready_test.py",
                # I think -s means stdout/stderr will print out to aid in debugging.
                # Failures still appear to be captured and stored in the junit file.
                "-s",
                # Increase the log level so that info level log statements show up.
                "--log-cli-level=info",
                # Test timeout in seconds.
                "--timeout=1800",
                "--junitxml=" + self.artifacts_dir +
                "/junit_endpoint-is-ready-test-" + self.config_name + ".xml",
                # Test suite name needs to be unique based on parameters
                "-o",
                "junit_suite_name=test_endpoint_is_ready_" + self.config_name,
                "--app_path=" + self.app_dir,
                "--app_name=" + self.app_name,
            ]

            dependencies = [build_kfctl["name"]]
            endpoint_ready = self._build_step(step_name, self.workflow,
                                              E2E_DAG_NAME, task_template,
                                              command, dependencies)

        self._build_tests_dag()

        # Add a task to run the dag
        dependencies = [kf_is_ready["name"]]
        argo_build_util.add_task_only_to_dag(self.workflow, E2E_DAG_NAME,
                                             TESTS_DAG_NAME, TESTS_DAG_NAME,
                                             dependencies)

        #***************************************************************************
        # create_pr_symlink
        #***************************************************************************
        # TODO(jlewi): run_e2e_workflow.py should probably create the PR symlink
        step_name = "create-pr-symlink"
        command = [
            "python",
            "-m",
            "kubeflow.testing.prow_artifacts",
            "--artifacts_dir=" + self.output_dir,
            "create_pr_symlink",
            "--bucket=" + self.bucket,
        ]

        dependencies = [checkout["name"]]
        symlink = self._build_step(step_name, self.workflow, E2E_DAG_NAME,
                                   task_template, command, dependencies)

        self._build_exit_dag()

        # Set the labels on all templates
        self.workflow = argo_build_util.set_task_template_labels(self.workflow)

        return self.workflow
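# ---------------------------------------------------------------------------
# Illustrative sketch (not the actual kubeflow.testing implementation): a
# helper like argo_build_util.add_task_only_to_dag conceptually appends a
# task entry (name, referenced template, dependencies) to the DAG template
# with the given name inside the workflow dict. A minimal stand-in:
def add_task_to_named_dag(workflow, dag_name, task_name, template_name,
                          dependencies):
    """Append a task node to the DAG template called dag_name."""
    for template in workflow["spec"]["templates"]:
        if template.get("name") == dag_name and "dag" in template:
            template["dag"].setdefault("tasks", []).append({
                "name": task_name,
                "template": template_name,
                "dependencies": dependencies,
            })
            return workflow
    raise ValueError("No DAG template named {0} in the workflow".format(
        dag_name))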
    def build(self):
        self.workflow = self._build_workflow()
        task_template = self._build_task_template()

        # **************************************************************************
        # Checkout

        # create the checkout step
        main_repo = argo_build_util.get_repo_from_prow_env()
        if not main_repo:
            logging.info("Prow environment variables for repo not set")
            main_repo = MAIN_REPO + "@HEAD"
        logging.info("Main repository: %s", main_repo)
        repos = [main_repo]

        repos.extend(EXTRA_REPOS)

        #***************************************************************************
        # Checkout the code
        checkout = argo_build_util.deep_copy(task_template)

        checkout["name"] = "checkout"
        checkout["container"]["command"] = [
            "/usr/local/bin/checkout_repos.sh", "--repos=" + ",".join(repos),
            "--src_dir=" + self.src_root_dir
        ]

        argo_build_util.add_task_to_dag(self.workflow, E2E_DAG_NAME, checkout,
                                        [])

        #***************************************************************************
        # Get credentials for the latest auto-deployed cluster

        credentials = argo_build_util.deep_copy(task_template)

        credentials["name"] = "get-credentials"
        credentials["container"]["command"] = [
            "python3",
            "-m",
            "kubeflow.testing."
            "get_kf_testing_cluster",
            "get-credentials",
        ]

        dependencies = [checkout["name"]]
        argo_build_util.add_task_to_dag(self.workflow, E2E_DAG_NAME,
                                        credentials, dependencies)

        #**************************************************************************
        # Run a dag of tests
        self._build_tests_dag()

        # Add a task to run the dag
        dependencies = [credentials["name"]]
        argo_build_util.add_task_only_to_dag(self.workflow, E2E_DAG_NAME,
                                             TESTS_DAG_NAME, TESTS_DAG_NAME,
                                             dependencies)

        # **************************************************************************
        # create_pr_symlink
        # ***************************************************************************
        # TODO(jlewi): run_e2e_workflow.py should probably create the PR symlink
        step_name = "create-pr-symlink"
        command = [
            "python", "-m", "kubeflow.testing.prow_artifacts",
            "--artifacts_dir=" + self.output_dir, "create_pr_symlink"
        ]

        if self.bucket:
            # Pass the bucket via the --bucket flag that prow_artifacts expects.
            command.append("--bucket=" + self.bucket)

        dependencies = [checkout["name"]]
        self._build_step(step_name, self.workflow, E2E_DAG_NAME, task_template,
                         command, dependencies)

        self._build_exit_dag()

        # Set the labels on all templates
        self.workflow = argo_build_util.set_task_template_labels(self.workflow)

        return self.workflow
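# ---------------------------------------------------------------------------
# Illustrative sketch (not the actual kubeflow.testing helper): Prow exposes
# the repository under test through environment variables, so a function
# like argo_build_util.get_repo_from_prow_env can assemble an "org/name@sha"
# string from them and return an empty value when they are not set, which is
# why the examples above fall back to MAIN_REPO + "@HEAD".
import os


def repo_from_prow_env():
    """Return "org/name@sha" built from Prow env vars, or "" if unset."""
    owner = os.getenv("REPO_OWNER", "")
    name = os.getenv("REPO_NAME", "")
    if not owner or not name:
        return ""
    sha = os.getenv("PULL_PULL_SHA") or os.getenv("PULL_BASE_SHA") or "HEAD"
    return "{0}/{1}@{2}".format(owner, name, sha)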
  def build(self):
    self.workflow = self._build_workflow()
    task_template = self._build_task_template()
    py3_template = argo_build_util.deep_copy(task_template)
    py3_template["container"]["image"] = "gcr.io/kubeflow-ci/test-worker-py3:e9afed1-dirty"

    #**************************************************************************
    # Checkout

    # create the checkout step

    checkout = argo_build_util.deep_copy(task_template)

    # Construct the list of repos to checkout
    list_of_repos = DEFAULT_REPOS
    list_of_repos.append(self.main_repo)
    list_of_repos.extend(self.extra_repos)
    repos = util.combine_repos(list_of_repos)
    repos_str = ','.join(['%s@%s' % (key, value) for (key, value) in repos.items()])


    # If we are using a specific branch (e.g. periodic tests for a release
    # branch) then we need to use depth=all; otherwise checking out the
    # branch will fail. Otherwise we check out with depth=30. We want more
    # than depth=1 because the depth determines our ability to find the
    # common ancestor, which affects our ability to determine which files
    # have changed.
    depth = 30
    if os.getenv("BRANCH_NAME"):
      logging.info("BRANCH_NAME=%s; setting detph=all",
                   os.getenv("BRANCH_NAME"))
      depth = "all"

    checkout["name"] = "checkout"
    checkout["container"]["command"] = ["/usr/local/bin/checkout_repos.sh",
                                        "--repos=" + repos_str,
                                        "--depth={0}".format(depth),
                                        "--src_dir=" + self.src_root_dir]

    argo_build_util.add_task_to_dag(self.workflow, E2E_DAG_NAME, checkout, [])

    # Change the working directory for all subsequent steps
    task_template["container"]["workingDir"] = os.path.join(
      self.kfctl_pytest_dir)
    py3_template["container"]["workingDir"] = os.path.join(self.kfctl_pytest_dir)

    #**************************************************************************
    # Run build_kfctl and deploy kubeflow

    step_name = "kfctl-build-deploy"
    command = [
        "pytest",
        "kfctl_go_test.py",
        # I think -s means stdout/stderr will print out to aid in debugging.
        # Failures still appear to be captured and stored in the junit file.
        "-s",
        "--app_name=" + self.app_name,
        "--config_path=" + self.config_path,
        "--values=" + self.values_str,
        "--build_and_apply=" + str(self.build_and_apply),
        # Increase the log level so that info level log statements show up.
        # TODO(https://github.com/kubeflow/testing/issues/372): If we
        # set a unique artifacts dir for each workflow with the proper
        # prefix that should work.
        "--log-cli-level=info",
        "--junitxml=" + self.artifacts_dir + "/junit_kfctl-build-test"
        + self.config_name + ".xml",
        # TODO(jlewi) Test suite name needs to be unique based on parameters.
        #
        "-o", "junit_suite_name=test_kfctl_go_deploy_" + self.config_name,
        "--app_path=" + self.app_dir,
        "--kfctl_repo_path=" + self.src_dir,
        "--self_signed_cert=True",
    ]

    dependencies = [checkout["name"]]
    build_kfctl = self._build_step(step_name, self.workflow, E2E_DAG_NAME,
                                   py3_template, command, dependencies)

    #**************************************************************************
    # Wait for Kubeflow to be ready
    step_name = "kubeflow-is-ready"
    command = [
           "pytest",
           "kf_is_ready_test.py",
           # I think -s means stdout/stderr will print out to aid in debugging.
           # Failures still appear to be captured and stored in the junit file.
           "-s",
           # TODO(jlewi): We should update kf_is_ready_test to take the config
           # path and then based on the KfDef spec kf_is_ready_test should
           # figure out what to do.
           "--use_basic_auth={0}".format(self.use_basic_auth),
           # TODO(jlewi): We should be using ISTIO always so can we stop
           # setting this
           "--use_istio=true",
           # Increase the log level so that info level log statements show up.
           "--log-cli-level=info",
           "--junitxml=" + os.path.join(self.artifacts_dir,
                                        "junit_kfctl-is-ready-test-" +
                                        self.config_name + ".xml"),
           # Test suite name needs to be unique based on parameters
           "-o", "junit_suite_name=test_kf_is_ready_" + self.config_name,
           "--app_path=" + self.app_dir,
         ]

    dependencies = [build_kfctl["name"]]
    kf_is_ready = self._build_step(step_name, self.workflow, E2E_DAG_NAME, task_template,
                                   command, dependencies)


    #**************************************************************************
    # Wait for endpoint to be ready
    if self.test_endpoint:
      self._test_endpoint_step_name = "endpoint-is-ready"
      command = ["pytest",
                 "endpoint_ready_test.py",
                 # I think -s means stdout/stderr will print out to aid in debugging.
                 # Failures still appear to be captured and stored in the junit file.
                 "-s",
                 # Increase the log level so that info level log statements show up.
                 "--log-cli-level=info",
                 "--junitxml=" + self.artifacts_dir + "/junit_endpoint-is-ready-test-" + self.config_name + ".xml",
                 # Test suite name needs to be unique based on parameters
                 "-o", "junit_suite_name=test_endpoint_is_ready_" + self.config_name,
                 "--app_path=" + self.app_dir,
                 "--app_name=" + self.app_name,
                 "--use_basic_auth={0}".format(self.use_basic_auth),
              ]

      dependencies = [build_kfctl["name"]]
      endpoint_ready = self._build_step(self._test_endpoint_step_name,
                                        self.workflow, E2E_DAG_NAME, py3_template,
                                        command, dependencies)
      self._test_endpoint_template_name = endpoint_ready["name"]

    #**************************************************************************
    # Do kfctl apply again. This test will be skipped if it's a presubmit.
    step_name = "kfctl-second-apply"
    command = [
           "pytest",
           "kfctl_second_apply.py",
           # I think -s means stdout/stderr will print out to aid in debugging.
           # Failures still appear to be captured and stored in the junit file.
           "-s",
           "--log-cli-level=info",
           "--junitxml=" + os.path.join(self.artifacts_dir,
                                        "junit_kfctl-second-apply-test-" +
                                        self.config_name + ".xml"),
           # Test suite name needs to be unique based on parameters
           "-o", "junit_suite_name=test_kfctl_second_apply_" + self.config_name,
           "--app_path=" + self.app_dir,
           "--kfctl_path=" + self.kfctl_path,
         ]
    if self.test_endpoint:
      dependencies = [kf_is_ready["name"], endpoint_ready["name"]]
    else:
      dependencies = [kf_is_ready["name"]]

    kf_second_apply = self._build_step(step_name, self.workflow, E2E_DAG_NAME, task_template,
                                       command, dependencies)

    self._build_tests_dag()

    # Add a task to run the dag
    dependencies = [kf_is_ready["name"]]
    self._run_tests_step_name = TESTS_DAG_NAME
    run_tests_template_name = TESTS_DAG_NAME
    argo_build_util.add_task_only_to_dag(self.workflow, E2E_DAG_NAME, self._run_tests_step_name,
                                         run_tests_template_name,
                                         dependencies)

    #***************************************************************************
    # create_pr_symlink
    #***************************************************************************
    # TODO(jlewi): run_e2e_workflow.py should probably create the PR symlink
    step_name = "create-pr-symlink"
    command = ["python",
               "-m",
               "kubeflow.testing.prow_artifacts",
               "--artifacts_dir=" + self.output_dir,
               "create_pr_symlink"]

    if self.bucket:
      # Pass the bucket via the --bucket flag that prow_artifacts expects.
      command.append("--bucket=" + self.bucket)

    dependencies = [checkout["name"]]
    symlink = self._build_step(step_name, self.workflow, E2E_DAG_NAME, task_template,
                               command, dependencies)

    self._build_exit_dag()


    # Set the labels on all templates
    self.workflow = argo_build_util.set_task_template_labels(self.workflow)

    return self.workflow
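# ---------------------------------------------------------------------------
# Illustrative sketch (not the actual kubeflow.testing implementation): the
# calls to argo_build_util.set_task_template_labels above presumably copy the
# workflow's metadata labels onto each template's pod metadata, so every step
# launched by the workflow carries labels such as workflow_template. A
# minimal stand-in might look like:
def copy_labels_to_templates(workflow):
    """Propagate workflow metadata labels onto every template in the spec."""
    labels = workflow.get("metadata", {}).get("labels", {})
    for template in workflow.get("spec", {}).get("templates", []):
        template.setdefault("metadata", {}).setdefault("labels", {}).update(
            labels)
    return workflow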
    def build(self):
        self.workflow = super(Builder, self).build()
        task_template = self._build_task_template()

        # Change the workflow_template labels
        self.workflow["metadata"]["labels"][
            "workflow_template"] = TEMPLATE_LABEL

        # Add the dag to upgrade Kubeflow to the workflow
        self._build_upgrade_dag()

        # Add a task to the E2E dag to run the dag to upgrade Kubeflow.
        dependencies = [self._run_tests_step_name]

        if self._test_endpoint_step_name:
            dependencies.append(self._test_endpoint_step_name)

        step_name = UPGRADE_DAG_NAME
        template_name = UPGRADE_DAG_NAME
        argo_build_util.add_task_only_to_dag(self.workflow,
                                             kfctl_e2e_workflow.E2E_DAG_NAME,
                                             step_name, template_name,
                                             dependencies)

        # Wait for Kubeflow to be ready after upgrading
        step_name = READY_AFTER_UPGRADE
        template_name = "kubeflow-is-ready"
        command = [
            "pytest",
            "kf_is_ready_test.py",
            "-s",
            "--log-cli-level=info",
            "--junitxml=" + os.path.join(
                self.artifacts_dir,
                "junit_ready-after-upgrade-test-" + self.config_name + ".xml"),
            "-o",
            "junit_suite_name=test_ready_after_upgrade_" + self.config_name,
            "--app_path=" + self.app_dir,
        ]

        dependencies = [UPGRADE_DAG_NAME]
        argo_build_util.add_task_only_to_dag(self.workflow,
                                             kfctl_e2e_workflow.E2E_DAG_NAME,
                                             step_name, template_name,
                                             dependencies)

        #****************************************************************************
        # Add tests DAG
        #****************************************************************************
        # After running the upgrade, rerun the DAG(s) that validate the deployment is healthy.

        step_name = "test-after-upgrade"
        template_name = kfctl_e2e_workflow.TESTS_DAG_NAME
        dependencies = [READY_AFTER_UPGRADE]
        argo_build_util.add_task_only_to_dag(self.workflow,
                                             kfctl_e2e_workflow.E2E_DAG_NAME,
                                             step_name, template_name,
                                             dependencies)

        # Test the endpoint after upgrade
        if self.test_endpoint:
            dependencies = [UPGRADE_DAG_NAME]
            step_name = "upgraded-endpoint-ready"
            argo_build_util.add_task_only_to_dag(
                self.workflow, kfctl_e2e_workflow.E2E_DAG_NAME, step_name,
                self._test_endpoint_template_name, dependencies)

        # Reset the labels on all templates to pick up the updated workflow template label
        self.workflow = argo_build_util.set_task_template_labels(self.workflow)

        return self.workflow