util.run(["ks", "pkg", "install", "kubeflow/examples"]) except subprocess.CalledProcessError as e: # Keep going if the package has already been added. # This is a sign the a previous attempt failed and we are retrying. if not re.search(".*already exists.*", e.output): raise if args.tf_job_version == "v1alpha2": prototype_name = "tf-job-simple" elif args.tf_job_version == "v1alpha1": prototype_name = "tf-job-simple-v1alpha1" else: raise ValueError("Unrecognized value for tf_job_version: %s" % args.tf_job_version) util.run(["ks", "generate", prototype_name, "tf-job-simple"]) util.run(["ks", "apply", "default", "-c", "tf-job-simple"]) try: wait_for_tf_job() logging.info("TFJob launched successfully") except Exception as e: test_case.add_failure_info(e.message) if __name__ == "__main__": test_case = test_helper.TestCase( name="test_tf_job_simple", test_func=test_tf_job_simple) test_suite = test_helper.init( name="", test_cases=[test_case]) test_suite.run()
help= "Comma separated directories used by jsonnet to find additional libraries" ) args, _ = parser.parse_known_args() return args def test_jsonnet(test_case): # pylint: disable=redefined-outer-name args = parse_args() if not args.test_files_dirs: raise ValueError('--test_files_dirs needs to be set') test_files_dirs = args.test_files_dirs.split(',') jsonnet_path_args = [] if len(args.jsonnet_path_dirs) > 0: for jsonnet_path_dir in args.jsonnet_path_dirs.split(','): jsonnet_path_args.append('--jpath') jsonnet_path_args.append(jsonnet_path_dir) run(test_files_dirs, jsonnet_path_args, test_case) if __name__ == "__main__": test_case = test_helper.TestCase(name='test_jsonnet', test_func=test_jsonnet) test_suite = test_helper.init(name='jsonnet_test_suite', test_cases=[test_case]) test_suite.run()
def main():
  """Entry point: run the deploy_kubeflow check as a one-case suite."""
  case = test_helper.TestCase(name='deploy_kubeflow',
                              test_func=deploy_kubeflow)
  suite = test_helper.init(name='deploy_kubeflow', test_cases=[case])
  suite.run()
def main():
  """Entry point: run the kubebench smoke test as a one-case suite."""
  case = test_helper.TestCase(name='run_smoke_test',
                              test_func=run_smoke_test)
  suite = test_helper.init(name='test_kubebench_job', test_cases=[case])
  suite.run()
      # NOTE(review): fragment of wait_for_resource — the enclosing retry
      # loop and its deadline check sit above this excerpt.
      raise RuntimeError("Timed out waiting for " + resource)
    try:
      # kubectl exits 0 but may still print "error" text; treat that as
      # not-found and keep polling.
      if 'error' not in util.run(["kubectl", "get", resource]).lower():
        logging.info("Found %s.", resource)
        break
    except subprocess.CalledProcessError:
      logging.info("Could not find %s. Sleeping for 10 seconds..", resource)
      # NOTE(review): reconstructed placement — if kubectl succeeds but the
      # output contains 'error', the loop retries without sleeping; confirm
      # the sleep was meant to live inside the except block.
      time.sleep(10)


def test_wait_for_deployment(test_case):  # pylint: disable=redefined-outer-name,unused-argument
  """Wait until the Kubeflow CRDs appear in the target GKE cluster."""
  args = parse_args()
  util.maybe_activate_service_account()
  # Point kubectl at the cluster under test.
  util.run([
      "gcloud", "container", "clusters", "get-credentials", args.cluster,
      "--zone=" + args.zone, "--project=" + args.project
  ])
  # args.timeout is in minutes; timedelta(days, seconds) takes seconds.
  end_time = datetime.datetime.now() + datetime.timedelta(
      0, args.timeout * 60)
  wait_for_resource("crd/tfjobs.kubeflow.org", end_time)
  wait_for_resource("crd/workflows.argoproj.io", end_time)
  logging.info("Found all resources successfully")


if __name__ == "__main__":
  # Wire the test function into the project's junit-report helper and run it.
  test_case = test_helper.TestCase(name="test_wait_for_deployment",
                                   test_func=test_wait_for_deployment)
  test_suite = test_helper.init(name="", test_cases=[test_case])
  test_suite.run()
      # NOTE(review): fragment of test_lint — the os.walk loop and the
      # earlier exclusion checks sit above this excerpt.
      continue
    # excludes can be done with fnmatch.filter and complementary set,
    # but it's more annoying to read.
    if should_exclude(root, full_dir_excludes):
      continue
    # NOTE(review): this in-place copy is a no-op; it was presumably meant to
    # prune excluded subdirectories from os.walk's recursion — confirm intent.
    dirs[:] = [d for d in dirs]
    for pat in includes:
      for f in fnmatch.filter(files, pat):
        full_path = os.path.join(root, f)
        try:
          util.run(["pylint", "--rcfile=" + rc_file, full_path],
                   cwd=args.src_dir)
        except subprocess.CalledProcessError:
          # pylint exits nonzero on lint findings; record the path relative
          # to the source root.
          failed_files.append(full_path[len(args.src_dir):])

  if failed_files:
    failed_files.sort()
    # Attach the failure to the junit-style report rather than raising.
    test_case.add_failure_info("Files with lint issues: {0}".format(
        ", ".join(failed_files)))
    logging.error("%s files had lint errors:\n%s", len(failed_files),
                  "\n".join(failed_files))
  else:
    logging.info("No lint issues.")


if __name__ == "__main__":
  # Wire the test function into the project's junit-report helper and run it.
  test_case = test_helper.TestCase(name='test_lint', test_func=test_lint)
  test_suite = test_helper.init(name='py_lint', test_cases=[test_case])
  test_suite.run()
def main():
  """Entry point: run the GCP Kubeflow teardown as a one-case suite."""
  case = test_helper.TestCase(name='teardown_kubeflow_gcp',
                              test_func=teardown_kubeflow_gcp)
  # NOTE(review): suite name says 'deploy' while the case tears down —
  # possibly intentional (same report stream as the deploy suite); confirm.
  suite = test_helper.init(name='deploy_kubeflow_gcp', test_cases=[case])
  suite.run()