# (tail of the app/job setup helper — its def line is outside this chunk;
# `prototype_name` and `name` come from that enclosing scope.)
util.run(["ks", "generate", prototype_name, name])
util.run(["ks", "apply", "default", "-c", "tf-job-simple"])


def test_tf_job_simple(test_case):  # pylint: disable=redefined-outer-name
    """Deploy a tf-job-simple TFJob and wait for it to reach Running.

    Args:
      test_case: test_helper.TestCase used to record failure info.
    """
    args = parse_args()
    namespace = "default"
    name = "tf-job-simple"
    util.load_kube_config()
    api_client = k8s_client.ApiClient()
    create_app_and_job(args, namespace, name)
    try:
        tf_job_client.wait_for_condition(
            api_client, namespace, name, ["Running"],
            status_callback=tf_job_client.log_status)
        logging.info("TFJob launched successfully")
    except Exception as e:  # pylint: disable=broad-except
        logging.error("Test failed waiting for job; %s", e)
        # Fix: str(e) instead of e.message — BaseException.message was
        # removed in Python 3, so e.message raises AttributeError there.
        test_case.add_failure_info(str(e))


if __name__ == "__main__":
    test_case = test_helper.TestCase(name="test_tf_job_simple",
                                     test_func=test_tf_job_simple)
    test_suite = test_helper.init(name="test_tf_job_simple",
                                  test_cases=[test_case])
    test_suite.run()
def main():
    """Entry point: run deploy_kubeflow as a one-case test suite."""
    cases = [
        test_helper.TestCase(name='deploy_kubeflow', test_func=deploy_kubeflow)
    ]
    suite = test_helper.init(name='deploy_kubeflow', test_cases=cases)
    suite.run()
help= "Comma separated directories used by jsonnet to find additional libraries" ) args, _ = parser.parse_known_args() return args def test_jsonnet(test_case): # pylint: disable=redefined-outer-name args = parse_args() if not args.test_files_dirs: raise ValueError('--test_files_dirs needs to be set') test_files_dirs = args.test_files_dirs.split(',') jsonnet_path_args = [] if len(args.jsonnet_path_dirs) > 0: for jsonnet_path_dir in args.jsonnet_path_dirs.split(','): jsonnet_path_args.append('--jpath') jsonnet_path_args.append(jsonnet_path_dir) run(test_files_dirs, jsonnet_path_args, test_case) if __name__ == "__main__": test_case = test_helper.TestCase(name='test_jsonnet', test_func=test_jsonnet) test_suite = test_helper.init(name='jsonnet_test_suite', test_cases=[test_case]) test_suite.run()
# (tail of wait_for_tf_job() — its def line is outside this chunk.)
out = util.run([
    "kubectl", "get", "services", "-l", "tf_job_name=mycnnjob", "-ndefault"
])
# Expect exactly one matching service: header line + one row + trailing
# newline gives a 3-element split.
if "No resources found" in out or len(out.split("\n")) != 3:
    raise Exception(
        "Could not find services with label tf_job_name=mycnnjob")
logging.info("Found services with label tf_job_name=mycnnjob")


def test_tf_job_simple(test_case):  # pylint: disable=redefined-outer-name
    """Build a ksonnet app, deploy tf-job-simple, and wait for it to run.

    Args:
      test_case: test_helper.TestCase used to record failure info.
    """
    args = parse_args()
    util.run(["ks", "init", "tf-job-simple-app"])
    os.chdir("tf-job-simple-app")
    util.run(["ks", "registry", "add", "kubeflow", args.src_dir + "/kubeflow"])
    util.run(["ks", "pkg", "install", "kubeflow/examples"])
    util.run(["ks", "generate", "tf-job-simple", "tf-job-simple"])
    util.run(["ks", "apply", "default", "-c", "tf-job-simple"])
    try:
        wait_for_tf_job()
        logging.info("TFJob launched successfully")
    except Exception as e:  # pylint: disable=broad-except
        # Fix: str(e) instead of e.message — BaseException.message was
        # removed in Python 3, so e.message raises AttributeError there.
        test_case.add_failure_info(str(e))


if __name__ == "__main__":
    test_case = test_helper.TestCase(name="test_tf_job_simple",
                                     test_func=test_tf_job_simple)
    # Fix: the suite was registered with an empty name (""), unlike every
    # sibling script; use the test name so junit output is identifiable.
    test_suite = test_helper.init(name="test_tf_job_simple",
                                  test_cases=[test_case])
    test_suite.run()
def main():
    """Entry point: run get_gke_credentials as a one-case test suite."""
    case = test_helper.TestCase(
        name='get_gke_credentials', test_func=get_gke_credentials)
    test_helper.init(name='get_gke_credentials', test_cases=[case]).run()
def main():
    """Entry point: run test_invalid_job as a one-case test suite."""
    case = test_helper.TestCase(
        name="test_invalid_job", test_func=test_invalid_job)
    test_helper.init(name="test_invalid_job", test_cases=[case]).run()
def main():
    """Entry point: run update_dm_config as a one-case test suite."""
    case = test_helper.TestCase(
        name='update_dm_config', test_func=update_dm_config)
    test_helper.init(name='update_dm_config', test_cases=[case]).run()
def main():
    """Entry point: run run_smoke_test under the kubebench job suite."""
    case = test_helper.TestCase(
        name='run_smoke_test', test_func=run_smoke_test)
    # NOTE(review): suite name intentionally differs from the case name —
    # presumably it names the overall kubebench job test output; confirm.
    test_helper.init(name='test_kubebench_job', test_cases=[case]).run()
time.sleep(polling_interval.seconds) # Linter complains if we don't have a return statement even though #this code is unreachable. return None def test_katib(test_case): # pylint: disable=redefined-outer-name args = parse_args() namespace = NAMESPACE name = "katib-studyjob-test" util.load_kube_config() api_client = k8s_client.ApiClient() create_app_and_job(args, namespace, name) try: wait_for_condition(api_client, namespace, name, ["Running"], status_callback=log_status) logging.info("StudyJob launched successfully") except Exception as e: logging.error("Test failed waiting for job; %s", e) test_case.add_failure_info(e.message) if __name__ == "__main__": test_case = test_helper.TestCase(name="test_katib", test_func=test_katib) test_suite = test_helper.init(name="test_katib", test_cases=[test_case]) test_suite.run()
# (interior of test_lint(test_case) — the function's def line is outside this
# chunk; `args`, `test_case`, `full_dir_excludes`, `includes`, `rc_file`, and
# `failed_files` come from that enclosing scope.)
for root, dirs, files in os.walk(os.path.abspath(args.src_dir), topdown=True):
    # excludes can be done with fnmatch.filter and complementary set,
    # but it's more annoying to read.
    if should_exclude(root, full_dir_excludes):
        continue

    # NOTE(review): this rebind is a no-op copy; it looks like it was meant
    # to prune excluded directories from the walk (the py_test walker filters
    # dirs at this point) — confirm intent before changing.
    dirs[:] = [d for d in dirs]
    for pat in includes:
        for f in fnmatch.filter(files, pat):
            full_path = os.path.join(root, f)
            try:
                # One pylint subprocess per file; non-zero exit means lint
                # errors were found.
                util.run(["pylint", "--rcfile=" + rc_file, full_path],
                         cwd=args.src_dir)
            except subprocess.CalledProcessError:
                # Record the path relative to src_dir for reporting.
                failed_files.append(full_path[len(args.src_dir):])

if failed_files:
    failed_files.sort()
    test_case.add_failure_info("Files with lint issues: {0}".format(
        ", ".join(failed_files)))
    logging.error("%s files had lint errors:\n%s", len(failed_files),
                  "\n".join(failed_files))
else:
    logging.info("No lint issues.")

if __name__ == "__main__":
    test_case = test_helper.TestCase(name='test_lint', test_func=test_lint)
    test_suite = test_helper.init(name='py_lint', test_cases=[test_case])
    test_suite.run()
# them automatically? dir_excludes = ["vendor"] includes = ["*_test.py"] failed_files = [] num_failed = 0 for root, dirs, files in os.walk(args.src_dir, topdown=True): # excludes can be done with fnmatch.filter and complementary set, # but it's more annoying to read. dirs[:] = [d for d in dirs if d not in dir_excludes] for pat in includes: for f in fnmatch.filter(files, pat): full_path = os.path.join(root, f) try: util.run(["python", full_path], cwd=args.src_dir) except subprocess.CalledProcessError: failed_files.append(full_path[len(args.src_dir):]) num_failed += 1 if num_failed: logging.error("%s tests failed.", num_failed) test_case.add_failure_info("{0} tests failed: {1}.".format( num_failed, ", ".join(failed_files))) else: logging.info("No test issues.") if __name__ == "__main__": test_case = test_helper.TestCase(name='py_test', test_func=py_test) test_suite = test_helper.init(name='py_checks', test_cases=[test_case]) test_suite.run()
# (tail of run() — the function's def line is outside this chunk;
# `test_file`, `test_case`, and `e` come from that enclosing scope.)
        msg = '{} test failed'.format(test_file)
        test_case.add_failure_info(msg)
        logging.error(
            '{} with exception {}. See Subprocess output for '
            'details.'.format(msg, e))


def parse_args():
    """Parse known command-line flags, ignoring any unrecognized ones.

    Returns:
      argparse.Namespace with test_files_dirs (comma-separated string).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--test_files_dirs",
        default=".",
        type=str,
        help="Comma separated directories containing Python files")
    args, _ = parser.parse_known_args()
    return args


def test_flake8(test_case):  # pylint: disable=redefined-outer-name
    """Run flake8 over every directory listed in --test_files_dirs.

    Args:
      test_case: test_helper.TestCase used to record failure info.

    Raises:
      ValueError: if --test_files_dirs was not supplied.
    """
    args = parse_args()
    if not args.test_files_dirs:
        raise ValueError('--test_files_dirs needs to be set')
    run(args.test_files_dirs.split(','), test_case)


if __name__ == "__main__":
    test_case = test_helper.TestCase(name='test_flake8',
                                     test_func=test_flake8)
    test_suite = test_helper.init(name='flake8_test_suite',
                                  test_cases=[test_case])
    test_suite.run()
def is_excluded(file_name, exclude_dirs): for exclude_dir in exclude_dirs: if file_name.startswith(exclude_dir): return True return False def test_jsonnet_formatting(test_case): # pylint: disable=redefined-outer-name logging.info('Running test_jsonnet_formatting') args = parse_args() exclude_dirs = [] if args.exclude_dirs: exclude_dirs = args.exclude_dirs.split(',') for dirpath, _, filenames in os.walk(args.src_dir): jsonnet_files = fnmatch.filter(filenames, '*.jsonnet') libsonnet_files = fnmatch.filter(filenames, '*.libsonnet') for file_name in itertools.chain(jsonnet_files, libsonnet_files): full_path = os.path.join(dirpath, file_name) if not is_excluded(full_path, exclude_dirs) and not is_formatted(full_path): test_case.add_failure_info( "ERROR : {0} is not formatted".format(full_path)) if __name__ == "__main__": test_case = test_helper.TestCase(name='test_jsonnet_formatting', test_func=test_jsonnet_formatting) test_suite = test_helper.init(name='test_jsonnet_formatting', test_cases=[test_case]) test_suite.run()
def main():
    """Entry point: run run_with_retry as a one-case test suite."""
    case = test_helper.TestCase(
        name='run_with_retry', test_func=run_with_retry)
    test_helper.init(name='run_with_retry', test_cases=[case]).run()
def wait_for_resource(resource, end_time):
    """Poll `kubectl get <resource>` until it reports the resource exists.

    A CalledProcessError from kubectl is treated as "not found yet" rather
    than fatal (e.g. the CRD type is not registered yet).

    Args:
      resource: kubectl resource spec, e.g. "crd/tfjobs.kubeflow.org".
      end_time: datetime.datetime deadline.

    Raises:
      RuntimeError: if end_time passes before the resource is found.
    """
    while True:
        if datetime.datetime.now() > end_time:
            raise RuntimeError("Timed out waiting for " + resource)
        try:
            # The output is also scanned for 'error' — presumably kubectl can
            # exit 0 while printing an error; verify against kubectl behavior.
            if 'error' not in util.run(["kubectl", "get", resource]).lower():
                logging.info("Found %s.", resource)
                break
        except subprocess.CalledProcessError:
            logging.info("Could not find %s. Sleeping for 10 seconds..",
                         resource)
        # NOTE(review): the collapsed source is ambiguous about whether this
        # sleep is inside the except block; placed at loop level so the
        # error-in-output path also backs off — confirm original layout.
        time.sleep(10)


def test_wait_for_deployment(test_case):  # pylint: disable=redefined-outer-name,unused-argument
    """Wait until the TFJob and Argo CRDs exist in the target GKE cluster."""
    args = parse_args()
    util.maybe_activate_service_account()
    util.run([
        "gcloud", "container", "clusters", "get-credentials", args.cluster,
        "--zone=" + args.zone, "--project=" + args.project])
    # args.timeout is in minutes; timedelta(0, s) takes seconds.
    end_time = datetime.datetime.now() + datetime.timedelta(0, args.timeout*60)
    wait_for_resource("crd/tfjobs.kubeflow.org", end_time)
    wait_for_resource("crd/workflows.argoproj.io", end_time)
    logging.info("Found all resources successfully")


if __name__ == "__main__":
    test_case = test_helper.TestCase(
        name="wait_for_deployment", test_func=test_wait_for_deployment)
    test_suite = test_helper.init(
        name="test_wait_for_deployment", test_cases=[test_case])
    test_suite.run()
def main():
    """Entry point: run teardown_kubeflow_gcp as a one-case test suite."""
    case = test_helper.TestCase(
        name='teardown_kubeflow_gcp', test_func=teardown_kubeflow_gcp)
    # NOTE(review): the suite name 'deploy_kubeflow_gcp' does not match the
    # teardown case; it looks copy-pasted from the deploy script, but output
    # naming may depend on it — confirm before renaming.
    test_helper.init(name='deploy_kubeflow_gcp', test_cases=[case]).run()