Example No. 1
def _touch_intermediate_project(diro, fkv, listener):
    status = diro.status

    # If directory already exists and has something in it, assume it's ok
    if 'non_empty_directory' == status:
        return 0

    # At this point, path must be noent
    if 'noent' != status:
        def lines():
            reason = status.replace('_', ' ')  # humanize the status token for the message
            yield f"can't generate because {reason} - {diro.path}"
        listener('error', 'expression', 'cant_generate', lines)
        return 123

    # Call our task with the particular arguments it expects
    fkv = fkv.copy()
    author = fkv.pop('author')
    timezone = fkv.pop('timezone')
    if fkv:
        xx(f"why: {tuple(fkv.keys())!r}")

    from invoke.context import Context
    c = Context()
    from pho_tasks.tasks import make_pelican_intermediate_directory as task
    task(c, diro.path, author, timezone)  # the task returns None
    return 0  # for now we just assume success
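
The error path above reports through a listener(severity, shape, category, lines) callback, where `lines` is a zero-argument callable that lazily yields message lines. A minimal sketch of a compatible listener (the name print_listener and its behavior are illustrative, not from the original project):

def print_listener(severity, shape, category, lines):
    # Materialize the lazily built message lines and print them with severity/category.
    for line in lines():
        print(f"[{severity}/{category}] {line}")
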
def test_framework_version_cpu(cpu):
    """
    Check that the framework version in the image tag is the same as the one on a running container.

    :param cpu: ECR image URI with "cpu" in the name
    """
    image = cpu
    if "tensorflow-inference" in image:
        pytest.skip(msg="TF inference does not have core tensorflow installed")

    tested_framework, tag_framework_version = get_framework_and_version_from_tag(
        image)

    # Module name is torch
    if tested_framework == "pytorch":
        tested_framework = "torch"
    ctx = Context()
    container_name = f"framework-version-{image.split('/')[-1].replace('.', '-').replace(':', '-')}"
    _start_container(container_name, image, ctx)
    output = _run_cmd_on_container(
        container_name,
        ctx,
        f"import {tested_framework}; print({tested_framework}.__version__)",
        executable="python")
    if is_canary_context():
        assert tag_framework_version in output.stdout.strip()
    else:
        assert tag_framework_version == output.stdout.strip()
Example No. 3
def run_sagemaker_test_in_executor(image, num_of_instances, instance_type):
    """
    Run pytest in a virtual env for a particular image

    Expected to run under multi-threading

    :param num_of_instances: <int> number of instances the image test requires
    :param instance_type: type of sagemaker instance the test needs
    :param image: ECR url
    :return:
    """
    import log_return

    LOGGER.info("Started running SageMaker test.....")
    pytest_command, path, tag, job_type = sm_utils.generate_sagemaker_pytest_cmd(image, "sagemaker")

    # Update the resource pool accordingly; the try/except below should also update the pool in case of failure
    try:
        log_return.update_pool("running", instance_type, num_of_instances, job_type)
        context = Context()
        with context.cd(path):
            context.run(f"python3 -m virtualenv {tag}")
            with context.prefix(f"source {tag}/bin/activate"):
                context.run("pip install -r requirements.txt", warn=True)
                context.run(pytest_command)
    except Exception as e:
        LOGGER.error(e)
        return False

    return True
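
The docstring above notes this helper is expected to run under multi-threading. A minimal sketch of fanning it out with a thread pool; the image URIs, instance counts, and instance types are made up for illustration, and run_sagemaker_test_in_executor is assumed to be importable from this module:

from concurrent.futures import ThreadPoolExecutor

images_to_test = [
    # (ECR image URI, number of instances, SageMaker instance type) -- illustrative values
    ("111122223333.dkr.ecr.us-west-2.amazonaws.com/pytorch-training:1.8.1-cpu-py36", 1, "ml.c5.xlarge"),
    ("111122223333.dkr.ecr.us-west-2.amazonaws.com/pytorch-training:1.8.1-gpu-py36", 1, "ml.p3.2xlarge"),
]

with ThreadPoolExecutor(max_workers=len(images_to_test)) as executor:
    futures = [
        executor.submit(run_sagemaker_test_in_executor, image, num_instances, instance_type)
        for image, num_instances, instance_type in images_to_test
    ]
    test_results = [future.result() for future in futures]
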
def test_pandas(image):
    """
    Newer Python versions may have issues installing pandas if the bz2 module is missing from py3 containers.
    This is a sanity test to ensure that importing pandas works properly in all containers.

    :param image: ECR image URI
    """
    ctx = Context()
    container_name = _get_container_name("pandas", image)
    _start_container(container_name, image, ctx)

    # Make sure we can install pandas, do not fail right away if there are pip check issues
    _run_cmd_on_container(container_name, ctx, "pip install pandas", warn=True)

    pandas_import_output = _run_cmd_on_container(container_name,
                                                 ctx,
                                                 "import pandas",
                                                 executable="python")

    assert (
        not pandas_import_output.stdout.strip()
    ), f"Expected no output when importing pandas, but got  {pandas_import_output.stdout}"

    # Simple import test to ensure we do not get a bz2 module import failure
    _run_cmd_on_container(container_name,
                          ctx,
                          "import pandas; print(pandas.__version__)",
                          executable="python")
Example No. 5
def test_repo_anaconda_not_present(image):
    """Test to see if all packages installed in the image do not come from repo.anaconda.com"""
    try:
        ctx = Context()
        container_name = test_utils.get_container_name("anaconda", image)
        test_utils.start_container(container_name, image, ctx)

        # First check whether the image has conda installed; if not, skip the test since no conda-installed packages are present
        conda_present = test_utils.run_cmd_on_container(
            container_name, ctx,
            "find . -name conda -not -path \"**/.github/*\"").stdout.strip()
        if not conda_present:
            pytest.skip(
                f"Image {image} does not have conda installed, skipping test.")

        # Commands are split in two because with warn=True the first command can fail silently without raising an error
        test_utils.run_cmd_on_container(
            container_name, ctx, "conda list --explicit > repo_list.txt")

        grep_result = test_utils.run_cmd_on_container(
            container_name,
            ctx,
            "grep repo.anaconda.com repo_list.txt",
            warn=True).stdout.strip()
        if grep_result:
            raise RuntimeError(
                f"Image {image} contains packages installed from repo.anaconda.com. "
                f"Please ensure that these packages are obtained through conda-forge or other alternatives: {grep_result}"
            )
    finally:
        test_utils.stop_and_remove_container(container_name, ctx)
Example No. 6
def test_dataclasses_check(image):
    """
    Ensure the dataclasses pip package is not installed for Python 3.7 and above.
    The Python version retrieved from the ECR image URI is expected in the format `py<major_version><minor_version>`.
    :param image: ECR image URI
    """
    ctx = Context()
    pip_package = "dataclasses"

    container_name = get_container_name("dataclasses-check", image)

    python_version = get_python_version_from_image_uri(image).replace("py", "")
    python_version = int(python_version)

    if python_version >= 37:
        start_container(container_name, image, ctx)
        output = run_cmd_on_container(container_name,
                                      ctx,
                                      f"pip show {pip_package}",
                                      warn=True)

        if output.return_code == 0:
            pytest.fail(
                f"{pip_package} package exists in the DLC image {image} that has py{python_version} version which is greater than py36 version"
            )
        else:
            LOGGER.info(
                f"{pip_package} package does not exists in the DLC image {image}"
            )
    else:
        pytest.skip(
            f"Skipping test for DLC image {image} that has py36 version as {pip_package} is not included in the python framework"
        )
Example No. 7
def test_generate_coverage_doc():
    """
    Test generating the test coverage doc
    """
    test_coverage_file = get_test_coverage_file_path()
    ctx = Context()
    # Set DLC_IMAGES to '' to avoid image names affecting function metadata (due to parametrization)
    # Set CODEBUILD_RESOLVED_SOURCE_VERSION to test for ease of running this test locally
    ctx.run(
        "export DLC_IMAGES='' && export CODEBUILD_RESOLVED_SOURCE_VERSION='test' && export BUILD_CONTEXT=''"
        "&& pytest -s --collect-only  --generate-coverage-doc --ignore=container_tests/",
        hide=True,
    )

    # Ensure that the coverage report is created
    assert os.path.exists(test_coverage_file), f"Cannot find test coverage report file {test_coverage_file}"

    # Write test coverage file to S3
    if is_mainline_context():
        client = boto3.client("s3")
        with open(test_coverage_file, "rb") as test_file:
            try:
                client.put_object(Bucket=TEST_COVERAGE_REPORT_BUCKET, Key=os.path.basename(test_coverage_file),
                                  Body=test_file)
            except ClientError as e:
                LOGGER.error(f"Unable to upload report to bucket {TEST_COVERAGE_REPORT_BUCKET}. Error: {e}")
                raise
Example No. 8
    def _run(cmd: list, custom_working_dir=None, custom_env=None):
        if cmd is None:
            cmd = []
        quoted_cmd = [f'"{t}"' for t in cmd]

        if not custom_working_dir:
            custom_working_dir = working_dir
        if not custom_env:
            custom_env = env
        cli_full_line = '"{}" {}'.format(cli_path, " ".join(quoted_cmd))
        run_context = Context()
        # On Windows we may need to change directories across drives; in that case the
        # "/d" flag must be used, otherwise the directory won't change
        cd_command = "cd"
        if platform.system() == "Windows":
            cd_command += " /d"
        # Context.cd() is not used since it doesn't work correctly on Windows.
        # It escapes spaces in the path using "\ ", but that doesn't always work;
        # wrapping the path in quotation marks is the safest approach
        with run_context.prefix(f'{cd_command} "{custom_working_dir}"'):
            return run_context.run(cli_full_line,
                                   echo=False,
                                   hide=True,
                                   warn=True,
                                   env=custom_env,
                                   encoding="utf-8")
Example No. 9
def test_framework_version_cpu(image):
    """
    Check that the framework version in the image tag is the same as the one on a running container.
    This function tests CPU, EIA, and Neuron images.

    :param image: ECR image URI
    """
    if "gpu" in image:
        pytest.skip(
            "GPU images will have their framework version tested in test_framework_and_cuda_version_gpu"
        )
    if "tensorflow-inference" in image:
        pytest.skip(msg="TF inference does not have core tensorflow installed")

    tested_framework, tag_framework_version = get_framework_and_version_from_tag(
        image)

    # Module name is torch
    if tested_framework == "pytorch":
        tested_framework = "torch"
    ctx = Context()
    container_name = get_container_name("framework-version", image)
    start_container(container_name, image, ctx)
    output = run_cmd_on_container(
        container_name,
        ctx,
        f"import {tested_framework}; print({tested_framework}.__version__)",
        executable="python")
    if is_canary_context():
        assert tag_framework_version in output.stdout.strip()
    else:
        assert tag_framework_version == output.stdout.strip()
Example No. 10
def generate_safety_report_for_image(image_uri,
                                     image_info,
                                     storage_file_path=None):
    """
    Generate a safety scan report for an image and store it at the specified location.

    :param image_uri: str, consists of f"{image_repo}:{image_tag}"
    :param image_info: dict, should consist of 3 keys - "framework", "python_version" and "image_type".
    :param storage_file_path: str, looks like "storage_location.json"
    :return: list[dict], safety report generated by SafetyReportGenerator
    """
    ctx = Context()
    docker_run_cmd = f"docker run -id --entrypoint='/bin/bash' {image_uri} "
    container_id = ctx.run(f"{docker_run_cmd}", hide=True,
                           warn=True).stdout.strip()
    install_safety_cmd = "pip install safety"
    docker_exec_cmd = f"docker exec -i {container_id}"
    ctx.run(f"{docker_exec_cmd} {install_safety_cmd}", hide=True, warn=True)
    ignore_dict = get_safety_ignore_dict(image_uri, image_info["framework"],
                                         image_info["python_version"],
                                         image_info["image_type"])
    safety_scan_output = SafetyReportGenerator(
        container_id, ignore_dict=ignore_dict).generate()
    ctx.run(f"docker rm -f {container_id}", hide=True, warn=True)
    if storage_file_path:
        with open(storage_file_path, "w", encoding="utf-8") as f:
            json.dump(safety_scan_output, f, indent=4)
    return safety_scan_output
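
A hypothetical call of the helper above; the image URI, metadata dict, and output path are made up for illustration:

report = generate_safety_report_for_image(
    image_uri="111122223333.dkr.ecr.us-west-2.amazonaws.com/pytorch-training:1.10.2-cpu-py38",
    image_info={"framework": "pytorch", "python_version": "py38", "image_type": "training"},
    storage_file_path="safety_report.json",
)
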
def test_utility_packages_using_import(training):
    """
    Verify that utility packages are installed in the Training DLC image
    :param training: training ECR image URI
    """
    # TODO: revert once Habana is supported on SM
    if "hpu" in training:
        pytest.skip("Skipping test for Habana images as SM is not yet supported")

    ctx = Context()
    container_name = test_utils.get_container_name("utility_packages_using_import", training)
    test_utils.start_container(container_name, training, ctx)

    framework, framework_version = test_utils.get_framework_and_version_from_tag(training)
    utility_package_minimum_framework_version = {
        "mxnet": "1.8",
        "pytorch": "1.7",
        "huggingface_pytorch": "1.7",
        "tensorflow2": "2.4",
        "tensorflow1": "1.15",
        "huggingface_tensorflow": "2.4",
    }

    if framework == "tensorflow":
        framework = "tensorflow1" if framework_version.startswith("1.") else "tensorflow2"

    if Version(framework_version) < Version(utility_package_minimum_framework_version[framework]):
        pytest.skip("Extra utility packages will be added going forward.")

    packages_to_import = UTILITY_PACKAGES_IMPORT

    for package in packages_to_import:
        version = test_utils.run_cmd_on_container(container_name, ctx, f"import {package}; print({package}.__version__)", executable="python").stdout.strip()
        if package == "sagemaker":
            assert Version(version) > Version("2"), f"Sagemaker version should be > 2.0. Found version {version}"
Example No. 12
def _print_results_of_test(file_path, processor):
    last_100_lines = Context().run(f"tail -100 {file_path}").stdout.split("\n")
    result = ""
    throughput = 0
    if processor == "cpu":
        for line in last_100_lines:
            if "Total img/sec on " in line:
                result = line + "\n"
                throughput = float(
                    re.search(
                        r"(CPU\(s\):[ ]*)(?P<throughput>[0-9]+\.?[0-9]+)",
                        line).group("throughput"))
                break
    elif processor == "gpu":
        result_dict = dict()
        for line in last_100_lines:
            if "images/sec: " in line:
                key = line.split("<stdout>")[0]
                result_dict[key] = line.strip("\n")
                if throughput == 0:
                    throughput = float(
                        re.search(
                            r"(images/sec:[ ]*)(?P<throughput>[0-9]+\.?[0-9]+)",
                            line).group("throughput"))
        result = "\n".join(result_dict.values()) + "\n"
    LOGGER.info(result)
    return result, throughput
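
The throughput is pulled out of the benchmark log with the regexes above. A small illustration of the CPU branch on a made-up log line (the exact log format is assumed from the regex, not taken from a real run):

import re

sample_line = "Total img/sec on 8 CPU(s): 123.45"
throughput = float(
    re.search(r"(CPU\(s\):[ ]*)(?P<throughput>[0-9]+\.?[0-9]+)", sample_line).group("throughput")
)
print(throughput)  # 123.45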
Example No. 13
def daemon_runner(pytestconfig, data_dir, downloads_dir, working_dir):
    """
    Provide an invoke `Local` object that has started the arduino-cli in daemon mode.
    This makes it simple to start the daemon and, when the test is finished, kill it
    via the kill() function.

    Useful reference:
        http://docs.pyinvoke.org/en/1.4/api/runners.html#invoke.runners.Local
        http://docs.pyinvoke.org/en/1.4/api/runners.html
    """
    cli_full_line = os.path.join(str(pytestconfig.rootdir), "..", "arduino-cli daemon")
    env = {
        "ARDUINO_DATA_DIR": data_dir,
        "ARDUINO_DOWNLOADS_DIR": downloads_dir,
        "ARDUINO_SKETCHBOOK_DIR": data_dir,
    }
    os.makedirs(os.path.join(data_dir, "packages"))
    run_context = Context()
    run_context.cd(working_dir)
    # Local is the concrete implementation of the Runner abstract class
    runner = Local(run_context)
    runner.run(cli_full_line, echo=False, hide=True, warn=True, env=env, asynchronous=True)

    # we block here until the test function using this fixture has returned
    yield runner

    # Kill the runner's process as we finished our test (platform dependent)
    os_signal = signal.SIGTERM
    if platform.system() != "Windows":
        os_signal = signal.SIGKILL
    os.kill(runner.process.pid, os_signal)
def test_performance_mxnet_cpu(mxnet_training, cpu_only):
    ctx = Context()
    python_version = get_py_version(mxnet_training)
    task_name = f"mx_train_single_node_cpu_{python_version}_resnet18v2_cifar10"
    script_url = " https://github.com/awslabs/deeplearning-benchmark.git"
    execute_single_node_benchmark(ctx, mxnet_training, "mxnet", task_name,
                                  python_version, script_url)
def test_torchvision_nms_inference(pytorch_inference):
    """
    Check that the internally built torchvision binary is used to resolve the missing nms issue.
    :param pytorch_inference: framework fixture for pytorch inference
    """
    _, framework_version = get_framework_and_version_from_tag(
        pytorch_inference)
    if Version(framework_version) == Version(
            "1.5.1") and get_processor_from_image_uri(
                pytorch_inference) == "gpu":
        pytest.skip("Skipping this test for PT 1.5.1 GPU Inference DLC images")
    if "eia" in pytorch_inference and Version(framework_version) < Version(
            "1.5.1"):
        pytest.skip(
            "This test does not apply to PT EIA images for PT versions less than 1.5.1"
        )
    if "neuron" in pytorch_inference:
        pytest.skip(
            "Skipping because this is not relevant to PT Neuron images")
    ctx = Context()
    container_name = get_container_name("torchvision-nms", pytorch_inference)
    start_container(container_name, pytorch_inference, ctx)
    run_cmd_on_container(
        container_name,
        ctx,
        f"import torch; import torchvision; print(torch.ops.torchvision.nms)",
        executable="python")
def test_performance_tensorflow_cpu(tensorflow_training, cpu_only):
    ctx = Context()
    python_version = get_py_version(tensorflow_training)
    framework_version = get_framework_version(tensorflow_training)
    task_name = f"tf_train_single_node_{framework_version}_cpu_{python_version}_resnet50_synthetic"
    script_url = "https://github.com/tensorflow/benchmarks.git"
    execute_single_node_benchmark(ctx, tensorflow_training, "tensorflow", task_name, python_version, script_url)
Example No. 17
def download_file(remote_url: str, link_type: str):
    """
    Fetch a remote file and save it locally under the basename of the URL.
    :param link_type: string
    :param remote_url: string
    :return: file_name: string
    """
    LOGGER.info(f"Downloading {remote_url}")

    file_name = os.path.basename(remote_url).strip()
    LOGGER.info(f"basename: {file_name}")

    if link_type in ["s3"] and remote_url.startswith("s3://"):
        match = re.match(r's3:\/\/(.+?)\/(.+)', remote_url)
        if match:
            bucket_name = match.group(1)
            bucket_key = match.group(2)
            LOGGER.info(f"bucket_name: {bucket_name}")
            LOGGER.info(f"bucket_key: {bucket_key}")
            download_s3_file(bucket_name, bucket_key, file_name)
        else:
            raise ValueError(f"Regex matching on s3 URI failed.")
    else:
        ctx = Context()
        ctx.run(f"curl -O {remote_url}")

    return file_name
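
A hypothetical usage of download_file covering both branches; the URLs are illustrative only:

model_file = download_file("s3://my-example-bucket/models/model.tar.gz", link_type="s3")
data_file = download_file("https://example.com/datasets/mnist.npz", link_type="http")
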
def _run_eks_mxnet_multinode_training_horovod_mpijob(example_image_uri, cluster_size, eks_gpus_per_worker):
    
    LOGGER.info("Starting run_eks_mxnet_multi_node_training on MNIST dataset using horovod")
    LOGGER.info("The test will run on an example image %s", example_image_uri)

    user = Context().run("echo $USER").stdout.strip("\n")
    random.seed(f"{example_image_uri}-{datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')}")
    unique_tag = f"{user}-{random.randint(1, 10000)}"

    namespace = f"mx-multi-node-train-{'py2' if 'py2' in example_image_uri else 'py3'}-{unique_tag}"
    job_name = f"mxnet-mnist-horovod-job-{unique_tag}"

    LOGGER.debug(f"Namespace: {namespace}")

    local_template_file_path = os.path.join(
        "eks",
        "eks_manifest_templates",
        "mxnet",
        "training",
        "multi_node_training_horovod_mnist.yaml"
    )

    remote_yaml_file_path = os.path.join(os.sep, "tmp", f"mxnet_multi_node_training_{unique_tag}.yaml")

    replace_dict = {
        "<JOB_NAME>": job_name,
        "<NUM_WORKERS>": cluster_size,
        "<CONTAINER_IMAGE>": example_image_uri,
        "<GPUS>": str(eks_gpus_per_worker)
    }

    eks_utils.write_eks_yaml_file_from_template(local_template_file_path, remote_yaml_file_path, replace_dict)

    _run_eks_multi_node_training_mpijob(namespace, job_name, remote_yaml_file_path)
Example No. 19
    def modifications_on_clone_do_not_alter_original(self):
        # Setup
        orig = Call(self.task,
                    called_as='foo',
                    args=[1, 2, 3],
                    kwargs={'key': 'val'})
        context = Context()
        context['setting'] = 'value'
        orig.context = context
        # Clone & tweak
        clone = orig.clone()
        newtask = Task(Mock(__name__='meh'))
        clone.task = newtask
        clone.called_as = 'notfoo'
        clone.args[0] = 7
        clone.kwargs['key'] = 'notval'
        clone.context['setting'] = 'notvalue'
        # Compare
        ok_(clone.task is not orig.task)
        eq_(orig.called_as, 'foo')
        eq_(clone.called_as, 'notfoo')
        eq_(orig.args, [1, 2, 3])
        eq_(clone.args, [7, 2, 3])
        eq_(orig.kwargs['key'], 'val')
        eq_(clone.kwargs['key'], 'notval')
        eq_(orig.context['setting'], 'value')
        eq_(clone.context['setting'], 'notvalue')
Example No. 20
def _print_results_of_test(file_path):
    last_n_lines = Context().run(f"tail -500 {file_path}").stdout.split("\n")
    result_dict = dict()
    accuracy = 0
    time_cost = 0
    accuracy_key = "Train-accuracy"
    time_cost_key = "Time cost"
    reversed_log = reversed(last_n_lines)
    for line in reversed_log:
        if all(key in result_dict for key in ("Train-accuracy", "Time cost")):
            break
        if accuracy_key in line:
            if accuracy_key in result_dict:
                continue
            accuracy_str = line.split("=")[1]
            result_dict[accuracy_key] = accuracy_str
            accuracy = float(accuracy_str)
        if time_cost_key in line:
            if time_cost_key in result_dict:
                continue
            time_str = line.split("=")[1]
            result_dict[time_cost_key] = time_str
            time_cost = float(time_str)
    result = "\n".join(result_dict.values()) + "\n"
    LOGGER.info(f'Result is {result}')
    LOGGER.info(f'{accuracy_key} is {accuracy}')
    LOGGER.info(f'{time_cost_key} is {time_cost}')
    return result, time_cost, accuracy
def test_pip_check(image):
    """
    Ensure there are no broken requirements on the containers by running "pip check"

    :param image: ECR image URI
    """
    ctx = Context()
    gpu_suffix = "-gpu" if "gpu" in image else ""

    # TF inference containers do not have core tensorflow installed by design. This pip check error is
    # allowed to occur in order to catch other pip check issues that may be associated with TF inference.
    # smclarify binaries have an s3fs->aiobotocore dependency which uses an older version of botocore;
    # this is temporarily allowed in order to catch other issues.
    allowed_tf_exception = re.compile(
        rf"^tensorflow-serving-api{gpu_suffix} \d\.\d+\.\d+ requires "
        rf"tensorflow{gpu_suffix}, which is not installed.$")
    allowed_smclarify_exception = re.compile(
        r"^aiobotocore \d+(\.\d+)* has requirement botocore<\d+(\.\d+)*,>=\d+(\.\d+)*, "
        r"but you have botocore \d+(\.\d+)*\.$")

    # Add null entrypoint to ensure command exits immediately
    output = ctx.run(f"docker run --entrypoint='' {image} pip check",
                     hide=True,
                     warn=True)
    if output.return_code != 0:
        if not (allowed_tf_exception.match(output.stdout)
                or allowed_smclarify_exception.match(output.stdout)):
            # Rerun pip check without warn=True so the unexpected failure raises with full output
            ctx.run(f"docker run --entrypoint='' {image} pip check", hide=True)
def test_framework_version_cpu(image):
    """
    Check that the framework version in the image tag is the same as the one on a running container.
    This function tests CPU, EIA, and Neuron images.

    :param image: ECR image URI
    """
    if "gpu" in image:
        pytest.skip("GPU images will have their framework version tested in test_framework_and_cuda_version_gpu")
    image_repo_name, _ = get_repository_and_tag_from_image_uri(image)
    if re.fullmatch(r"(pr-|beta-|nightly-)?tensorflow-inference(-eia)?", image_repo_name):
        pytest.skip(msg="TF inference for CPU/GPU/EIA does not have core tensorflow installed")

    tested_framework, tag_framework_version = get_framework_and_version_from_tag(image)

    # Framework name may include a "huggingface_" prefix; note str.lstrip would strip
    # characters rather than the prefix, so remove it explicitly
    if tested_framework.startswith("huggingface_"):
        tested_framework = tested_framework[len("huggingface_"):]
    # Module name is torch
    if tested_framework == "pytorch":
        tested_framework = "torch"
    ctx = Context()
    container_name = get_container_name("framework-version", image)
    start_container(container_name, image, ctx)
    output = run_cmd_on_container(
        container_name, ctx, f"import {tested_framework}; print({tested_framework}.__version__)", executable="python"
    )
    if is_canary_context():
        assert tag_framework_version in output.stdout.strip()
    else:
        assert tag_framework_version == output.stdout.strip()
def test_sm_pysdk_2(training):
    """
    Simply verify that we have sagemaker > 2.0 in the python sdk.

    If you find that this test is failing because sm pysdk version is not greater than 2.0, then that means that
    the image under test needs to be updated.

    If you find that the training image under test does not have sagemaker pysdk, it should be added or explicitly
    skipped (with reasoning provided).

    :param training: training ECR image URI
    """

    # Ensure that sm py sdk 2 is on the container
    ctx = Context()
    container_name = _get_container_name("sm_pysdk", training)
    _start_container(container_name, training, ctx)

    sm_version = _run_cmd_on_container(
        container_name,
        ctx,
        "import sagemaker; print(sagemaker.__version__)",
        executable="python").stdout.strip()

    assert Version(sm_version) > Version(
        "2"), f"Sagemaker version should be > 2.0. Found version {sm_version}"
Example No. 24
def test_sagemaker_studio_analytics_extension(training, package_name):
    framework, framework_version = test_utils.get_framework_and_version_from_tag(
        training)
    utility_package_framework_version_limit = {
        "pytorch": SpecifierSet(">=1.7,<1.9"),
        "tensorflow": SpecifierSet(">=2.4,<2.7,!=2.5.*")
    }

    if (framework not in utility_package_framework_version_limit
            or Version(framework_version)
            not in utility_package_framework_version_limit[framework]):
        pytest.skip(
            f"sagemaker_studio_analytics_extension is not installed in {framework} {framework_version} DLCs"
        )

    ctx = Context()
    container_name = test_utils.get_container_name(
        f"sagemaker_studio_analytics_extension-{package_name}", training)
    test_utils.start_container(container_name, training, ctx)

    # Optionally add version validation in the following steps, rather than just printing it.
    test_utils.run_cmd_on_container(container_name, ctx,
                                    f"pip list | grep -i {package_name}")
    import_package = package_name.replace("-", "_")
    import_test_cmd = (f"import {import_package}" if package_name in [
        "sagemaker-studio-sparkmagic-lib",
        "sagemaker-studio-analytics-extension"
    ] else f"import {import_package}; print({import_package}.__version__)")
    test_utils.run_cmd_on_container(container_name,
                                    ctx,
                                    import_test_cmd,
                                    executable="python")
def test_sm_profiler_tf(tensorflow_training):
    if is_tf_version("1", tensorflow_training):
        pytest.skip(
            "Skipping test on TF1, since there are no smprofiler config files for TF1"
        )
    processor = get_processor_from_image_uri(tensorflow_training)
    if processor not in ("cpu", "gpu"):
        pytest.skip(f"Processor {processor} not supported. Skipping test.")

    ctx = Context()

    profiler_tests_dir = os.path.join(
        os.getenv("CODEBUILD_SRC_DIR"),
        get_container_name("smprof", tensorflow_training), "smprofiler_tests")
    ctx.run(f"mkdir -p {profiler_tests_dir}", hide=True)

    # Download sagemaker-tests zip
    sm_tests_zip = "sagemaker-tests.zip"
    ctx.run(
        f"aws s3 cp {os.getenv('SMPROFILER_TESTS_BUCKET')}/{sm_tests_zip} {profiler_tests_dir}/{sm_tests_zip}",
        hide=True)
    ctx.run(f"cd {profiler_tests_dir} && unzip {sm_tests_zip}", hide=True)

    # Install tf datasets
    ctx.run(
        f"echo 'tensorflow-datasets==4.0.1' >> "
        f"{profiler_tests_dir}/sagemaker-tests/tests/scripts/tf_scripts/requirements.txt",
        hide=True,
    )

    run_sm_profiler_tests(tensorflow_training, profiler_tests_dir,
                          "test_profiler_tensorflow.py", processor)
Example No. 26
def test_git_secrets():
    ctx = Context()
    repository_path = os.getenv("CODEBUILD_SRC_DIR")
    if not repository_path:
        repository_path = _recursive_find_repo_path()
    LOGGER.info(f"repository_path = {repository_path}")

    # To make the scan fail (for testing), replace the regex pattern below with a string that matches it:
    SOME_FAKE_CREDENTIALS = "ASIA[A-Z0-9]{16}"
    WHITELISTED_CREDENTIALS = "AKIAIOSFODNN7EXAMPLE"
    # End of Test Section

    with ctx.cd(repository_path):
        ctx.run("git clone https://github.com/awslabs/git-secrets.git")
        with ctx.cd("git-secrets"):
            ctx.run("make install")
        ctx.run("git secrets --install")
        ctx.run("git secrets --register-aws")
        output = ctx.run("git secrets --list")
        LOGGER.info(f"\n--COMMAND--\n{output.command}\n"
                    f"--STDOUT--\n{output.stdout}\n"
                    f"--STDERR--\n{output.stderr}\n"
                    f"----------")
        scan_results = ctx.run("git secrets --scan", hide=True, warn=True)
        LOGGER.info(f"\n--COMMAND--\n{scan_results.command}\n"
                    f"--STDOUT--\n{scan_results.stdout}\n"
                    f"--STDERR--\n{scan_results.stderr}"
                    f"----------")
    assert scan_results.ok, scan_results.stderr
def test_python_version(image):
    """
    Check that the python version in the image tag is the same as the one on a running container.

    :param image: ECR image URI
    """
    ctx = Context()
    container_name = f"py-version-{image.split('/')[-1].replace('.', '-').replace(':', '-')}"

    py_version = ""
    for tag_split in image.split('-'):
        if tag_split.startswith('py'):
            if len(tag_split) > 3:
                py_version = f"Python {tag_split[2]}.{tag_split[3]}"
            else:
                py_version = f"Python {tag_split[2]}"
    _start_container(container_name, image, ctx)
    output = _run_cmd_on_container(container_name, ctx, "python --version")

    container_py_version = output.stdout
    # Due to py2 deprecation, Python2 version gets streamed to stderr. Python installed via Conda also appears to
    # stream to stderr, hence the pytorch condition.
    if "Python 2" in py_version or "pytorch" in image:
        container_py_version = output.stderr

    assert py_version in container_py_version, f"Cannot find {py_version} in {container_py_version}"
Example No. 28
def agent(pytestconfig):

    agent_cli = str(Path(pytestconfig.rootdir) / "arduino-create-agent")
    env = {
        # "ARDUINO_DATA_DIR": data_dir,
        # "ARDUINO_DOWNLOADS_DIR": downloads_dir,
        # "ARDUINO_SKETCHBOOK_DIR": data_dir,
    }
    run_context = Context()

    runner = Local(run_context)  # execute a command on the local filesystem

    cd_command = "cd"
    with run_context.prefix(f'{cd_command} ..'):
        runner.run(agent_cli,
                   echo=True,
                   hide=True,
                   warn=True,
                   env=env,
                   asynchronous=True)

        # we give some time to the agent to start and listen to
        # incoming requests
        time.sleep(.5)

        # we block here until the test function using this fixture has returned
        yield runner

    # Kill the runner's process as we finished our test (platform dependent)
    os_signal = signal.SIGTERM
    if platform.system() != "Windows":
        os_signal = signal.SIGKILL
    os.kill(runner.process.pid, os_signal)
Example No. 29
    def _run(cmd_string):
        cli_full_line = "{} {}".format(cli_path, cmd_string)
        run_context = Context()
        with run_context.cd(working_dir):
            return run_context.run(
                cli_full_line, echo=False, hide=True, warn=True, env=env
            )
Example No. 30
    def _call_objs(self, contextualized):
        # Setup
        pre_body, post_body = Mock(), Mock()
        t1 = Task(pre_body, contextualized=contextualized)
        t2 = Task(post_body, contextualized=contextualized)
        t3 = Task(Mock(),
            pre=[call(t1, 5, foo='bar')],
            post=[call(t2, 7, biz='baz')],
        )
        c = Collection(t1=t1, t2=t2, t3=t3)
        e = Executor(collection=c, context=Context())
        e.execute('t3')
        # Pre-task asserts
        args, kwargs = pre_body.call_args
        eq_(kwargs, {'foo': 'bar'})
        if contextualized:
            assert isinstance(args[0], Context)
            eq_(args[1], 5)
        else:
            eq_(args, (5,))
        # Post-task asserts
        args, kwargs = post_body.call_args
        eq_(kwargs, {'biz': 'baz'})
        if contextualized:
            assert isinstance(args[0], Context)
            eq_(args[1], 7)
        else:
            eq_(args, (7,))