Example #1
def test_git_secrets():
    ctx = Context()
    repository_path = os.getenv("CODEBUILD_SRC_DIR")
    if not repository_path:
        repository_path = _recursive_find_repo_path()
    LOGGER.info(f"repository_path = {repository_path}")

    # Replace the regex pattern below with a string it matches to make the scan fail:
    SOME_FAKE_CREDENTIALS = "ASIA[A-Z0-9]{16}"
    WHITELISTED_CREDENTIALS = "AKIAIOSFODNN7EXAMPLE"
    # End of Test Section

    with ctx.cd(repository_path):
        ctx.run("git clone https://github.com/awslabs/git-secrets.git")
        with ctx.cd("git-secrets"):
            ctx.run("make install")
        ctx.run("git secrets --install")
        ctx.run("git secrets --register-aws")
        output = ctx.run("git secrets --list")
        LOGGER.info(f"\n--COMMAND--\n{output.command}\n"
                    f"--STDOUT--\n{output.stdout}\n"
                    f"--STDERR--\n{output.stderr}\n"
                    f"----------")
        scan_results = ctx.run("git secrets --scan", hide=True, warn=True)
        LOGGER.info(f"\n--COMMAND--\n{scan_results.command}\n"
                    f"--STDOUT--\n{scan_results.stdout}\n"
                    f"--STDERR--\n{scan_results.stderr}"
                    f"----------")
    assert scan_results.ok, scan_results.stderr
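A note on the pattern above: with invoke, run(..., warn=True) returns a Result instead of raising UnexpectedExit on a non-zero exit, which is what lets the test inspect scan_results.ok afterwards. A minimal sketch of that behavior (the command is illustrative):

from invoke import Context

ctx = Context()
# warn=True suppresses UnexpectedExit; hide=True keeps output off the terminal
result = ctx.run("git secrets --scan", hide=True, warn=True)
if not result.ok:  # .ok is True only when the exit code is 0
    print(f"scan failed (exit {result.return_code}):\n{result.stderr}")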
Example #2
def run_command(
    context: Context,
    user: str,
    remote: bool,
    instance: Optional[str],
    stack: Optional[str],
    command: str,
    compose: bool = True,
):
    host = get_host(remote)
    instance = get_instance(remote, instance)
    stack = get_stack(remote, instance, stack)

    if compose:
        command = f"{COMPOSE_CMD} -f {stack} {command}"

    info(f"{host}/{instance}/{stack}\n{command}")

    try:
        if remote:
            with get_connection(user, HOST) as c:
                with c.cd(f"{HOST_PATH}/{instance}"):
                    c.run(command, pty=True)
        else:
            context.run(command, replace_env=False, pty=True)
    except (AuthenticationException, ValueError) as e:
        error(f"{e}")
    except (Failure, ThreadException, UnexpectedExit):
        error(f"{host}/{instance}\nFailed to run command: `{command}`")
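get_connection is a helper from this project, not part of Fabric. A minimal sketch of what it might look like, assuming key-based auth via fabric.Connection (the timeout value is illustrative):

from fabric import Connection

def get_connection(user: str, host: str) -> Connection:
    # Connection is a context manager, so callers can write `with get_connection(...) as c:`
    return Connection(host=host, user=user, connect_timeout=10)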
Example #3
def seed(context: Context = CONTEXT, migrate: bool = False):
    """Seed the database."""
    django_container_name = 'django'
    db_volume = 'modularhistory_postgres_data'
    if not os.path.isfile(settings.DB_INIT_FILEPATH):
        raise Exception('Seed does not exist.')
    # Remove the data volume, if it exists
    print('Stopping containers...')
    context.run('docker-compose down')
    print('Wiping postgres data volume...')
    context.run(f'docker volume rm {db_volume}', warn=True)
    # Start up the postgres container, automatically running init.sql.
    print('Initializing postgres data...')
    context.run('docker-compose up -d postgres')
    print('Waiting for Postgres to finish recreating the database...')
    sleep(10)  # Give postgres time to recreate the database.
    if migrate:
        context.run('docker-compose run django_helper python manage.py migrate')
    if input('Create a superuser (for testing the website)? [Y/n] ') != NEGATIVE:
        sleep(1)
        instructions = (
            'When prompted, enter the username and password you would like to use '
            'for your superuser account.'
        )
        context.run(
            'docker-compose run django_helper bash -c \''
            f'echo "{instructions}" && python manage.py createsuperuser'
            '\'',
            pty=True,
        )
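The fixed sleep(10) above can race on slow machines. One alternative is to poll the container until Postgres accepts connections; a sketch, assuming the compose service is named postgres and the image ships pg_isready:

from time import sleep
from invoke import Context

def wait_for_postgres(context: Context, timeout: int = 60):
    """Poll pg_isready inside the postgres container until it reports ready."""
    for _ in range(timeout):
        # -T disables TTY allocation so the command works in non-interactive runs
        if context.run('docker-compose exec -T postgres pg_isready', warn=True, hide=True).ok:
            return
        sleep(1)
    raise TimeoutError('Postgres did not become ready in time.')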
Example #4
def download_file(remote_url: str, link_type: str):
    """
    Fetch remote files and save with provided local_path name
    :param link_type: string
    :param remote_url: string
    :return: file_name: string
    """
    LOGGER.info(f"Downloading {remote_url}")

    file_name = os.path.basename(remote_url).strip()
    LOGGER.info(f"basename: {file_name}")

    if link_type == "s3" and remote_url.startswith("s3://"):
        match = re.match(r's3:\/\/(.+?)\/(.+)', remote_url)
        if match:
            bucket_name = match.group(1)
            bucket_key = match.group(2)
            LOGGER.info(f"bucket_name: {bucket_name}")
            LOGGER.info(f"bucket_key: {bucket_key}")
            download_s3_file(bucket_name, bucket_key, file_name)
        else:
            raise ValueError("Regex matching on s3 URI failed.")
    else:
        ctx = Context()
        ctx.run(f"curl -O {remote_url}")

    return file_name
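download_s3_file is defined elsewhere in this project; a minimal equivalent using boto3 might be (bucket, key, and file names come from the caller):

import boto3

def download_s3_file(bucket_name: str, bucket_key: str, file_name: str):
    # download_file streams the object to disk without loading it into memory
    boto3.client("s3").download_file(bucket_name, bucket_key, file_name)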
Example #5
def clear_migration_history(context: Context = CONTEXT, app: str = ''):
    """Delete all migration files and fake reverting to migration zero."""
    app_name = app or input('App name: ')
    with transaction.atomic():
        migrations_dir = join(settings.BASE_DIR, 'apps', app_name, MIGRATIONS_DIRNAME)
        n_migrations = len(os.listdir(path=migrations_dir)) - 1
        if n_migrations > MAX_MIGRATION_COUNT:
            # Fake reverting all migrations.
            print(f'\n Clearing migration history for the {app_name} app...')
            result = context.run(
                f'python manage.py migrate {app_name} zero --fake', warn=True
            )
            print()
            print('Migrations after fake reversion:')
            context.run('python manage.py showmigrations')
            if result.ok:
                input('Press enter to continue.')
            else:
                raise Exception(
                    f'Failed to clear migration history for {app_name}: '
                    f'{result.stderr}'
                )
        else:
            print(f'Skipped {app_name} because it only has {n_migrations} migrations.')
    # Remove old migration files.
    if input('\n Proceed to remove migration files? [Y/n] ') != NEGATIVE:
        remove_migrations(context, app=app_name)
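remove_migrations is another task in this project. Assuming it simply deletes the generated migration modules, a hypothetical minimal version could look like:

def remove_migrations(context: Context = CONTEXT, app: str = ''):
    """Delete an app's migration files (hypothetical sketch)."""
    app_name = app or input('App name: ')
    migrations_dir = join(settings.BASE_DIR, 'apps', app_name, MIGRATIONS_DIRNAME)
    for filename in os.listdir(migrations_dir):
        # Keep __init__.py so Python still treats the directory as a package
        if filename.endswith('.py') and filename != '__init__.py':
            os.remove(join(migrations_dir, filename))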
Example #6
def test_generate_coverage_doc():
    """
    Test generating the test coverage doc
    """
    test_coverage_file = get_test_coverage_file_path()
    ctx = Context()
    # Set DLC_IMAGES to '' to avoid image names affecting function metadata (due to parametrization)
    # Set CODEBUILD_RESOLVED_SOURCE_VERSION to test for ease of running this test locally
    ctx.run(
        "export DLC_IMAGES='' && export CODEBUILD_RESOLVED_SOURCE_VERSION='test' && export BUILD_CONTEXT=''"
        "&& pytest -s --collect-only  --generate-coverage-doc --ignore=container_tests/",
        hide=True,
    )

    # Ensure that the coverage report is created
    assert os.path.exists(test_coverage_file), f"Cannot find test coverage report file {test_coverage_file}"

    # Write test coverage file to S3
    if is_mainline_context():
        client = boto3.client("s3")
        with open(test_coverage_file, "rb") as test_file:
            try:
                client.put_object(Bucket=TEST_COVERAGE_REPORT_BUCKET, Key=os.path.basename(test_coverage_file),
                                  Body=test_file)
            except ClientError as e:
                LOGGER.error(f"Unable to upload report to bucket {TEST_COVERAGE_REPORT_BUCKET}. Error: {e}")
                raise
Example #7
def test_pip_check(image):
    """
    Ensure there are no broken requirements on the containers by running "pip check"

    :param image: ECR image URI
    """
    ctx = Context()
    gpu_suffix = "-gpu" if "gpu" in image else ""

    # TF inference containers do not have core tensorflow installed by design. Allowing for this pip check error
    # to occur in order to catch other pip check issues that may be associated with TF inference
    # smclarify binaries have s3fs->aiobotocore dependency which uses older version of botocore. temporarily
    # allowing this to catch other issues
    allowed_tf_exception = re.compile(
        rf"^tensorflow-serving-api{gpu_suffix} \d\.\d+\.\d+ requires "
        rf"tensorflow{gpu_suffix}, which is not installed.$")
    allowed_smclarify_exception = re.compile(
        r"^aiobotocore \d+(\.\d+)* has requirement botocore<\d+(\.\d+)*,>=\d+(\.\d+)*, "
        r"but you have botocore \d+(\.\d+)*\.$")

    # Add null entrypoint to ensure command exits immediately
    output = ctx.run(f"docker run --entrypoint='' {image} pip check",
                     hide=True,
                     warn=True)
    if output.return_code != 0:
        if not (allowed_tf_exception.match(output.stdout)
                or allowed_smclarify_exception.match(output.stdout)):
            # Rerun pip check test if this is an unexpected failure
            ctx.run(f"docker run --entrypoint='' {image} pip check", hide=True)
Example #8
def generate_safety_report_for_image(image_uri,
                                     image_info,
                                     storage_file_path=None):
    """
    Generate a safety scan report for an image and store it at the specified location

    :param image_uri: str, consists of f"{image_repo}:{image_tag}"
    :param image_info: dict, should consist of 3 keys - "framework", "python_version" and "image_type".
    :param storage_file_path: str, looks like "storage_location.json"
    :return: list[dict], safety report generated by SafetyReportGenerator
    """
    ctx = Context()
    docker_run_cmd = f"docker run -id --entrypoint='/bin/bash' {image_uri} "
    container_id = ctx.run(f"{docker_run_cmd}", hide=True,
                           warn=True).stdout.strip()
    install_safety_cmd = "pip install safety"
    docker_exec_cmd = f"docker exec -i {container_id}"
    ctx.run(f"{docker_exec_cmd} {install_safety_cmd}", hide=True, warn=True)
    ignore_dict = get_safety_ignore_dict(image_uri, image_info["framework"],
                                         image_info["python_version"],
                                         image_info["image_type"])
    safety_scan_output = SafetyReportGenerator(
        container_id, ignore_dict=ignore_dict).generate()
    ctx.run(f"docker rm -f {container_id}", hide=True, warn=True)
    if storage_file_path:
        with open(storage_file_path, "w", encoding="utf-8") as f:
            json.dump(safety_scan_output, f, indent=4)
    return safety_scan_output
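One fragility above: if SafetyReportGenerator(...).generate() raises, the started container is never removed. The middle of the function could wrap the call in try/finally so cleanup always runs:

    try:
        safety_scan_output = SafetyReportGenerator(
            container_id, ignore_dict=ignore_dict).generate()
    finally:
        # Remove the detached container even if report generation fails
        ctx.run(f"docker rm -f {container_id}", hide=True, warn=True)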
Example #9
def test_tensorflow_sagemaker_training_performance(tensorflow_training,
                                                   num_nodes, region):

    # This sleep has been inserted because all the parametrized training jobs are automatically created
    # by SageMaker with the same name, due to being started around the same time, and with the same image uri.
    time.sleep(
        random.Random(x=f"{tensorflow_training}{num_nodes}").random() * 60)

    framework_version = re.search(r"[1,2](\.\d+){2}",
                                  tensorflow_training).group()
    processor = "gpu" if "gpu" in tensorflow_training else "cpu"

    ec2_instance_type = "p3.16xlarge" if processor == "gpu" else "c5.18xlarge"

    py_version = "py2" if "py2" in tensorflow_training else "py37" if "py37" in tensorflow_training else "py3"

    time_str = time.strftime('%Y-%m-%d-%H-%M-%S')
    commit_info = os.getenv("CODEBUILD_RESOLVED_SOURCE_VERSION")
    target_upload_location = os.path.join(BENCHMARK_RESULTS_S3_BUCKET,
                                          "tensorflow", framework_version,
                                          "sagemaker", "training", processor,
                                          py_version)

    test_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            "resources")
    venv_dir = os.path.join(test_dir, "sm_benchmark_venv")

    ctx = Context()

    with ctx.cd(test_dir), ctx.prefix(f"source {venv_dir}/bin/activate"):
        log_file = f"results-{commit_info}-{time_str}-{num_nodes}-node.txt"
        run_out = ctx.run(
            f"timeout 45m python tf_sm_benchmark.py "
            f"--framework-version {framework_version} "
            f"--image-uri {tensorflow_training} "
            f"--instance-type ml.{ec2_instance_type} "
            f"--node-count {num_nodes} "
            f"--python {py_version} "
            f"--region {region} "
            f"> {log_file}",
            warn=True,
            echo=True)

        if not (run_out.ok or run_out.return_code == 124):
            target_upload_location = os.path.join(target_upload_location,
                                                  "failure_log")

        ctx.run(
            f"aws s3 cp {log_file} {os.path.join(target_upload_location, log_file)}"
        )

    LOGGER.info(
        f"Test results can be found at {os.path.join(target_upload_location, log_file)}"
    )

    assert run_out.ok, (
        f"Benchmark Test failed with return code {run_out.return_code}. "
        f"Test results can be found at {os.path.join(target_upload_location, log_file)}"
    )
Example #10
def run(command):
    """Execute a command with Invoke."""
    ctx = Context()
    ctx.run(
        command,
        echo=True,  # To improve User eXperience
        pty=True,  # To get colors in output
    )
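Usage is then a one-liner from any task (the command is illustrative):

run("pytest --maxfail=1")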
Example #11
def build_bai_docker_container():
    """
    Builds a docker container with the necessary script requirements (bash 5.0+, conda)
    """
    # Assuming we are in dlc_tests directory
    docker_dir = os.path.join("benchmark", "bai", "docker")
    ctx = Context()
    with ctx.cd(docker_dir):
        ctx.run("docker build -t bai_env_container -f Dockerfile .")
Example #12
def test_pip_check(image):
    """
    Ensure there are no broken requirements on the containers by running "pip check"

    :param image: ECR image URI
    """
    ctx = Context()
    gpu_suffix = "-gpu" if "gpu" in image else ""
    allowed_exception_list = []

    # TF inference containers do not have core tensorflow installed by design. Allowing for this pip check error
    # to occur in order to catch other pip check issues that may be associated with TF inference
    # smclarify binaries have s3fs->aiobotocore dependency which uses older version of botocore. temporarily
    # allowing this to catch other issues
    allowed_tf_exception = re.compile(
        rf"^tensorflow-serving-api{gpu_suffix} \d\.\d+\.\d+ requires tensorflow{gpu_suffix}, which is not installed.$"
    )
    allowed_exception_list.append(allowed_tf_exception)

    allowed_smclarify_exception = re.compile(
        r"^aiobotocore \d+(\.\d+)* has requirement botocore<\d+(\.\d+)*,>=\d+(\.\d+)*, "
        r"but you have botocore \d+(\.\d+)*\.$")
    allowed_exception_list.append(allowed_smclarify_exception)

    # The v0.22 version of tensorflow-io has a bug fixed in v0.23 https://github.com/tensorflow/io/releases/tag/v0.23.0
    allowed_habana_tf_exception = re.compile(
        r"^tensorflow-io 0.22.0 requires tensorflow, which is not installed.$"
    )
    allowed_exception_list.append(allowed_habana_tf_exception)

    framework, framework_version = get_framework_and_version_from_tag(image)
    # The v0.21 version of tensorflow-io has a bug fixed in v0.23 https://github.com/tensorflow/io/releases/tag/v0.23.0
    if framework in ("tensorflow", "huggingface_tensorflow") and Version(
            framework_version) in SpecifierSet(">=2.6.3,<2.7"):
        allowed_tf263_exception = re.compile(
            r"^tensorflow-io 0.21.0 requires tensorflow, which is not installed.$"
        )
        allowed_exception_list.append(allowed_tf263_exception)

    if "autogluon" in image and (("0.3.1" in image) or ("0.3.2" in image)):
        allowed_autogluon_exception = re.compile(
            r"autogluon-(vision|mxnet) 0.3.1 has requirement Pillow<8.4.0,>=8.3.0, but you have pillow \d+(\.\d+)*"
        )
        allowed_exception_list.append(allowed_autogluon_exception)

    # Add null entrypoint to ensure command exits immediately
    output = ctx.run(f"docker run --entrypoint='' {image} pip check",
                     hide=True,
                     warn=True)
    if output.return_code != 0:
        if not (any([
                allowed_exception.match(output.stdout)
                for allowed_exception in allowed_exception_list
        ])):
            # Rerun pip check test if this is an unexpected failure
            ctx.run(f"docker run --entrypoint='' {image} pip check", hide=True)
Example #13
def _run_background(context: Context,
                    command,
                    out_file="/dev/null",
                    err_file=None,
                    shell="/bin/bash",
                    pty=False):
    # Detach the process: nohup <command> ><out_file> 2><err_file, or stdout if none> &
    cmd = 'nohup {} >{} 2>{} &'.format(command, out_file, err_file or '&1')
    print("Running: {}".format(cmd))
    context.run(cmd, shell=shell, pty=pty, warn=True)
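A call might look like this, given an invoke Context named ctx (command and path are illustrative):

_run_background(ctx, "python -m http.server 8000", out_file="/tmp/server.log")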
Example #14
def makemigrations(context: Context = CONTEXT, noninteractive: bool = False):
    """Safely create migrations."""
    interactive = not noninteractive
    make_migrations = True
    if interactive:
        print('Doing a dry run first...')
        context.run('python manage.py makemigrations --dry-run')
        make_migrations = input('^ Do these changes look OK? [Y/n]') != NEGATIVE
    if make_migrations:
        context.run('python manage.py makemigrations')
Example #15
File: common.py Project: alxshine/eNNclave
def build_library(model: Enclave, mode: str):
    model.generate_state()
    model.generate_forward(mode)
    context = Context()
    with context.cd(cfg.get_ennclave_home()):
        if mode == 'sgx':
            model.generate_config()
            context.run('build/backend_sgx_encryptor')

        with context.cd("build"):  # TODO: make more robust
            context.run(f"make backend_{mode}")
Example #16
def test_eks_mxnet_multi_node_training_horovod_mnist(mxnet_training,
                                                     example_only):
    """Run MXNet distributed training on EKS using docker images with MNIST dataset"""

    ctx = Context()

    eks_cluster_size = 3
    ec2_instance_type = "p3.16xlarge"
    cluster_name = eks_utils.PR_EKS_CLUSTER_NAME_TEMPLATE.format("mxnet")

    assert eks_utils.is_eks_cluster_active(
        cluster_name), f"EKS Cluster {cluster_name} is inactive. Exiting test"

    eks_gpus_per_worker = ec2_utils.get_instance_num_gpus(
        instance_type=ec2_instance_type)

    LOGGER.info(
        "Starting run_eks_mxnet_multi_node_training on MNIST dataset using horovod"
    )
    LOGGER.info("The test will run on an example image %s", mxnet_training)

    user = ctx.run("echo $USER").stdout.strip("\n")
    random.seed(
        f"{mxnet_training}-{datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')}"
    )
    unique_tag = f"{user}-{random.randint(1, 10000)}"

    namespace = f"mx-multi-node-train-{'py2' if 'py2' in mxnet_training else 'py3'}-{unique_tag}"
    app_name = f"kubeflow-mxnet-hvd-mpijob-{unique_tag}"
    job_name = f"mxnet-mnist-horovod-job-{unique_tag}"

    command_to_run = "mpirun,-mca,btl_tcp_if_exclude,lo,-mca,pml,ob1,-mca,btl,^openib,--bind-to,none,-map-by,slot," \
                     "-x,LD_LIBRARY_PATH,-x,PATH,-x,NCCL_SOCKET_IFNAME=eth0,-x,NCCL_DEBUG=INFO,python," \
                     "/horovod/examples/mxnet_mnist.py"
    args_to_pass = "******"
    home_dir = ctx.run("echo $HOME").stdout.strip("\n")
    path_to_ksonnet_app = os.path.join(
        home_dir, f"mxnet_multi_node_hvd_eks_test-{unique_tag}")

    LOGGER.debug(f"Namespace: {namespace}")

    # return training_result
    result = _run_eks_multi_node_training_mpijob(namespace, app_name,
                                                 mxnet_training, job_name,
                                                 command_to_run, args_to_pass,
                                                 path_to_ksonnet_app,
                                                 eks_cluster_size,
                                                 eks_gpus_per_worker)

    return result
Example #17
def test_sm_profiler_tf(tensorflow_training):
    if is_tf_version("1", tensorflow_training):
        pytest.skip(
            "Skipping test on TF1, since there are no smprofiler config files for TF1"
        )
    processor = get_processor_from_image_uri(tensorflow_training)
    if processor not in ("cpu", "gpu"):
        pytest.skip(f"Processor {processor} not supported. Skipping test.")

    ctx = Context()

    profiler_tests_dir = os.path.join(
        os.getenv("CODEBUILD_SRC_DIR"),
        get_container_name("smprof", tensorflow_training), "smprofiler_tests")
    ctx.run(f"mkdir -p {profiler_tests_dir}", hide=True)

    # Download sagemaker-tests zip
    sm_tests_zip = "sagemaker-tests.zip"
    ctx.run(
        f"aws s3 cp {os.getenv('SMPROFILER_TESTS_BUCKET')}/{sm_tests_zip} {profiler_tests_dir}/{sm_tests_zip}",
        hide=True)
    ctx.run(f"cd {profiler_tests_dir} && unzip {sm_tests_zip}", hide=True)

    # Install tf datasets
    ctx.run(
        f"echo 'tensorflow-datasets==4.0.1' >> "
        f"{profiler_tests_dir}/sagemaker-tests/tests/scripts/tf_scripts/requirements.txt",
        hide=True,
    )

    run_sm_profiler_tests(tensorflow_training, profiler_tests_dir,
                          "test_profiler_tensorflow.py", processor)
Example #18
def setup_sm_benchmark_mx_train_env(resources_location):
    """
    Create a virtual environment for benchmark tests if it doesn't already exist, and download all necessary scripts
    :param resources_location: <str> directory in which test resources should be placed
    :return: absolute path to the location of the virtual environment
    """
    ctx = Context()

    venv_dir = os.path.join(resources_location, "sm_benchmark_venv")
    if not os.path.isdir(venv_dir):
        ctx.run(f"virtualenv {venv_dir}")
        with ctx.prefix(f"source {venv_dir}/bin/activate"):
            ctx.run("pip install -U 'sagemaker<2' awscli boto3 botocore")
    return venv_dir
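The returned path is typically combined with ctx.prefix to run commands inside the environment, as in the SageMaker benchmark test earlier on this page (the resources location is illustrative):

venv_dir = setup_sm_benchmark_mx_train_env("/tmp/test_resources")
ctx = Context()
with ctx.prefix(f"source {venv_dir}/bin/activate"):
    ctx.run("python -c 'import sagemaker; print(sagemaker.__version__)'")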
Example #19
def execute_sagemaker_remote_tests(image):
    """
    Run pytest in a virtual env for a particular image
    Expected to run via multiprocessing
    :param image: ECR url
    """
    pytest_command, path, tag, job_type = generate_sagemaker_pytest_cmd(
        image, SAGEMAKER_REMOTE_TEST_TYPE)
    context = Context()
    with context.cd(path):
        context.run(f"virtualenv {tag}")
        with context.prefix(f"source {tag}/bin/activate"):
            context.run("pip install -r requirements.txt", warn=True)
            res = context.run(pytest_command, warn=True)
            metrics_utils.send_test_result_metrics(res.return_code)
Example #20
def build_delphi_project(ctx: context.Context,
                         project_filename,
                         config='DEBUG',
                         delphi_version=DEFAULT_DELPHI_VERSION):
    delphi_versions = {
        "XE7": {
            "path": "15.0",
            "desc": "Delphi XE7"
        },
        "10.1": {
            "path": "18.0",
            "desc": "Delphi 10.1 Berlin"
        },
        "10.2": {
            "path": "19.0",
            "desc": "Delphi 10.2 Tokyo"
        },
        "10.3": {
            "path": "20.0",
            "desc": "Delphi 10.3 Rio"
        },
    }

    assert delphi_version in delphi_versions, "Invalid Delphi version: " + delphi_version
    print("[" + delphi_versions[delphi_version]["desc"] + "] ", end="")
    version_path = delphi_versions[delphi_version]["path"]

    rsvars_path = f'C:\\Program Files (x86)\\Embarcadero\\Studio\\{version_path}\\bin\\rsvars.bat'
    if not os.path.isfile(rsvars_path):
        rsvars_path = f'D:\\Program Files (x86)\\Embarcadero\\Studio\\{version_path}\\bin\\rsvars.bat'
        if not os.path.isfile(rsvars_path):
            raise Exception("Cannot find rsvars.bat")
    cmdline = f'"{rsvars_path}" & msbuild /t:Build /p:Config={config} /p:Platform=Win32 "{project_filename}"'
    return ctx.run(cmdline, hide=True, warn=True)
Example #21
def _run(cmd_string):
    cli_full_line = "{} {}".format(cli_path, cmd_string)
    run_context = Context()
    with run_context.cd(working_dir):
        return run_context.run(
            cli_full_line, echo=False, hide=True, warn=True, env=env
        )
Example #22
    def _run(cmd: list, custom_working_dir=None, custom_env=None):
        if cmd is None:
            cmd = []
        quoted_cmd = [f'"{t}"' for t in cmd]

        if not custom_working_dir:
            custom_working_dir = working_dir
        if not custom_env:
            custom_env = env
        cli_full_line = '"{}" {}'.format(cli_path, " ".join(quoted_cmd))
        run_context = Context()
        # It might happen that we need to change directories between drives on Windows,
        # in that case the "/d" flag must be used otherwise directory wouldn't change
        cd_command = "cd"
        if platform.system() == "Windows":
            cd_command += " /d"
        # Context.cd() is not used since it doesn't work correctly on Windows.
        # It escapes spaces in the path using "\ " but it doesn't always work,
        # wrapping the path in quotation marks is the safest approach
        with run_context.prefix(f'{cd_command} "{custom_working_dir}"'):
            return run_context.run(cli_full_line,
                                   echo=False,
                                   hide=True,
                                   warn=True,
                                   env=custom_env,
                                   encoding="utf-8")
Example #23
def test_clean():
    """Test clean task."""
    ctx = Context()
    ctx.run = MagicMock()
    assert type(clean) == Task
    clean(ctx)
    ctx.run.assert_called_once()
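When the task under test inspects the command result, the MagicMock can be given a real invoke.Result to return; a sketch, assuming the task checks .ok:

from unittest.mock import MagicMock
from invoke import Context, Result

ctx = Context()
# exited=0 makes .ok True, simulating a successful command
ctx.run = MagicMock(return_value=Result(stdout="done", exited=0))
assert ctx.run("anything").ok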
Example #24
def test_normal():
    """Test test_watch."""
    ctx = Context()
    ctx.run = MagicMock()
    assert isinstance(run_test_normal, types.FunctionType)
    run_test_normal(ctx, 8)
    ctx.run.assert_called_once()
Example #25
def build_delphi_project(
    ctx: context.Context,
    project_filename,
    config="DEBUG",
    delphi_version=DEFAULT_DELPHI_VERSION,
):
    delphi_versions = {
        "10": {
            "path": "17.0",
            "desc": "Delphi 10 Seattle"
        },
        "10.1": {
            "path": "18.0",
            "desc": "Delphi 10.1 Berlin"
        },
        "10.2": {
            "path": "19.0",
            "desc": "Delphi 10.2 Tokyo"
        },
        "10.3": {
            "path": "20.0",
            "desc": "Delphi 10.3 Rio"
        },
        "10.4": {
            "path": "21.0",
            "desc": "Delphi 10.4 Sydney"
        },
        "11": {
            "path": "22.0",
            "desc": "Delphi 11 Alexandria"
        },
        "11.1": {
            "path": "22.0",
            "desc": "Delphi 11.1 Alexandria"
        },
    }

    assert delphi_version in delphi_versions, ("Invalid Delphi version: " +
                                               delphi_version)
    print("[" + delphi_versions[delphi_version]["desc"] + "] ", end="")
    version_path = delphi_versions[delphi_version]["path"]

    rsvars_path = (
        f"C:\\Program Files (x86)\\Embarcadero\\Studio\\{version_path}\\bin\\rsvars.bat"
    )
    if not os.path.isfile(rsvars_path):
        rsvars_path = f"D:\\Program Files (x86)\\Embarcadero\\Studio\\{version_path}\\bin\\rsvars.bat"
        if not os.path.isfile(rsvars_path):
            raise Exception("Cannot find rsvars.bat")
    cmdline = ('"' + rsvars_path + '"' + " & msbuild /t:Build /p:Config=" +
               config + f' /p:Platform={project_filename[1]} "' +
               project_filename[0] + '"')
    print("\n" + "".join(cmdline))
    r = ctx.run(cmdline, hide=True, warn=True)
    if r.failed:
        print(r.stdout)
        print(r.stderr)
        raise Exit("Build failed for " +
                   delphi_versions[delphi_version]["desc"])
Example #26
def test_generate_coverage_doc():
    """
    Test generating the test coverage doc
    """
    test_coverage_file = get_test_coverage_file_path()
    ctx = Context()
    # Set DLC_TESTS to 'test' to avoid image names affecting function metadata (due to parametrization)
    # Set CODEBUILD_RESOLVED_SOURCE_VERSION to test for ease of running this test locally
    ctx.run(
        "export DLC_TESTS='test' && export CODEBUILD_RESOLVED_SOURCE_VERSION='test' && export BUILD_CONTEXT=''"
        "&& pytest -s --collect-only  --generate-coverage-doc --ignore=container_tests/",
        hide=True)

    # Ensure that the coverage report is created
    assert os.path.exists(
        test_coverage_file
    ), f"Cannot find test coverage report file {test_coverage_file}"
Example #27
def test_canary_images_pullable(region):
    """
    Sanity test to verify canary specific functions
    """
    ctx = Context()
    frameworks = ("tensorflow", "mxnet", "pytorch")

    # Have a default framework to test on
    framework = "pytorch"
    for fw in frameworks:
        if fw in os.getenv("CODEBUILD_INITIATOR", ""):
            framework = fw
            break

    images = parse_canary_images(framework, region)
    login_to_ecr_registry(ctx, PUBLIC_DLC_REGISTRY, region)
    for image in images.split(" "):
        ctx.run(f"docker pull {image}", hide=True)
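login_to_ecr_registry is a test-suite helper; a plausible implementation using the documented ECR login flow would be (the real helper may differ):

def login_to_ecr_registry(context, account_id: str, region: str):
    # `aws ecr get-login-password` piped into `docker login` is the standard CLI v2 flow
    context.run(
        f"aws ecr get-login-password --region {region} | docker login --username AWS "
        f"--password-stdin {account_id}.dkr.ecr.{region}.amazonaws.com",
        hide=True,
    )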
Example #28
def test_test_with_watch(mocker):
    """Test test task with --watch option."""
    mocker.patch('tasks.run_test_watch', side_effect=run_simple_test)
    tasks = importlib.import_module('tasks')
    ctx = Context()
    ctx.run = MagicMock()
    assert type(tasks.test) == Task
    tasks.test(ctx, watch=True, n=6)
    ctx.run.assert_called_once_with('ok')
    tasks.run_test_watch.assert_called_once_with(ctx, 6)
Example #29
def common(backend: str):
    target_dir = join(cfg.get_ennclave_home(), 'backend', 'generated')

    preamble_backend = backend
    if backend == 'sgx':
        preamble_backend = 'sgx_enclave'

    with open(join(target_dir, f'{backend}_forward.cpp'), 'w+') as forward_file:
        forward_file.write(templates.preamble.render(backend=preamble_backend))
        forward_file.write(
            f"print_out(\"Hello, this is backend {backend}\\n\");")
        forward_file.write(templates.postamble)

    # Create an empty parameters file
    with open(join(target_dir, 'parameters.bin'), 'w') as parameter_file:
        pass

    with open(join(target_dir, 'sgx_config.xml'), 'w') as config_file:
        config_file.write("""     
<EnclaveConfiguration>
  <ProdID>0</ProdID>
  <ISVSVN>0</ISVSVN>
  <StackMaxSize>0x40000</StackMaxSize>
  <HeapInitSize>0x7e00000</HeapInitSize>
  <HeapMaxSize>0x7e00000</HeapMaxSize>
  <TCSNum>10</TCSNum>
  <TCSPolicy>1</TCSPolicy>
  <!-- Recommend changing 'DisableDebug' to 1 to make the sgx undebuggable for sgx release -->
  <DisableDebug>0</DisableDebug>
  <MiscSelect>0</MiscSelect>
  <MiscMask>0xFFFFFFFF</MiscMask>
</EnclaveConfiguration>""")

    context = Context()
    with context.cd(cfg.get_ennclave_home()):
        context.run('mkdir -p build')
        with context.cd('build'):
            # context.run('cmake ..')
            context.run(f'make backend_{backend}')

    if backend == 'native':
        ennclave.native_forward(b'', 0, 0)
    else:
        ennclave.sgx_forward(b'', 0, 0)
Example #30
def test_test(mocker):
    """Test test task."""
    mocker.patch('tasks.run_test_normal', side_effect=run_simple_test)
    tasks = importlib.import_module('tasks')
    ctx = Context()
    ctx.run = MagicMock()
    assert type(tasks.test) == Task
    tasks.test(ctx)
    ctx.run.assert_called_once_with('ok')
    tasks.run_test_normal.assert_called_once_with(ctx, 4)
Example #31
def build_delphi_project(ctx: context.Context, project_filename, config='DEBUG', delphi_version=DEFAULT_DELPHI_VERSION):
    delphi_versions = {
        "10.1": {"path": "18.0", "desc": "Delphi 10.1 Berlin"},
        "10.2": {"path": "19.0", "desc": "Delphi 10.2 Tokyo"},
        "10.3": {"path": "20.0", "desc": "Delphi 10.3 Rio"},
    }

    assert delphi_version in delphi_versions, "Invalid Delphi version: " + delphi_version
    print("[" + delphi_versions[delphi_version]["desc"] + "] ", end="")
    version_path = delphi_versions[delphi_version]["path"]

    rsvars_path = f'C:\\Program Files (x86)\\Embarcadero\\Studio\\{version_path}\\bin\\rsvars.bat'
    if not os.path.isfile(rsvars_path):
        rsvars_path = f'D:\\Program Files (x86)\\Embarcadero\\Studio\\{version_path}\\bin\\rsvars.bat'
        if not os.path.isfile(rsvars_path):
            raise Exception("Cannot find rsvars.bat")
    cmdline = f'"{rsvars_path}" & msbuild /t:Build /p:Config={config} /p:Platform=Win32 "{project_filename}"'
    return ctx.run(cmdline, hide=True, warn=True)