def _run(cmd_string):
    cli_full_line = "{} {}".format(cli_path, cmd_string)
    run_context = Context()
    with run_context.cd(working_dir):
        return run_context.run(cli_full_line, echo=False, hide=True, warn=True, env=env)

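# Hedged usage sketch (not from the original suite): how a test might call the _run
# helper above. It assumes the module-level cli_path, working_dir and env globals that
# _run relies on are defined elsewhere (e.g. by fixtures); "version" is an illustrative
# subcommand only.
def test_cli_prints_version():
    result = _run("version")
    assert result.ok, result.stderr
    assert result.stdout != ""
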
def synchronize_code(context: Context):
    import os
    import sys

    # Path /cygdrive/d/cygwin64/bin/rsync
    rsync_path = os.environ.get("RSYNC_PATH", None) or "rsync"
    ssh_path = (
        os.environ.get("SSH_PATH", None) or "/cygdrive/d/cygwin64/bin/ssh"
    ) if sys.platform == "win32" else "ssh"
    remote_base_path = "/home/pcdinh/code"
    ssh_identity_file_path = convert_to_posix_path(
        os.path.normpath("".join(context.ssh_config["identityfile"])))
    if sys.platform == "win32":
        rsync_cmd = r"{} -pthrvz --exclude='.git/' --exclude='.idea/' --chmod=Du=rwx,Dgo=rx,Fu=rw,Fgo=r --rsh='{} -i {} -p 22 ' {} {}@{}:{}"
        context.local(
            rsync_cmd.format(
                rsync_path,
                ssh_path,
                # context.ssh_config => dict(hostname, port, user, identityfile)
                ssh_identity_file_path,
                convert_to_posix_path(get_base_path()),
                context.ssh_config["user"],
                context.ssh_config["hostname"],
                remote_base_path))
    else:
        rsync(context,
              convert_to_posix_path(get_base_path()),
              "/home/pcdinh/code",
              rsync_opts='--chmod=Du=rwx,Dgo=rx,Fu=rw,Fgo=r --verbose')

def _run(cmd: list, custom_working_dir=None, custom_env=None):
    if cmd is None:
        cmd = []
    quoted_cmd = [f'"{t}"' for t in cmd]

    if not custom_working_dir:
        custom_working_dir = working_dir
    if not custom_env:
        custom_env = env
    cli_full_line = '"{}" {}'.format(cli_path, " ".join(quoted_cmd))
    run_context = Context()
    # It might happen that we need to change directories between drives on Windows,
    # in that case the "/d" flag must be used otherwise directory wouldn't change
    cd_command = "cd"
    if platform.system() == "Windows":
        cd_command += " /d"
    # Context.cd() is not used since it doesn't work correctly on Windows.
    # It escapes spaces in the path using "\ " but it doesn't always work,
    # wrapping the path in quotation marks is the safest approach
    with run_context.prefix(f'{cd_command} "{custom_working_dir}"'):
        return run_context.run(
            cli_full_line, echo=False, hide=True, warn=True, env=custom_env, encoding="utf-8"
        )

def download_file(remote_url: str, link_type: str):
    """
    Fetch remote files and save with provided local_path name

    :param link_type: string
    :param remote_url: string
    :return: file_name: string
    """
    LOGGER.info(f"Downloading {remote_url}")

    file_name = os.path.basename(remote_url).strip()
    LOGGER.info(f"basename: {file_name}")

    if link_type in ["s3"] and remote_url.startswith("s3://"):
        match = re.match(r's3:\/\/(.+?)\/(.+)', remote_url)
        if match:
            bucket_name = match.group(1)
            bucket_key = match.group(2)
            LOGGER.info(f"bucket_name: {bucket_name}")
            LOGGER.info(f"bucket_key: {bucket_key}")
            download_s3_file(bucket_name, bucket_key, file_name)
        else:
            raise ValueError("Regex matching on s3 URI failed.")
    else:
        ctx = Context()
        ctx.run(f"curl -O {remote_url}")

    return file_name

def test_generate_coverage_doc():
    """
    Test generating the test coverage doc
    """
    test_coverage_file = get_test_coverage_file_path()
    ctx = Context()
    # Set DLC_IMAGES to 'test' to avoid image names affecting function metadata (due to parametrization)
    # Set CODEBUILD_RESOLVED_SOURCE_VERSION to test for ease of running this test locally
    ctx.run(
        "export DLC_IMAGES='test' && export CODEBUILD_RESOLVED_SOURCE_VERSION='test' && export BUILD_CONTEXT=''"
        "&& pytest -s --collect-only --generate-coverage-doc --ignore=container_tests/",
        hide=True,
    )

    # Ensure that the coverage report is created
    assert os.path.exists(test_coverage_file), f"Cannot find test coverage report file {test_coverage_file}"

    # Write test coverage file to S3
    if is_mainline_context():
        report_bucket = TEST_COVERAGE_REPORT_BUCKET
        client = boto3.client("s3")
        with open(test_coverage_file, "rb") as test_file:
            try:
                client.put_object(Bucket=report_bucket, Key=os.path.basename(test_coverage_file), Body=test_file)
            except ClientError as e:
                LOGGER.error(f"Unable to upload report to bucket {report_bucket}. Error: {e}")
                raise

def test_sm_profiler_tf(tensorflow_training):
    if is_tf_version("1", tensorflow_training):
        pytest.skip("Skipping test on TF1, since there are no smprofiler config files for TF1")

    processor = get_processor_from_image_uri(tensorflow_training)
    if processor not in ("cpu", "gpu"):
        pytest.skip(f"Processor {processor} not supported. Skipping test.")

    ctx = Context()

    profiler_tests_dir = os.path.join(
        os.getenv("CODEBUILD_SRC_DIR"), get_container_name("smprof", tensorflow_training), "smprofiler_tests"
    )
    ctx.run(f"mkdir -p {profiler_tests_dir}", hide=True)

    # Download sagemaker-tests zip
    sm_tests_zip = "sagemaker-tests.zip"
    ctx.run(
        f"aws s3 cp {os.getenv('SMPROFILER_TESTS_BUCKET')}/{sm_tests_zip} {profiler_tests_dir}/{sm_tests_zip}",
        hide=True,
    )
    ctx.run(f"cd {profiler_tests_dir} && unzip {sm_tests_zip}", hide=True)

    # Install tf datasets
    ctx.run(
        f"echo 'tensorflow-datasets==4.0.1' >> "
        f"{profiler_tests_dir}/sagemaker-tests/tests/scripts/tf_scripts/requirements.txt",
        hide=True,
    )

    run_sm_profiler_tests(tensorflow_training, profiler_tests_dir, "test_profiler_tensorflow.py", processor)

def daemon_runner(pytestconfig, data_dir, downloads_dir, working_dir):
    """
    Provide an invoke `Local` object that has started the arduino-cli in daemon mode.
    This makes it simple to start and kill the daemon when the test is finished,
    via the kill() function.

    Useful reference:
        http://docs.pyinvoke.org/en/1.4/api/runners.html#invoke.runners.Local
        http://docs.pyinvoke.org/en/1.4/api/runners.html
    """
    cli_full_line = os.path.join(str(pytestconfig.rootdir), "..", "arduino-cli daemon")
    env = {
        "ARDUINO_DATA_DIR": data_dir,
        "ARDUINO_DOWNLOADS_DIR": downloads_dir,
        "ARDUINO_SKETCHBOOK_DIR": data_dir,
    }
    os.makedirs(os.path.join(data_dir, "packages"))
    run_context = Context()
    run_context.cd(working_dir)
    # Local class is the implementation of a Runner abstract class
    runner = Local(run_context)
    runner.run(cli_full_line, echo=False, hide=True, warn=True, env=env, asynchronous=True)

    # we block here until the test function using this fixture has returned
    yield runner

    # Kill the runner's process as we finished our test (platform dependent)
    os_signal = signal.SIGTERM
    if platform.system() != "Windows":
        os_signal = signal.SIGKILL
    os.kill(runner.process.pid, os_signal)

def run_command(
    context: Context,
    user: str,
    remote: bool,
    instance: Optional[str],
    stack: Optional[str],
    command: str,
    compose: bool = True,
):
    host = get_host(remote)
    instance = get_instance(remote, instance)
    stack = get_stack(remote, instance, stack)
    if compose:
        command = f"{COMPOSE_CMD} -f {stack} {command}"
    info(f"{host}/{instance}/{stack}\n{command}")
    try:
        if remote:
            with get_connection(user, HOST) as c:
                with c.cd(f"{HOST_PATH}/{instance}"):
                    c.run(command, pty=True)
        else:
            context.run(command, replace_env=False, pty=True)
    except (AuthenticationException, ValueError) as e:
        error(f"{e}")
    except (Failure, ThreadException, UnexpectedExit):
        error(f"{host}/{instance}\nFailed to run command: `{command}`")

def test_pip_check(image):
    """
    Ensure there are no broken requirements on the containers by running "pip check"

    :param image: ECR image URI
    """
    ctx = Context()
    gpu_suffix = "-gpu" if "gpu" in image else ""

    # TF inference containers do not have core tensorflow installed by design. Allowing for this pip check error
    # to occur in order to catch other pip check issues that may be associated with TF inference
    # smclarify binaries have s3fs->aiobotocore dependency which uses older version of botocore. temporarily
    # allowing this to catch other issues
    allowed_tf_exception = re.compile(
        rf"^tensorflow-serving-api{gpu_suffix} \d\.\d+\.\d+ requires "
        rf"tensorflow{gpu_suffix}, which is not installed.$"
    )
    allowed_smclarify_exception = re.compile(
        r"^aiobotocore \d+(\.\d+)* has requirement botocore<\d+(\.\d+)*,>=\d+(\.\d+)*, "
        r"but you have botocore \d+(\.\d+)*\.$"
    )

    # Add null entrypoint to ensure command exits immediately
    output = ctx.run(f"docker run --entrypoint='' {image} pip check", hide=True, warn=True)
    if output.return_code != 0:
        if not (allowed_tf_exception.match(output.stdout) or allowed_smclarify_exception.match(output.stdout)):
            # Rerun pip check test if this is an unexpected failure
            ctx.run(f"docker run --entrypoint='' {image} pip check", hide=True)

def test_normal():
    """Test run_test_normal."""
    ctx = Context()
    ctx.run = MagicMock()
    assert isinstance(run_test_normal, types.FunctionType)
    run_test_normal(ctx, 8)
    ctx.run.assert_called_once()

def test_clean():
    """Test clean task."""
    ctx = Context()
    ctx.run = MagicMock()
    assert type(clean) == Task
    clean(ctx)
    ctx.run.assert_called_once()

def run_sagemaker_test_in_executor(image, num_of_instances, instance_type):
    """
    Run pytest in a virtual env for a particular image
    Expected to run under multi-threading

    :param num_of_instances: <int> number of instances the image test requires
    :param instance_type: type of sagemaker instance the test needs
    :param image: ECR url
    :return:
    """
    import log_return

    LOGGER.info("Started running SageMaker test.....")
    pytest_command, path, tag, job_type = sm_utils.generate_sagemaker_pytest_cmd(image, "sagemaker")

    # update resource pool accordingly, then add a try-catch statement here to update the pool in case of failure
    try:
        log_return.update_pool("running", instance_type, num_of_instances, job_type)
        context = Context()
        with context.cd(path):
            context.run(f"python3 -m virtualenv {tag}")
            with context.prefix(f"source {tag}/bin/activate"):
                context.run("pip install -r requirements.txt", warn=True)
                context.run(pytest_command)
    except Exception as e:
        LOGGER.error(e)
        return False

    return True

def run(command):
    """Execute a command with Invoke."""
    ctx = Context()
    ctx.run(
        command,
        echo=True,  # To improve User eXperience
        pty=True,  # To get colors in output
    )

def __init__(self, container_id, ignore_dict={}):
    self.container_id = container_id
    self.vulnerability_dict = {}
    self.vulnerability_list = []
    self.ignore_dict = ignore_dict
    self.ignored_vulnerability_count = {}
    self.ctx = Context()
    self.docker_exec_cmd = f"docker exec -i {container_id}"
    self.safety_check_output = None

def build_bai_docker_container():
    """
    Builds docker container with necessary script requirements (bash 5.0+, conda)
    """
    # Assuming we are in dlc_tests directory
    docker_dir = os.path.join("benchmark", "bai", "docker")
    ctx = Context()
    with ctx.cd(docker_dir):
        ctx.run("docker build -t bai_env_container -f Dockerfile .")

def test_pip_check(image):
    """
    Ensure there are no broken requirements on the containers by running "pip check"

    :param image: ECR image URI
    """
    ctx = Context()
    gpu_suffix = "-gpu" if "gpu" in image else ""
    allowed_exception_list = []

    # TF inference containers do not have core tensorflow installed by design. Allowing for this pip check error
    # to occur in order to catch other pip check issues that may be associated with TF inference
    # smclarify binaries have s3fs->aiobotocore dependency which uses older version of botocore. temporarily
    # allowing this to catch other issues
    allowed_tf_exception = re.compile(
        rf"^tensorflow-serving-api{gpu_suffix} \d\.\d+\.\d+ requires tensorflow{gpu_suffix}, which is not installed.$"
    )
    allowed_exception_list.append(allowed_tf_exception)

    allowed_smclarify_exception = re.compile(
        r"^aiobotocore \d+(\.\d+)* has requirement botocore<\d+(\.\d+)*,>=\d+(\.\d+)*, "
        r"but you have botocore \d+(\.\d+)*\.$"
    )
    allowed_exception_list.append(allowed_smclarify_exception)

    # The v0.22 version of tensorflow-io has a bug fixed in v0.23 https://github.com/tensorflow/io/releases/tag/v0.23.0
    allowed_habana_tf_exception = re.compile(
        rf"^tensorflow-io 0.22.0 requires tensorflow, which is not installed.$"
    )
    allowed_exception_list.append(allowed_habana_tf_exception)

    framework, framework_version = get_framework_and_version_from_tag(image)
    # The v0.21 version of tensorflow-io has a bug fixed in v0.23 https://github.com/tensorflow/io/releases/tag/v0.23.0
    if framework in ("tensorflow", "huggingface_tensorflow") and Version(framework_version) in SpecifierSet(
        ">=2.6.3,<2.7"
    ):
        allowed_tf263_exception = re.compile(
            rf"^tensorflow-io 0.21.0 requires tensorflow, which is not installed.$"
        )
        allowed_exception_list.append(allowed_tf263_exception)

    if "autogluon" in image and (("0.3.1" in image) or ("0.3.2" in image)):
        allowed_autogluon_exception = re.compile(
            rf"autogluon-(vision|mxnet) 0.3.1 has requirement Pillow<8.4.0,>=8.3.0, but you have pillow \d+(\.\d+)*"
        )
        allowed_exception_list.append(allowed_autogluon_exception)

    # Add null entrypoint to ensure command exits immediately
    output = ctx.run(f"docker run --entrypoint='' {image} pip check", hide=True, warn=True)
    if output.return_code != 0:
        if not any(allowed_exception.match(output.stdout) for allowed_exception in allowed_exception_list):
            # Rerun pip check test if this is an unexpected failure
            ctx.run(f"docker run --entrypoint='' {image} pip check", hide=True)

def test_test_with_watch(mocker):
    """Test test task with --watch option."""
    mocker.patch('tasks.run_test_watch', side_effect=run_simple_test)
    tasks = importlib.import_module('tasks')
    ctx = Context()
    ctx.run = MagicMock()
    assert type(tasks.test) == Task
    tasks.test(ctx, watch=True, n=6)
    ctx.run.assert_called_once_with('ok')
    tasks.run_test_watch.assert_called_once_with(ctx, 6)

def test_test(mocker):
    """Test test task."""
    mocker.patch('tasks.run_test_normal', side_effect=run_simple_test)
    tasks = importlib.import_module('tasks')
    ctx = Context()
    ctx.run = MagicMock()
    assert type(tasks.test) == Task
    tasks.test(ctx)
    ctx.run.assert_called_once_with('ok')
    tasks.run_test_normal.assert_called_once_with(ctx, 4)

def _run_background(context: Context, command, out_file="/dev/null", err_file=None, shell="/bin/bash", pty=False):
    # Re: nohup {} >{} 2>{} </dev/null &
    cmd = 'nohup {} >{} 2>{} &'.format(command, out_file, err_file or '&1')
    print("Running: {}".format(cmd))
    context.run(cmd, shell=shell, pty=pty, warn=True)

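# Hedged usage sketch (assumption, not part of the original tasks file): starting a
# long-running process via the _run_background helper above without blocking the task.
# The command and log path below are illustrative only.
from invoke import Context  # may already be imported in the surrounding module

def serve_static_files():
    context = Context()
    _run_background(context, "python3 -m http.server 8000", out_file="/tmp/http_server.log")
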
def makemigrations(context: Context = CONTEXT, noninteractive: bool = False):
    """Safely create migrations."""
    interactive = not noninteractive
    make_migrations = True
    if interactive:
        print('Doing a dry run first...')
        context.run('python manage.py makemigrations --dry-run')
        make_migrations = input('^ Do these changes look OK? [Y/n]') != NEGATIVE
    if make_migrations:
        context.run('python manage.py makemigrations')

def test_binary_visibility(image: str):
    """
    Test to check whether the binary built with the image is public or private.
    Assumes that URIs beginning with 's3://' are private.
    This mandates specifying all public links as ones beginning with 'https://'.
    While s3 objects beginning with 'https://' may still be private, the codebuild 'build' job uses 'curl',
    i.e. an unsigned request, to fetch them and hence should fail if an 'https://' link is still private.
    """
    ctx = Context()
    labels = json.loads(ctx.run("docker inspect --format='{{json .Config.Labels}}' " + image).stdout.strip())

    for label_name, label_value in labels.items():
        if "uri" in label_name.lower():
            assert label_value.startswith("https://")

def test_eks_mxnet_multi_node_training_horovod_mnist(mxnet_training, example_only):
    """Run MXNet distributed training on EKS using docker images with MNIST dataset"""
    ctx = Context()

    eks_cluster_size = 3
    ec2_instance_type = "p3.16xlarge"
    cluster_name = eks_utils.PR_EKS_CLUSTER_NAME_TEMPLATE.format("mxnet")

    assert eks_utils.is_eks_cluster_active(cluster_name), f"EKS Cluster {cluster_name} is inactive. Exiting test"

    eks_gpus_per_worker = ec2_utils.get_instance_num_gpus(instance_type=ec2_instance_type)

    LOGGER.info("Starting run_eks_mxnet_multi_node_training on MNIST dataset using horovod")
    LOGGER.info("The test will run on an example image %s", mxnet_training)

    user = ctx.run("echo $USER").stdout.strip("\n")
    random.seed(f"{mxnet_training}-{datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')}")
    unique_tag = f"{user}-{random.randint(1, 10000)}"

    namespace = f"mx-multi-node-train-{'py2' if 'py2' in mxnet_training else 'py3'}-{unique_tag}"
    app_name = f"kubeflow-mxnet-hvd-mpijob-{unique_tag}"
    job_name = f"mxnet-mnist-horovod-job={unique_tag}"

    command_to_run = "mpirun,-mca,btl_tcp_if_exclude,lo,-mca,pml,ob1,-mca,btl,^openib,--bind-to,none,-map-by,slot," \
                     "-x,LD_LIBRARY_PATH,-x,PATH,-x,NCCL_SOCKET_IFNAME=eth0,-x,NCCL_DEBUG=INFO,python," \
                     "/horovod/examples/mxnet_mnist.py"
    args_to_pass = "******"
    home_dir = ctx.run("echo $HOME").stdout.strip("\n")
    path_to_ksonnet_app = os.path.join(home_dir, f"mxnet_multi_node_hvd_eks_test-{unique_tag}")

    LOGGER.debug(f"Namespace: {namespace}")

    # return training_result
    result = _run_eks_multi_node_training_mpijob(namespace, app_name, mxnet_training, job_name, command_to_run,
                                                 args_to_pass, path_to_ksonnet_app, eks_cluster_size,
                                                 eks_gpus_per_worker)
    return result

def test_python_version(image):
    """
    Check that the python version in the image tag is the same as the one on a running container.

    :param image: ECR image URI
    """
    ctx = Context()
    container_name = f"py-version-{image.split('/')[-1].replace('.', '-').replace(':', '-')}"

    py_version = ""
    for tag_split in image.split('-'):
        if tag_split.startswith('py'):
            if len(tag_split) > 3:
                py_version = f"Python {tag_split[2]}.{tag_split[3]}"
            else:
                py_version = f"Python {tag_split[2]}"
    _start_container(container_name, image, ctx)
    output = _run_cmd_on_container(container_name, ctx, "python --version")

    container_py_version = output.stdout
    # Due to py2 deprecation, Python2 version gets streamed to stderr. Python installed via Conda also appears to
    # stream to stderr, hence the pytorch condition.
    if "Python 2" in py_version or "pytorch" in image:
        container_py_version = output.stderr

    assert py_version in container_py_version, f"Cannot find {py_version} in {container_py_version}"

def test_framework_version_cpu(cpu):
    """
    Check that the framework version in the image tag is the same as the one on a running container.

    :param cpu: ECR image URI with "cpu" in the name
    """
    image = cpu
    if "tensorflow-inference" in image:
        pytest.skip(msg="TF inference does not have core tensorflow installed")
    tested_framework, tag_framework_version = get_framework_and_version_from_tag(image)

    # Module name is torch
    if tested_framework == "pytorch":
        tested_framework = "torch"
    ctx = Context()
    container_name = f"framework-version-{image.split('/')[-1].replace('.', '-').replace(':', '-')}"
    _start_container(container_name, image, ctx)
    output = _run_cmd_on_container(
        container_name, ctx, f"import {tested_framework}; print({tested_framework}.__version__)", executable="python"
    )
    if is_canary_context():
        assert tag_framework_version in output.stdout.strip()
    else:
        assert tag_framework_version == output.stdout.strip()

def test_repo_anaconda_not_present(image):
    """Test to see if all packages installed in the image do not come from repo.anaconda.com"""
    try:
        ctx = Context()
        container_name = test_utils.get_container_name("anaconda", image)
        test_utils.start_container(container_name, image, ctx)

        # First check to see if image has conda installed, if not, skip test since no packages installed from conda present
        conda_present = test_utils.run_cmd_on_container(
            container_name, ctx, "find . -name conda -not -path \"**/.github/*\""
        ).stdout.strip()
        if not conda_present:
            pytest.skip(f"Image {image} does not have conda installed, skipping test.")

        # Commands are split in 2 because if warn=True, then even if first command fails silently, no error is raised
        test_utils.run_cmd_on_container(container_name, ctx, "conda list --explicit > repo_list.txt")

        grep_result = test_utils.run_cmd_on_container(
            container_name, ctx, "grep repo.anaconda.com repo_list.txt", warn=True
        ).stdout.strip()

        if grep_result:
            raise RuntimeError(
                f"Image {image} contains packages installed from repo.anaconda.com. "
                f"Please ensure that these packages are obtained through conda-forge or other alternatives: {grep_result}"
            )
    finally:
        test_utils.stop_and_remove_container(container_name, ctx)

def test_sm_pysdk_2(training):
    """
    Simply verify that we have sagemaker > 2.0 in the python sdk.

    If you find that this test is failing because sm pysdk version is not greater than 2.0, then that means that
    the image under test needs to be updated.

    If you find that the training image under test does not have sagemaker pysdk, it should be added or explicitly
    skipped (with reasoning provided).

    :param training: training ECR image URI
    """
    # Ensure that sm py sdk 2 is on the container
    ctx = Context()
    container_name = _get_container_name("sm_pysdk", training)
    _start_container(container_name, training, ctx)

    sm_version = _run_cmd_on_container(
        container_name, ctx, "import sagemaker; print(sagemaker.__version__)", executable="python"
    ).stdout.strip()

    assert Version(sm_version) > Version("2"), f"Sagemaker version should be > 2.0. Found version {sm_version}"

def test_pandas(image):
    """
    It's possible that in newer python versions, we may have issues with installing pandas due to lack of presence
    of the bz2 module in py3 containers. This is a sanity test to ensure that pandas import works properly in all
    containers.

    :param image: ECR image URI
    """
    ctx = Context()
    container_name = _get_container_name("pandas", image)
    _start_container(container_name, image, ctx)

    # Make sure we can install pandas, do not fail right away if there are pip check issues
    _run_cmd_on_container(container_name, ctx, "pip install pandas", warn=True)

    pandas_import_output = _run_cmd_on_container(container_name, ctx, "import pandas", executable="python")

    assert (
        not pandas_import_output.stdout.strip()
    ), f"Expected no output when importing pandas, but got {pandas_import_output.stdout}"

    # Simple import test to ensure we do not get a bz2 module import failure
    _run_cmd_on_container(container_name, ctx, "import pandas; print(pandas.__version__)", executable="python")

def modifications_on_clone_do_not_alter_original(self):
    # Setup
    orig = Call(self.task, called_as='foo', args=[1, 2, 3], kwargs={'key': 'val'})
    context = Context()
    context['setting'] = 'value'
    orig.context = context
    # Clone & tweak
    clone = orig.clone()
    newtask = Task(Mock(__name__='meh'))
    clone.task = newtask
    clone.called_as = 'notfoo'
    clone.args[0] = 7
    clone.kwargs['key'] = 'notval'
    clone.context['setting'] = 'notvalue'
    # Compare
    ok_(clone.task is not orig.task)
    eq_(orig.called_as, 'foo')
    eq_(clone.called_as, 'notfoo')
    eq_(orig.args, [1, 2, 3])
    eq_(clone.args, [7, 2, 3])
    eq_(orig.kwargs['key'], 'val')
    eq_(clone.kwargs['key'], 'notval')
    eq_(orig.context['setting'], 'value')
    eq_(clone.context['setting'], 'notvalue')

def test_dataclasses_check(image):
    """
    Ensure the dataclasses pip package is not installed for Python 3.7 and above versions.
    The Python version retrieved from the ECR image URI is expected in the format `py<major_version><minor_version>`

    :param image: ECR image URI
    """
    ctx = Context()
    pip_package = "dataclasses"

    container_name = get_container_name("dataclasses-check", image)

    python_version = get_python_version_from_image_uri(image).replace("py", "")
    python_version = int(python_version)

    if python_version >= 37:
        start_container(container_name, image, ctx)
        output = run_cmd_on_container(container_name, ctx, f"pip show {pip_package}", warn=True)

        if output.return_code == 0:
            pytest.fail(
                f"{pip_package} package exists in the DLC image {image} that has py{python_version} version "
                f"which is greater than py36 version"
            )
        else:
            LOGGER.info(f"{pip_package} package does not exist in the DLC image {image}")
    else:
        pytest.skip(
            f"Skipping test for DLC image {image} that has py36 version as {pip_package} is not included in the python framework"
        )

def run_sagemaker_pytest_cmd(image):
    """
    Run pytest in a virtual env for a particular image
    Expected to run via multiprocessing

    :param image: ECR url
    """
    pytest_command, path, tag = generate_sagemaker_pytest_cmd(image)

    context = Context()
    with context.cd(path):
        context.run(f"virtualenv {tag}")
        with context.prefix(f"source {tag}/bin/activate"):
            context.run("pip install -r requirements.txt", warn=True)
            context.run(pytest_command)

class configuration:
    "Dict-like for config"

    def setup(self):
        self.c = Context(config={'foo': 'bar'})

    def getitem(self):
        "__getitem__"
        eq_(self.c['foo'], 'bar')

    def get(self):
        eq_(self.c.get('foo'), 'bar')
        eq_(self.c.get('biz', 'baz'), 'baz')

    def keys(self):
        skip()

    def update(self):
        self.c.update({'newkey': 'newval'})
        eq_(self.c['newkey'], 'newval')

class configuration_proxy:
    "Dict-like proxy for self.config"

    def setup(self):
        config = Config({'foo': 'bar'})
        self.c = Context(config=config)

    def direct_access_allowed(self):
        eq_(self.c.config.__class__, Config)
        eq_(self.c.config['foo'], 'bar')
        eq_(self.c.config.foo, 'bar')

    def getitem(self):
        "__getitem__"
        eq_(self.c['foo'], 'bar')

    def getattr(self):
        "__getattr__"
        eq_(self.c.foo, 'bar')

    def get(self):
        eq_(self.c.get('foo'), 'bar')
        eq_(self.c.get('biz', 'baz'), 'baz')

    def keys(self):
        skip()

    def values(self):
        skip()

    def iter(self):
        "__iter__"
        skip()

    def update(self):
        self.c.update({'newkey': 'newval'})
        eq_(self.c['newkey'], 'newval')

def build_delphi_project(ctx: context.Context, project_filename, config='DEBUG', delphi_version=DEFAULT_DELPHI_VERSION):
    delphi_versions = {
        "10.1": {"path": "18.0", "desc": "Delphi 10.1 Berlin"},
        "10.2": {"path": "19.0", "desc": "Delphi 10.2 Tokyo"},
        "10.3": {"path": "20.0", "desc": "Delphi 10.3 Rio"},
    }

    assert delphi_version in delphi_versions, "Invalid Delphi version: " + delphi_version
    print("[" + delphi_versions[delphi_version]["desc"] + "] ", end="")
    version_path = delphi_versions[delphi_version]["path"]

    # Locate rsvars.bat, which sets up the environment for msbuild
    rsvars_path = f'C:\\Program Files (x86)\\Embarcadero\\Studio\\{version_path}\\bin\\rsvars.bat'
    if not os.path.isfile(rsvars_path):
        rsvars_path = f'D:\\Program Files (x86)\\Embarcadero\\Studio\\{version_path}\\bin\\rsvars.bat'
        if not os.path.isfile(rsvars_path):
            raise Exception("Cannot find rsvars.bat")
    cmdline = '"' + rsvars_path + '"' + " & msbuild /t:Build /p:Config=" + config + " /p:Platform=Win32 \"" + project_filename + "\""
    return ctx.run(cmdline, hide=True, warn=True)

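# Hedged usage sketch (assumption, not from the original build script): wiring
# build_delphi_project into an invoke task. The .dproj path is hypothetical.
from invoke import task, Exit

@task
def build_sample(ctx):
    result = build_delphi_project(ctx, r"samples\helloworld\helloworld.dproj", config="RELEASE")
    if not result.ok:
        print(result.stdout)
        print(result.stderr)
        raise Exit("Build failed")
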
def setup(self):
    config = Config({'foo': 'bar'})
    self.c = Context(config=config)

def setup(self):
    self.c = Context(config={'foo': 'bar'})