Beispiel #1
0
def _lint_docstyle(ctx: Context, path: Optional[List[str]] = None):
    """Check docstring style with pydocstyle.

    pydocstyle enforces PEP 257 compliance (with the exceptions configured in
    the `.pydocstyle` file); pylint performs some additional docstring checks
    of its own.

    Args:
        ctx: Context
        path: Path override. Run tests only on given paths.
    """
    print_header("documentation style", level=2)
    ensure_reports_dir()

    default_targets = [
        PROJECT_INFO.source_directory,
        PROJECT_INFO.tests_directory,
        PROJECT_INFO.tasks_directory,
    ]
    lint_targets = to_pathlib_path(path, default_targets)

    command = f"pydocstyle {paths_to_str(lint_targets)} > {REPORT_PYDOCSTYLE_FPATH}"
    try:
        ctx.run(command)
    finally:
        # Surface the report even when pydocstyle exits non-zero.
        if os.path.exists(REPORT_PYDOCSTYLE_FPATH):
            format_messages(read_contents(REPORT_PYDOCSTYLE_FPATH))
Beispiel #2
0
    def build(self, ctx: Context) -> None:
        """Build zlib: MASM + msbuild on Windows, configure/make elsewhere."""
        windows_platforms = (
            SupportedPlatformEnum.WINDOWS_32,
            SupportedPlatformEnum.WINDOWS_64,
        )
        if self.platform in windows_platforms:
            # Pick the MASM/MSBuild settings matching the target architecture.
            if self.platform == SupportedPlatformEnum.WINDOWS_32:
                arch, build_script, build_platform = "x86", "bld_ml32.bat", "Win32"
            else:
                arch, build_script, build_platform = "x64", "bld_ml64.bat", "x64"

            masm_path = self.src_path / "contrib" / f"masm{arch}"
            with ctx.cd(str(masm_path)):
                ctx.run(build_script)
                msbuild_command = (
                    f"msbuild ..\\vstudio\\vc14\\zlibvc.sln /P:Configuration=Release /P:Platform={build_platform}"
                )
                ctx.run(msbuild_command)
        else:
            # Linux/macOS build
            with ctx.cd(str(self.src_path)):
                ctx.run('CFLAGS="-fPIC" ./configure -static')
                ctx.run("make clean")
                ctx.run("make")
def upgrade(
    ctx: Context,
    pyupgrade: bool = True,
    flynt: bool = True,
):
    """
    Upgrade the codebase with *pyupgrade* and *flynt*.

    Parameters
    ----------
    ctx
        Context.
    pyupgrade
        Whether to upgrade the codebase with *pyupgrade*.
    flynt
        Whether to upgrade the codebase with *flynt*.
    """

    # (enabled-flag, banner, shell command) for each upgrade tool, in order.
    steps = (
        (pyupgrade, 'Upgrading codebase with "pyupgrade"...',
         "pre-commit run pyupgrade --all-files"),
        (flynt, 'Upgrading codebase with "flynt"...', "flynt ."),
    )
    for enabled, banner, command in steps:
        if enabled:
            print(banner)
            ctx.run(command)
Beispiel #4
0
def _auto_install_pandas(c: Context):
    """Ensure pandas is importable, pip-installing it on demand."""
    try:
        import pandas  # noqa: F401
    except Exception as exc:
        # Report why the import failed, then install into this interpreter.
        print(str(exc))
        c.run(f"{sys.executable} -m pip install pandas")
    else:
        print("success: import pandas")
Beispiel #5
0
    def build(self, ctx: Context) -> None:
        """Compile zlib for the current platform (MASM/MSBuild or configure/make)."""
        is_windows = self.platform in (
            SupportedPlatformEnum.WINDOWS_32,
            SupportedPlatformEnum.WINDOWS_64,
        )
        if not is_windows:
            # Linux/macOS build
            with ctx.cd(str(self.src_path)):
                ctx.run('CFLAGS="-fPIC" ./configure -static')
                ctx.run('make clean')
                ctx.run('make')
            return

        # Windows: assemble with MASM, then build the VS solution.
        if self.platform == SupportedPlatformEnum.WINDOWS_32:
            arch = 'x86'
            build_script = 'bld_ml32.bat'
            build_platform = 'Win32'
        else:
            arch = 'x64'
            build_script = 'bld_ml64.bat'
            build_platform = 'x64'

        masm_path = self.src_path / 'contrib' / f'masm{arch}'
        with ctx.cd(str(masm_path)):
            ctx.run(build_script)
            ctx.run(
                f'msbuild ..\\vstudio\\vc14\\zlibvc.sln /P:Configuration=Release /P:Platform={build_platform}'
            )
Beispiel #6
0
def quality(
    ctx: Context,
    mypy: Boolean = True,
    rstlint: Boolean = True,
):
    """Check the codebase with *Mypy* and lint various *restructuredText*
    files with *rst-lint*.

    Parameters
    ----------
    ctx
        Context.
    mypy
        Whether to check the codebase with *Mypy*.
    rstlint
        Whether to lint various *restructuredText* files with *rst-lint*.
    """

    if mypy:
        message_box('Checking codebase with "Mypy"...')
        # "|| true" keeps the task from failing: Mypy findings are advisory.
        # Only the "-p" piece interpolates, so the other fragments need no
        # f-prefix.
        ctx.run("mypy "
                "--install-types "
                "--non-interactive "
                "--show-error-codes "
                "--warn-unused-ignores "
                "--warn-redundant-casts "
                f"-p {PYTHON_PACKAGE_NAME} "
                "|| true")

    if rstlint:
        message_box('Linting "README.rst" file...')
        ctx.run("rst-lint README.rst")
Beispiel #7
0
def config_fabric():
    """Synchronise the fabric config file into the user directory.

    fabric looks for its config in the current directory, the user directory,
    the global directory and the project directory (default config), but only
    ``~/.fabric.yaml`` and ``/etc/fabric.yaml`` actually take effect (here we
    use ``~/.fabric.yaml``):

    1. Diff the discovered ``fabric.yaml`` against ``~/.fabric.yaml``.
    2. When the target is missing or differs, copy ``fabric.yaml`` over
       ``~/.fabric.yaml`` and exit so the next run picks it up.
    """
    from invoke import Context
    c = Context()

    name = 'fabric.yaml'
    src = search_config(name)
    dst = os.path.join(globing.user, '.' + name)

    restart = False
    if not file_exist(c, dst, echo=False):
        print('copy config, try next time!')
        restart = True

    elif c.run("diff {} {}".format(src, dst), warn=True, echo=False).failed:
        print("update config, try next time!")
        restart = True

    if restart:
        # "\\cp" (escaped backslash; same runtime string as the old "\cp",
        # which was an invalid escape sequence) bypasses any shell alias
        # such as "cp -i".
        c.run("\\cp {} {}".format(src, dst))
        exit(-1)
Beispiel #8
0
    def handle_label(self, dataset_label, **options):
        """Dump the named Dataset and its related objects to a JSON file.

        Writes "<name>.<id>.json" into the dataset's dumps directory, dumps
        all resources, and (outside localhost) uploads everything to the
        harvester content bucket on S3.
        """
        dataset = Dataset.objects.get(name=dataset_label)

        # Make sure the dumps directory for this dataset exists.
        destination = get_dumps_path(dataset)
        if not os.path.exists(destination):
            os.makedirs(destination)
        dataset_file = os.path.join(
            destination, "{}.{}.json".format(dataset.name, dataset.id))
        # NOTE(review): the write order below presumably matters to the
        # matching load command (parents before children) — confirm before
        # reordering any of these dumps.
        with open(dataset_file, "w") as json_file:
            object_to_disk(dataset, json_file)
            queryset_to_disk(dataset.harvestsource_set, json_file)
            queryset_to_disk(dataset.harvest_set, json_file)
            queryset_to_disk(dataset.versions.filter(is_current=True),
                             json_file)
            for version in dataset.versions.filter(is_current=True):
                queryset_to_disk(version.indices, json_file)
                queryset_to_disk(version.collection_set, json_file)
                queryset_to_disk(version.document_set, json_file)
            queryset_to_disk(Extension.objects.all(), json_file)

        resource_files = self.dump_resources()

        # Sync files with AWS
        if environment.env != "localhost":
            logger.info("Uploading files to AWS")
            ctx = Context(environment)
            harvester_data_bucket = f"s3://{environment.aws.harvest_content_bucket}/datasets/harvester"
            for file in [dataset_file] + resource_files:
                # Mirror the local path layout under the bucket prefix.
                remote_file = harvester_data_bucket + file.replace(
                    settings.DATAGROWTH_DATA_DIR, "", 1)
                ctx.run(f"aws s3 cp {file} {remote_file}", echo=True)
Beispiel #9
0
def _auto_install_matplotlib(c: Context):
    """Ensure matplotlib is importable, pip-installing it on demand."""
    try:
        import matplotlib  # noqa: F401
    except Exception as exc:
        # Show the import error, then install into the running interpreter.
        print(str(exc))
        c.run(sys.executable + ' -m pip install matplotlib')
    else:
        print("success: import matplotlib")
Beispiel #10
0
def _lint_pycodestyle(ctx: Context, path: Optional[List[str]] = None):
    """Run PEP8 checks over primary (source) and secondary (tests, tasks) code.

    PEP8 checking is done via pycodestyle.

    Args:
        ctx: Context
        path: Path override. Run tests only on given paths.
    """
    # Why pycodestyle and pylint? So far, pylint does not check against every
    # convention in PEP8. As pylint's functionality grows, we should move all
    # PEP8 checking to pylint and remove pycodestyle.
    print_header("code style (PEP8)", level=2)
    ensure_reports_dir()

    lint_targets = to_pathlib_path(path, [
        PROJECT_INFO.source_directory,
        PROJECT_INFO.tests_directory,
        PROJECT_INFO.tasks_directory,
    ])

    # Ignores explained:
    # - E501: Line length is checked by PyLint
    # - W503: Disable checking of "Line break before binary operator". PEP8
    #         recently (~2019) switched to "line break before the operator"
    #         style, so we should permit this usage.
    # - E231: "missing whitespace after ','" is a false positive. Handled by
    #         black formatter.
    command = (
        f"pycodestyle --ignore=E501,W503,E231 --exclude=.svn,CVS,.bzr,.hg,.git,__pycache__,.tox,*_config_parser.py "
        f"{paths_to_str(lint_targets)} > {REPORT_PYCODESTYLE_FPATH}"
    )
    try:
        ctx.run(command)
    finally:
        # Surface the report even when pycodestyle exits non-zero.
        if os.path.exists(REPORT_PYCODESTYLE_FPATH):
            format_messages(read_contents(REPORT_PYCODESTYLE_FPATH))
Beispiel #11
0
 def honors_runner_config_setting(self):
     """The runner class configured under runners.local is used by run()."""
     configured_runner = Mock()
     ctx = Context(Config({'runners': {'local': configured_runner}}))
     ctx.run('foo')
     expected_calls = [call(ctx), call().run('foo')]
     assert configured_runner.mock_calls == expected_calls
Beispiel #12
0
 def honors_runner_config_setting(self):
     """run() instantiates the runner class named by config runners.local."""
     runner_cls = Mock()
     cfg = Config({'runners': {'local': runner_cls}})
     context = Context(cfg)
     context.run('foo')
     assert runner_cls.mock_calls == [call(context), call().run('foo')]
Beispiel #13
0
        def prefixes_should_apply_to_run(self, Local):
            """A prefix() context wraps the command handed to the runner."""
            mock_runner = Local.return_value
            ctx = Context()
            with ctx.prefix("cd foo"):
                ctx.run("whoami")

            expected = "cd foo && whoami"
            assert mock_runner.run.called, "run() never called runner.run()!"
            assert mock_runner.run.call_args[0][0] == expected
Beispiel #14
0
        def cd_should_apply_to_run(self, Local):
            """Commands run inside cd() get a leading cd-and-chain."""
            mock_runner = Local.return_value
            context = Context()
            with context.cd('foo'):
                context.run('whoami')

            expected = "cd foo && whoami"
            assert mock_runner.run.called, "run() never called runner.run()!"
            assert mock_runner.run.call_args[0][0] == expected
    def handle_label(self, dataset_label, **options):
        """Restore a harvester dataset from a dump file.

        Deletes any existing dataset with the same label, optionally syncs
        dump files from S3, loads the dump into the database and finally
        re-indexes the imported dataset version when requested.
        """
        skip_download = options["skip_download"]
        harvest_source = options.get("harvest_source", None)
        should_index = options.get("index")
        download_edurep = options["download_edurep"]

        assert harvest_source or environment.env != "localhost", \
            "Expected a harvest source argument for a localhost environment"
        source_environment = create_configuration(harvest_source, service="harvester") \
            if harvest_source else environment

        # Delete old datasets
        dataset = Dataset.objects.filter(name=dataset_label).last()
        if dataset is not None:
            dataset.harvestsource_set.all().delete()
            dataset.harvest_set.all().delete()
            dataset.delete()
        Extension.objects.all().delete()

        if harvest_source and not skip_download:
            logger.info(f"Downloading dump file for: {dataset_label}")
            # NOTE(review): the Context is built from `environment` while the
            # bucket comes from `source_environment` — confirm this is intended.
            ctx = Context(environment)
            harvester_data_bucket = f"s3://{source_environment.aws.harvest_content_bucket}/datasets/harvester"
            # (Removed a redundant second read of options["download_edurep"]
            # here; the value is already bound above.)
            if download_edurep:
                ctx.run(
                    f"aws s3 sync {harvester_data_bucket} {settings.DATAGROWTH_DATA_DIR}"
                )
            else:
                ctx.run(
                    f"aws s3 sync {harvester_data_bucket} {settings.DATAGROWTH_DATA_DIR} --exclude *edurepoaipmh*"
                )
        logger.info(f"Importing dataset: {dataset_label}")
        # Find the first dump file whose name starts with the dataset label.
        for entry in os.scandir(get_dumps_path(Dataset)):
            if entry.is_file() and entry.name.startswith(dataset_label):
                dataset_file = entry.path
                break
        else:
            raise CommandError(
                f"Can't find a dump file for label: {dataset_label}")

        # Process dump file
        with open(dataset_file, "r") as dump_file:
            for objects in objects_from_disk(dump_file):
                self.bulk_create_objects(objects)
        # Load resources
        self.load_resources(download_edurep)
        self.reset_postgres_sequences()

        # Index data
        if should_index:
            latest_dataset_version = DatasetVersion.objects.get_current_version()
            call_command("index_dataset_version",
                         dataset=latest_dataset_version.dataset.name,
                         harvester_version=latest_dataset_version.version)
Beispiel #16
0
 def _run_build_steps(self, ctx: Context) -> None:
     """Build with nmake on Windows; delegate to the parent class otherwise."""
     windows = (SupportedPlatformEnum.WINDOWS_32, SupportedPlatformEnum.WINDOWS_64)
     if self.platform not in windows:
         return super()._run_build_steps(ctx)
     # A failing clean (e.g. fresh checkout) should not abort the build.
     ctx.run("nmake clean", warn=True)
     ctx.run("nmake")
def _assume_role_creds(c: Context, aws_account, role, session_name, duration,
                       hide) -> Dict[str, Any]:
    """Call `aws sts assume-role` once and parse its JSON stdout."""
    return json.loads(
        c.run(
            f"aws sts assume-role "
            f"--role-arn arn:aws:iam::{aws_account}:role/{role} "
            f"--role-session-name {session_name} "
            f"--duration-seconds {duration}",
            hide=hide,
        ).stdout)


def role_credentials(
    c: Context,
    role: str = "CFN-VO-VSA-TWITTER-PIPELINE-TerraCFNIamBuildRole-1IPNZSY2YKHU8",
    session_name: str = "user-session",
    duration: int = 36000,
) -> Dict[str, Any]:
    """Get credentials for a role for the workspace corresponding to the active git branch.

    When AWS rejects the requested session duration, retries once with a
    clamped duration (60 minutes if too long, 15 minutes if too short), then
    decorates the credentials with an "env" mapping for downstream commands.
    """
    workspace = current_workspace(c)
    aws_account = current_aws_account(c)
    # A session token already in the environment interferes with assuming a
    # role, so restore the original (token-free) environment first.
    if os.environ.get("AWS_SESSION_TOKEN"):
        os.environ.clear()
        os.environ.update(ORIG_ENV)
    try:
        role_creds: Dict[str, Any] = _assume_role_creds(
            c, aws_account, role, session_name, duration, hide="both")
    except UnexpectedExit:
        # Clamp the duration into a range AWS accepts and retry exactly once.
        if duration > 3600:
            logger.warning(
                f"Could not assume {role} for {duration // 60} minutes, trying again with 60 minutes..."
            )
            duration = 3600
        elif duration < 900:
            logger.warning(
                f"Could not assume {role} for {duration // 60} minutes, trying again with 15 minutes..."
            )
            duration = 900
        else:
            logger.error(
                f"Could not assume {role} for {duration // 60} minutes")
            raise
        role_creds = _assume_role_creds(
            c, aws_account, role, session_name, duration, hide="out")
    role_creds["env"] = {
        "WORKSPACE": workspace,
        "AWS_ACCOUNT": aws_account,
        "AWS_DEFAULT_REGION": "eu-west-1",
        "AWS_SESSION_TOKEN": role_creds["Credentials"]["SessionToken"],
        "AWS_ACCESS_KEY_ID": role_creds["Credentials"]["AccessKeyId"],
        "AWS_SECRET_ACCESS_KEY": role_creds["Credentials"]["SecretAccessKey"],
    }
    logger.info(
        f"Assumed {role} for {session_name} valid for {duration // 60} minutes"
    )
    return role_creds
Beispiel #18
0
        def prefixes_should_apply_to_run(self, Local):
            """Commands run under prefix() are prefixed with it."""
            fake_runner = Local.return_value
            context = Context()
            with context.prefix('cd foo'):
                context.run('whoami')

            expected = "cd foo && whoami"
            ok_(fake_runner.run.called, "run() never called runner.run()!")
            eq_(fake_runner.run.call_args[0][0], expected)
Beispiel #19
0
        def cd_should_apply_to_run(self, Local):
            """cd() prepends its directory change to run() commands."""
            fake_runner = Local.return_value
            ctx = Context()
            with ctx.cd('foo'):
                ctx.run('whoami')

            expected = "cd foo && whoami"
            assert fake_runner.run.called, "run() never called runner.run()!"
            assert fake_runner.run.call_args[0][0] == expected
Beispiel #20
0
def check_existing_core(c: Context, hide: bool) -> None:
    """Raise SystemError when a previous core installation is detected."""
    # Each probe command yields a truthy result when an old install exists.
    probes = (
        ("python -c \"import core\"",
         "existing python2 core installation detected, please remove"),
        ("python3 -c \"import core\"",
         "existing python3 core installation detected, please remove"),
        ("which core-daemon",
         "core scripts found, please remove old installation"),
    )
    for command, message in probes:
        if c.run(command, warn=True, hide=hide):
            raise SystemError(message)
Beispiel #21
0
        def prefixes_should_apply_to_run(self, Local):
            """run() includes the active prefix in the emitted command."""
            runner_mock = Local.return_value
            c = Context()
            with c.prefix('cd foo'):
                c.run('whoami')

            wanted = "cd foo && whoami"
            ok_(runner_mock.run.called, "run() never called runner.run()!")
            eq_(runner_mock.run.call_args[0][0], wanted)
Beispiel #22
0
        def cd_should_occur_before_prefixes(self, Local):
            """The cd part is emitted before any prefix commands."""
            runner_mock = Local.return_value
            context = Context()
            with context.prefix('source venv'):
                with context.cd('foo'):
                    context.run('whoami')

            expected = "cd foo && source venv && whoami"
            ok_(runner_mock.run.called, "run() never called runner.run()!")
            eq_(runner_mock.run.call_args[0][0], expected)
Beispiel #23
0
        def cd_should_occur_before_prefixes(self, Local):
            """cd() takes precedence over prefix() in the emitted command."""
            fake_runner = Local.return_value
            ctx = Context()
            with ctx.prefix("source venv"):
                with ctx.cd("foo"):
                    ctx.run("whoami")

            expected = "cd foo && source venv && whoami"
            assert fake_runner.run.called, "run() never called runner.run()!"
            assert fake_runner.run.call_args[0][0] == expected
Beispiel #24
0
def release_exists(c: Context) -> bool:
    """Check if the current Sentry release exists."""
    try:
        # sentry-cli exits non-zero (-> UnexpectedExit) for unknown releases.
        c.run(f"sentry-cli releases --org {sentry_org} info {sentry_release}",
              hide="both")
    except UnexpectedExit:
        return False
    return True
Beispiel #25
0
        def cd_should_occur_before_prefixes(self, Local):
            """Ordering: cd first, then prefixes, then the actual command."""
            runner = Local.return_value
            c = Context()
            with c.prefix('source venv'), c.cd('foo'):
                c.run('whoami')

            cmd = "cd foo && source venv && whoami"
            ok_(runner.run.called, "run() never called runner.run()!")
            eq_(runner.run.call_args[0][0], cmd)
Beispiel #26
0
def install_system(c: Context, os_info: OsInfo, hide: bool) -> None:
    """Install required system packages for the detected OS family.

    Handles Debian-like and RedHat-like distributions, works around missing
    netem support on CentOS 8+, and switches nftables-based ebtables to the
    legacy implementation when possible.
    """
    if os_info.like == OsLike.DEBIAN:
        c.run(
            "sudo apt install -y automake pkg-config gcc libev-dev ebtables "
            "iproute2 ethtool tk python3-tk bash",
            hide=hide)
    elif os_info.like == OsLike.REDHAT:
        c.run(
            "sudo yum install -y automake pkgconf-pkg-config gcc gcc-c++ "
            "libev-devel iptables-ebtables iproute python3-devel python3-tkinter "
            "tk ethtool make bash",
            hide=hide)
        # centos 8+ does not support netem by default
        if os_info.name == OsName.CENTOS and os_info.version >= 8:
            c.run("sudo yum install -y kernel-modules-extra", hide=hide)
            # If sch_netem still fails to load after installing the extra
            # modules, the running kernel is presumably too old — tell the
            # user to update and bail out.
            if not c.run("sudo modprobe sch_netem", warn=True, hide=hide):
                print("\nERROR: you need to install the latest kernel")
                print("run the following, restart, and try again")
                print("sudo yum update")
                sys.exit(1)

    # attempt to setup legacy ebtables when an nftables based version is found
    r = c.run("ebtables -V", hide=hide)
    if "nf_tables" in r.stdout:
        # Best effort: warn (rather than fail) if the switch is impossible.
        if not c.run(
                "sudo update-alternatives --set ebtables /usr/sbin/ebtables-legacy",
                warn=True,
                hide=hide):
            print(
                "\nWARNING: unable to setup ebtables-legacy, WLAN will not work"
            )
Beispiel #27
0
def precommit(ctx: Context):
    """Execute the configured "pre-commit" hooks over every file.

    Parameters
    ----------
    ctx
        Context.
    """

    message_box('Running "pre-commit" hooks on the codebase...')
    ctx.run("pre-commit run --all-files")
Beispiel #28
0
    def transpile(context: Context, watch: bool, mode="development"):
        """Bundle JS with webpack, optionally in watch mode."""
        parts = ['npx', 'webpack']

        if watch:
            parts.append('--watch')

        parts += [
            '--output-path ./buck/static/js',
            f'--mode {mode}',
        ]

        context.run(' '.join(parts))
Beispiel #29
0
def sha256(ctx: Context):
    """Print the *sha256* digest of the built *Pypi* package via *OpenSSL*.

    Parameters
    ----------
    ctx
        Context.
    """

    message_box('Computing "sha256"...')
    # The source distribution lives in "dist" after a build.
    with ctx.cd("dist"):
        ctx.run(f"openssl sha256 {PYPI_PACKAGE_NAME}-*.tar.gz")
Beispiel #30
0
def install_poetry(c: Context, dev: bool, local: bool, hide: bool) -> None:
    """Install poetry via pipx, then either a local wheel or the daemon deps."""
    c.run("pipx install poetry", hide=hide)
    with c.cd(DAEMON_DIR):
        if local:
            # Build a wheel and install it system-wide.
            c.run("poetry build -f wheel", hide=hide)
            c.run("sudo python3 -m pip install dist/*")
        else:
            dep_args = "" if dev else "--no-dev"
            c.run(f"poetry install {dep_args}", hide=hide)
            if dev:
                c.run("poetry run pre-commit install", hide=hide)
Beispiel #31
0
def deploy_mxnet_operator():
    """Deploy the mxnet operator in the EKS cluster (v1beta1 crd for mxjobs).

    Clones kubeflow/mxnet-operator into $HOME (replacing any previous
    checkout) and applies its v1beta1 overlay with kubectl.
    """
    ctx = Context()
    home_dir = ctx.run("echo $HOME").stdout.strip("\n")
    mxnet_operator_dir = os.path.join(home_dir, "mxnet-operator")
    # Start from a clean checkout.
    if os.path.isdir(mxnet_operator_dir):
        ctx.run(f"rm -rf {mxnet_operator_dir}")

    clone_mxnet_command = f"git clone https://github.com/kubeflow/mxnet-operator.git {mxnet_operator_dir}"
    ctx.run(clone_mxnet_command, echo=True)
    # Use the same Context for kubectl instead of the module-level `run`,
    # keeping the command execution consistent within this function.
    ctx.run(f"kubectl create -k {mxnet_operator_dir}/manifests/overlays/v1beta1/",
            echo=True)
Beispiel #32
0
def release(ctx: Context):
    """Release the project to *Pypi* with *Twine*.

    Parameters
    ----------
    ctx
        Context.
    """

    message_box("Releasing...")
    with ctx.cd("dist"):
        # Upload the source distribution first, then the wheel.
        for artifact_glob in ("*.tar.gz", "*.whl"):
            ctx.run(f"twine upload {artifact_glob}")
Beispiel #33
0
def deploy_mpi_operator():
    """Deploy the mpi operator in the EKS cluster (v1alpha2 crd for mpijobs).

    Clones kubeflow/mpi-operator into $HOME (replacing any previous checkout)
    and applies its v1alpha2 manifest with kubectl.
    """
    ctx = Context()
    home_dir = ctx.run("echo $HOME").stdout.strip("\n")
    mpi_operator_dir = os.path.join(home_dir, "mpi-operator")
    # Start from a clean checkout.
    if os.path.isdir(mpi_operator_dir):
        ctx.run(f"rm -rf {mpi_operator_dir}")

    # Fixed copy-paste name: this clones the *mpi* operator, not mxnet.
    clone_mpi_command = f"git clone https://github.com/kubeflow/mpi-operator {mpi_operator_dir}"
    ctx.run(clone_mpi_command, echo=True)
    # Use the same Context for kubectl instead of the module-level `run`.
    ctx.run(f"kubectl create -f {mpi_operator_dir}/deploy/v1alpha2/mpi-operator.yaml",
            echo=True)
Beispiel #34
0
def _build():
    """
    Build local support docs tree and return the build target dir for cleanup.
    """
    c = Context()
    support_dir = join(dirname(__file__), "_support")
    docs_dir = join(support_dir, "docs")
    build_dir = join(support_dir, "_build")
    sphinx_command = "sphinx-build -c {} -W {} {}".format(
        support_dir, docs_dir, build_dir)
    with c.cd(support_dir):
        # Turn off stdin mirroring to avoid irritating pytest.
        c.run(sphinx_command, in_stream=False)
    return build_dir
Beispiel #35
0
    def build(self, ctx: Context) -> None:
        """Build zlib: MASM assembly + msbuild on Windows, configure/make elsewhere."""
        if self.platform in (SupportedPlatformEnum.WINDOWS_32, SupportedPlatformEnum.WINDOWS_64):
            # Select toolchain settings for the target architecture.
            if self.platform == SupportedPlatformEnum.WINDOWS_32:
                arch, build_script, build_platform = 'x86', 'bld_ml32.bat', 'Win32'
            else:
                arch, build_script, build_platform = 'x64', 'bld_ml64.bat', 'x64'

            masm_path = self.src_path / 'contrib' / f'masm{arch}'
            with ctx.cd(str(masm_path)):
                ctx.run(build_script)
                ctx.run(f'msbuild ..\\vstudio\\vc14\\zlibvc.sln /P:Configuration=Release /P:Platform={build_platform}')

        else:
            # Linux/macOS build
            with ctx.cd(str(self.src_path)):
                ctx.run('CFLAGS="-fPIC" ./configure -static')
                ctx.run('make clean')
                ctx.run('make')
Beispiel #36
0
    def _run_configure_command(
            self,
            ctx: Context,
            openssl_target: str,
            zlib_lib_path: Path,
            zlib_include_path: Path
    ) -> None:
        """Run OpenSSL's Configure with the right zlib location per platform."""
        is_windows = self.platform in (
            SupportedPlatformEnum.WINDOWS_32,
            SupportedPlatformEnum.WINDOWS_64,
        )
        if is_windows:
            extra_args = '-no-asm -DZLIB_WINAPI'  # *hate* zlib
            # On Windows OpenSSL wants the full path to the lib file
            final_zlib_path = zlib_lib_path
        else:
            extra_args = ' -fPIC'
            # On Unix OpenSSL wants the path to the folder where the lib is
            final_zlib_path = zlib_lib_path.parent

        configure_command = self._OPENSSL_CONF_CMD.format(
            target=openssl_target,
            zlib_lib_path=final_zlib_path,
            zlib_include_path=zlib_include_path,
            extra_args=extra_args,
        )
        ctx.run(configure_command)
Beispiel #37
0
        def nesting_should_retain_order(self, Local):
            """Nested prefixes compose outermost-first and unwind on exit."""
            runner = Local.return_value
            context = Context()
            with context.prefix('cd foo'):
                with context.prefix('cd bar'):
                    context.run('whoami')
                    expected = "cd foo && cd bar && whoami"
                    assert runner.run.called, "run() never called runner.run()!" # noqa
                    assert runner.run.call_args[0][0] == expected

                context.run('whoami')
                expected = "cd foo && whoami"
                assert runner.run.called, "run() never called runner.run()!"
                assert runner.run.call_args[0][0] == expected

            # also test that prefixes do not persist
            context.run('whoami')
            expected = "whoami"
            assert runner.run.called, "run() never called runner.run()!"
            assert runner.run.call_args[0][0] == expected
Beispiel #38
0
        def nesting_should_retain_order(self, Local):
            """Inner prefixes append after outer ones and pop when exited."""
            mock_runner = Local.return_value
            c = Context()
            with c.prefix('cd foo'):
                with c.prefix('cd bar'):
                    c.run('whoami')
                    wanted = "cd foo && cd bar && whoami"
                    ok_(mock_runner.run.called, "run() never called runner.run()!")
                    eq_(mock_runner.run.call_args[0][0], wanted)

                c.run('whoami')
                wanted = "cd foo && whoami"
                ok_(mock_runner.run.called, "run() never called runner.run()!")
                eq_(mock_runner.run.call_args[0][0], wanted)

            # also test that prefixes do not persist
            c.run('whoami')
            wanted = "whoami"
            ok_(mock_runner.run.called, "run() never called runner.run()!")
            eq_(mock_runner.run.call_args[0][0], wanted)
Beispiel #39
0
 def honors_runner_config_setting(self):
     """Context.run uses the runner class named by config runners.local."""
     configured_runner = Mock()
     context = Context(Config({"runners": {"local": configured_runner}}))
     context.run("foo")
     assert configured_runner.mock_calls == [
         call(context),
         call().run("foo"),
     ]
Beispiel #40
0
 def defaults_to_Local(self, Local):
     """Without runner config, Context.run falls back to the Local runner."""
     context = Context()
     context.run("foo")
     assert Local.mock_calls == [call(context), call().run("foo")]
Beispiel #41
0
 def _run_build_steps(self, ctx: Context) -> None:
     """Use nmake on Windows targets; delegate to the base class otherwise."""
     if self.platform not in [SupportedPlatformEnum.WINDOWS_32, SupportedPlatformEnum.WINDOWS_64]:
         return super()._run_build_steps(ctx)
     # Clean may fail on a fresh tree; don't let that abort the build.
     ctx.run('nmake clean', warn=True)
     ctx.run('nmake')
Beispiel #42
0
    def _run_build_steps(self, ctx: Context) -> None:
        """Build with nmake on Windows (after the arch setup script) or make elsewhere."""
        windows_platforms = [SupportedPlatformEnum.WINDOWS_32, SupportedPlatformEnum.WINDOWS_64]
        if self.platform in windows_platforms:
            # Run the architecture-specific setup script before nmake.
            setup_script = 'ms\\do_ms' if self.platform == SupportedPlatformEnum.WINDOWS_32 else 'ms\\do_win64a.bat'
            ctx.run(setup_script)

            ctx.run('nmake -f ms\\nt.mak clean', warn=True)  # Does not work if tmp32 does not exist (fresh build)
            ctx.run('nmake -f ms\\nt.mak')

        else:
            ctx.run('make clean', warn=True)
            ctx.run('make')  # Only build the libs as it is faster - not available on Windows