def test_run_integration_test(
    self,
    oss_artifact: Path,
) -> None:
    """
    It is possible to run DC/OS integration tests on Vagrant.
    This test module only requires a single master node.
    """
    cluster_backend = Vagrant()
    with Cluster(
        cluster_backend=cluster_backend,
        masters=1,
        agents=1,
        public_agents=1,
    ) as cluster:
        cluster.install_dcos_from_path(
            build_artifact=oss_artifact,
            dcos_config=cluster.base_config,
            log_output_live=True,
            ip_detect_path=cluster_backend.ip_detect_path,
        )
        cluster.wait_for_dcos_oss()

        # No error is raised with a successful command.
        cluster.run_integration_tests(
            pytest_command=['pytest', '-vvv', '-s', '-x', 'test_units.py'],
            log_output_live=True,
        )
def test_run_integration_test(
    self,
    oss_installer: Path,
) -> None:
    """
    It is possible to run DC/OS integration tests on Vagrant.
    This test module only requires a single master node.
    """
    cluster_backend = Vagrant()
    with Cluster(
        cluster_backend=cluster_backend,
        masters=1,
        agents=1,
        public_agents=1,
    ) as cluster:
        cluster.install_dcos_from_path(
            dcos_installer=oss_installer,
            dcos_config=cluster.base_config,
            output=Output.CAPTURE,
            ip_detect_path=cluster_backend.ip_detect_path,
        )
        cluster.wait_for_dcos_oss()

        # No error is raised with a successful command.
        cluster.run_with_test_environment(
            args=['pytest', '-vvv', '-s', '-x', 'test_units.py'],
            output=Output.CAPTURE,
        )
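# A minimal sketch of a pytest fixture that could supply the
# ``oss_installer`` argument used by the test above. The fixture name
# matches the test signature; the environment variable and file name are
# assumptions for illustration, not part of the source.
import os
from pathlib import Path

import pytest


@pytest.fixture(scope='session')
def oss_installer() -> Path:
    # Hypothetical: point this at a downloaded DC/OS OSS installer,
    # e.g. ``dcos_generate_config.sh``.
    return Path(os.environ['DCOS_OSS_INSTALLER_PATH'])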
def install_dcos(
    ctx: click.core.Context,
    installer: Path,
    extra_config: Dict[str, Any],
    variant: str,
    workspace_dir: Path,
    license_key: Optional[Path],
    security_mode: Optional[str],
    cluster_id: str,
    genconf_dir: Optional[Path],
    wait_for_dcos: bool,
) -> None:
    """
    Install DC/OS on a provisioned Vagrant cluster.
    """
    doctor_command_name = command_path(sibling_ctx=ctx, command=doctor)
    wait_command_name = command_path(sibling_ctx=ctx, command=wait)
    doctor_message = get_doctor_message(
        doctor_command_name=doctor_command_name,
    )
    dcos_variant = get_install_variant(
        given_variant=variant,
        installer_path=installer,
        workspace_dir=workspace_dir,
        doctor_message=doctor_message,
    )
    cluster_backend = Vagrant()
    cluster_vms = ClusterVMs(cluster_id=cluster_id)
    dcos_config = get_config(
        cluster_representation=cluster_vms,
        extra_config=extra_config,
        dcos_variant=dcos_variant,
        security_mode=security_mode,
        license_key=license_key,
    )
    install_dcos_from_path(
        cluster_representation=cluster_vms,
        dcos_config=dcos_config,
        ip_detect_path=cluster_backend.ip_detect_path,
        doctor_message=doctor_message,
        dcos_installer=installer,
        local_genconf_dir=genconf_dir,
    )
    run_post_install_steps(
        cluster=cluster_vms.cluster,
        cluster_id=cluster_id,
        dcos_config=dcos_config,
        doctor_command_name=doctor_command_name,
        http_checks=True,
        wait_command_name=wait_command_name,
        wait_for_dcos=wait_for_dcos,
    )
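# A hedged sketch of the ``get_doctor_message`` helper used above. The
# body is an assumption, modelled on the literal troubleshooting message
# ('Try `dcos-vagrant doctor` for troubleshooting help.') used by the
# older ``create`` command later in this section; the real implementation
# may differ.
def get_doctor_message_sketch(doctor_command_name: str) -> str:
    return 'Try `{doctor}` for troubleshooting help.'.format(
        doctor=doctor_command_name,
    )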
def base_config(self) -> Dict[str, Any]:
    """
    Return a base configuration for installing DC/OS OSS.
    """
    backend = Vagrant()
    return {
        **self.cluster.base_config,
        **backend.base_config,
    }
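# Note on the merge above: with ``{**a, **b}``, keys from ``b`` win on
# conflict, so the backend's settings override the cluster's generic
# defaults. A minimal, self-contained illustration (the keys here are
# made up for the example):
cluster_defaults = {'exhibitor_storage_backend': 'static', 'ssh_port': 22}
backend_overrides = {'ssh_port': 2222}
merged = {**cluster_defaults, **backend_overrides}
assert merged['ssh_port'] == 2222  # The later (backend) value wins.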
def vm_memory_mb_option(command: Callable[..., None]) -> Callable[..., None]:
    """
    An option decorator for the amount of memory given to each VM.
    """
    backend = Vagrant()
    function = click.option(
        '--vm-memory-mb',
        type=click.INT,
        default=backend.vm_memory_mb,
        show_default=True,
        help='The amount of memory to give each VM.',
    )(command)  # type: Callable[..., None]
    return function
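# How option decorators like ``vm_memory_mb_option`` (and the box URL and
# box version variants below) might be stacked onto a click command.
# ``do_provision`` is a hypothetical command body for illustration only;
# click maps ``--vm-memory-mb`` to the ``vm_memory_mb`` parameter.
import click


@click.command()
@vm_memory_mb_option
def do_provision(vm_memory_mb: int) -> None:
    click.echo('Each VM gets {} MB of memory.'.format(vm_memory_mb))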
def test_default(self) -> None:
    """
    By default, VMs include an empty description.
    """
    with Cluster(
        cluster_backend=Vagrant(),
        masters=1,
        agents=0,
        public_agents=0,
    ) as cluster:
        (master, ) = cluster.masters
        new_vm_name = _get_vm_from_node(node=master)
        description = _description_from_vm_name(vm_name=new_vm_name)
        assert description is None
def provision(
    ctx: click.core.Context,
    agents: int,
    masters: int,
    public_agents: int,
    workspace_dir: Path,
    cluster_id: str,
    enable_selinux_enforcing: bool,
    vm_memory_mb: int,
    enable_spinner: bool,
    vagrant_box_url: str,
    vagrant_box_version: str,
) -> None:
    """
    Provision a Vagrant cluster for installing DC/OS.
    """
    check_cluster_id_unique(
        new_cluster_id=cluster_id,
        existing_cluster_ids=existing_cluster_ids(),
    )
    doctor_command_name = command_path(sibling_ctx=ctx, command=doctor)
    doctor_message = get_doctor_message(
        doctor_command_name=doctor_command_name,
    )
    description = {
        CLUSTER_ID_DESCRIPTION_KEY: cluster_id,
        WORKSPACE_DIR_DESCRIPTION_KEY: str(workspace_dir),
    }
    cluster_backend = Vagrant(
        workspace_dir=workspace_dir,
        virtualbox_description=json.dumps(obj=description),
        vm_memory_mb=vm_memory_mb,
        vagrant_box_url=vagrant_box_url,
        vagrant_box_version=vagrant_box_version,
    )
    cluster = create_cluster(
        cluster_backend=cluster_backend,
        masters=masters,
        agents=agents,
        public_agents=public_agents,
        doctor_message=doctor_message,
        enable_spinner=enable_spinner,
    )
    nodes = {*cluster.masters, *cluster.agents, *cluster.public_agents}
    for node in nodes:
        if enable_selinux_enforcing:
            node.run(args=['setenforce', '1'], sudo=True)
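# A hedged sketch of what ``check_cluster_id_unique`` plausibly does.
# ``click.BadParameter`` is a real click exception, but the helper body
# and message are assumptions; the real helper lives elsewhere in the
# CLI package.
from typing import Set

import click


def check_cluster_id_unique_sketch(
    new_cluster_id: str,
    existing_cluster_ids: Set[str],
) -> None:
    if new_cluster_id in existing_cluster_ids:
        message = 'A cluster with the ID "{}" already exists.'.format(
            new_cluster_id,
        )
        raise click.BadParameter(message=message)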
def vagrant_box_url_option(
    command: Callable[..., None],
) -> Callable[..., None]:
    """
    An option decorator for the Vagrant Box URL to use.
    """
    backend = Vagrant()
    function = click.option(
        '--vagrant-box-url',
        type=click.STRING,
        default=backend.vagrant_box_url,
        show_default=True,
        help='The URL of the Vagrant box to use.',
    )(command)  # type: Callable[..., None]
    return function
def test_custom(self) -> None:
    """
    It is possible to set a custom description for VMs.
    """
    description = uuid.uuid4().hex
    with Cluster(
        cluster_backend=Vagrant(virtualbox_description=description),
        masters=1,
        agents=0,
        public_agents=0,
    ) as cluster:
        (master, ) = cluster.masters
        new_vm_name = _get_vm_from_node(node=master)
        vm_description = _description_from_vm_name(vm_name=new_vm_name)
        assert vm_description == description
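# A hedged sketch of a ``_description_from_vm_name``-style helper for the
# two tests above. It assumes ``VBoxManage`` is on the PATH and that
# ``showvminfo --machinereadable`` emits a ``description="..."`` line;
# the real helper in the test module may be implemented differently.
import subprocess
from typing import Optional


def _description_from_vm_name_sketch(vm_name: str) -> Optional[str]:
    # ``--machinereadable`` output is a series of ``key="value"`` lines.
    output = subprocess.check_output(
        args=['VBoxManage', 'showvminfo', vm_name, '--machinereadable'],
    ).decode()
    for line in output.splitlines():
        if line.startswith('description='):
            # Strip the surrounding quotes; treat an empty value as None.
            value = line[len('description='):].strip('"')
            return value or None
    return None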
def vagrant_client(self) -> Any:
    """
    A Vagrant client attached to this cluster.
    """
    vm_names = self._vm_names()

    # We are not creating VMs, so these values must be set, but they do
    # not matter as long as they are valid for use with the Vagrantfile.
    backend = Vagrant()
    description = backend.virtualbox_description
    vm_memory_mb = backend.vm_memory_mb
    vagrant_box_version = backend.vagrant_box_version
    vagrant_box_url = backend.vagrant_box_url

    vagrant_env = {
        'HOME': os.environ['HOME'],
        'PATH': os.environ['PATH'],
        'VM_NAMES': ','.join(list(vm_names)),
        'VM_DESCRIPTION': description,
        'VM_MEMORY': str(vm_memory_mb),
        'VAGRANT_BOX_VERSION': vagrant_box_version,
        'VAGRANT_BOX_URL': vagrant_box_url,
    }

    [vagrant_root_parent] = [
        item for item in self._workspace_dir.iterdir()
        if item.is_dir() and item.name != 'genconf'
    ]

    # We ignore files such as .DS_Store files.
    [vagrant_root] = [
        item for item in vagrant_root_parent.iterdir() if item.is_dir()
    ]

    # We import Vagrant here instead of at the top of the file because, if
    # the Vagrant executable is not found, a warning is logged.
    #
    # We want to avoid that warning for users of other backends who do not
    # have the Vagrant executable.
    import vagrant
    vagrant_client = vagrant.Vagrant(
        root=str(vagrant_root),
        env=vagrant_env,
        quiet_stdout=False,
        quiet_stderr=True,
    )
    return vagrant_client
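# Example use of the client returned above, assuming ``vagrant_client``
# is exposed as a property on a ``ClusterVMs``-like object (an assumption
# here). ``status()`` is a real method of ``vagrant.Vagrant`` from the
# ``python-vagrant`` package; it returns namedtuples with ``name``,
# ``state`` and ``provider`` fields.
client = cluster_vms.vagrant_client  # ``cluster_vms`` is hypothetical.
for vm_status in client.status():
    print(vm_status.name, vm_status.state)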
def vagrant_box_version_option(
    command: Callable[..., None],
) -> Callable[..., None]:
    """
    An option decorator for the Vagrant Box version to use.
    """
    backend = Vagrant()
    version_constraint_url = (
        'https://www.vagrantup.com/docs/boxes/versioning.html'
        '#version-constraints'
    )
    function = click.option(
        '--vagrant-box-version',
        type=click.STRING,
        default=backend.vagrant_box_version,
        show_default=True,
        help=(
            'The version of the Vagrant box to use. '
            'See {version_constraint_url} for details.'
        ).format(version_constraint_url=version_constraint_url),
    )(command)  # type: Callable[..., None]
    return function
def create(
    agents: int,
    artifact: str,
    extra_config: Dict[str, Any],
    masters: int,
    public_agents: int,
    variant: str,
    workspace_dir: Optional[Path],
    license_key: Optional[str],
    security_mode: Optional[str],
    copy_to_master: List[Tuple[Path, Path]],
    cluster_id: str,
) -> None:
    """
    Create a DC/OS cluster.

        DC/OS Enterprise

            \b
            DC/OS Enterprise clusters require different configuration variables from DC/OS OSS.
            For example, enterprise clusters require the following configuration parameters:

            ``superuser_username``, ``superuser_password_hash``, ``fault_domain_enabled``, ``license_key_contents``

            \b
            These can all be set in ``--extra-config``.
            However, some defaults are provided for all but the license key.

            \b
            The default superuser username is ``admin``.
            The default superuser password is ``admin``.
            The default ``fault_domain_enabled`` is ``false``.

            \b
            ``license_key_contents`` must be set for DC/OS Enterprise 1.11 and above.
            This is set to one of the following, in order:

            \b
            * The ``license_key_contents`` set in ``--extra-config``.
            * The contents of the path given with ``--license-key``.
            * The contents of the path set in the ``DCOS_LICENSE_KEY_PATH`` environment variable.

            \b
            If none of these are set, ``license_key_contents`` is not given.
    """  # noqa: E501
    base_workspace_dir = workspace_dir or Path(tempfile.gettempdir())
    workspace_dir = base_workspace_dir / uuid.uuid4().hex
    workspace_dir.mkdir(parents=True)

    doctor_message = 'Try `dcos-vagrant doctor` for troubleshooting help.'

    artifact_path = Path(artifact).resolve()

    if variant == 'auto':
        variant = get_variant(
            artifact_path=artifact_path,
            workspace_dir=workspace_dir,
            doctor_message=doctor_message,
        )

    enterprise = bool(variant == 'enterprise')

    description = {
        CLUSTER_ID_DESCRIPTION_KEY: cluster_id,
        WORKSPACE_DIR_DESCRIPTION_KEY: str(workspace_dir),
        VARIANT_DESCRIPTION_KEY: 'ee' if enterprise else '',
    }
    cluster_backend = Vagrant(
        workspace_dir=workspace_dir,
        virtualbox_description=json.dumps(obj=description),
    )

    if enterprise:
        # The documented defaults from the docstring above.
        superuser_username = 'admin'
        superuser_password = 'admin'

        enterprise_extra_config = {
            'superuser_username': superuser_username,
            'superuser_password_hash': sha512_crypt.hash(superuser_password),
            'fault_domain_enabled': False,
        }
        if license_key is not None:
            key_contents = Path(license_key).read_text()
            enterprise_extra_config['license_key_contents'] = key_contents

        extra_config = {**enterprise_extra_config, **extra_config}
        if security_mode is not None:
            extra_config['security'] = security_mode

    try:
        cluster = Cluster(
            cluster_backend=cluster_backend,
            masters=masters,
            agents=agents,
            public_agents=public_agents,
            files_to_copy_to_installer=[],
        )
    except CalledProcessError as exc:
        click.echo('Error creating cluster.', err=True)
        click.echo(doctor_message)
        sys.exit(exc.returncode)

    for node in cluster.masters:
        for path_pair in copy_to_master:
            local_path, remote_path = path_pair
            node.send_file(
                local_path=local_path,
                remote_path=remote_path,
            )

    try:
        with click_spinner.spinner():
            cluster.install_dcos_from_path(
                build_artifact=artifact_path,
                dcos_config={
                    **cluster.base_config,
                    **extra_config,
                },
            )
    except CalledProcessError as exc:
        click.echo('Error installing DC/OS.', err=True)
        click.echo(doctor_message)
        cluster.destroy()
        sys.exit(exc.returncode)
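# The ``superuser_password_hash`` above is produced with passlib's
# ``sha512_crypt``. A minimal, self-contained illustration of that API:
from passlib.hash import sha512_crypt

password_hash = sha512_crypt.hash('admin')
# ``verify`` checks a candidate password against the stored hash.
assert sha512_crypt.verify('admin', password_hash)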
def create(
    ctx: click.core.Context,
    agents: int,
    installer: str,
    extra_config: Dict[str, Any],
    masters: int,
    public_agents: int,
    variant: str,
    workspace_dir: Optional[Path],
    license_key: Optional[str],
    security_mode: Optional[str],
    copy_to_master: List[Tuple[Path, Path]],
    cluster_id: str,
    verbose: int,
    enable_selinux_enforcing: bool,
    genconf_dir: Optional[Path],
    wait_for_dcos: bool,
) -> None:
    """
    Create a DC/OS cluster.

        DC/OS Enterprise

            \b
            DC/OS Enterprise clusters require different configuration variables from DC/OS OSS.
            For example, enterprise clusters require the following configuration parameters:

            ``superuser_username``, ``superuser_password_hash``, ``fault_domain_enabled``, ``license_key_contents``

            \b
            These can all be set in ``--extra-config``.
            However, some defaults are provided for all but the license key.

            \b
            The default superuser username is ``admin``.
            The default superuser password is ``admin``.
            The default ``fault_domain_enabled`` is ``false``.

            \b
            ``license_key_contents`` must be set for DC/OS Enterprise 1.11 and above.
            This is set to one of the following, in order:

            \b
            * The ``license_key_contents`` set in ``--extra-config``.
            * The contents of the path given with ``--license-key``.
            * The contents of the path set in the ``DCOS_LICENSE_KEY_PATH`` environment variable.

            \b
            If none of these are set, ``license_key_contents`` is not given.
    """  # noqa: E501
    set_logging(verbosity_level=verbose)
    check_cluster_id_unique(
        new_cluster_id=cluster_id,
        existing_cluster_ids=existing_cluster_ids(),
    )
    base_workspace_dir = workspace_dir or Path(tempfile.gettempdir())
    workspace_dir = base_workspace_dir / uuid.uuid4().hex
    workspace_dir.mkdir(parents=True)

    doctor_message = get_doctor_message(sibling_ctx=ctx, doctor_command=doctor)
    installer_path = Path(installer).resolve()

    dcos_variant = get_variant(
        given_variant=variant,
        installer_path=installer_path,
        workspace_dir=workspace_dir,
        doctor_message=doctor_message,
    )

    variant_label_value = {
        DCOSVariant.OSS: VARIANT_OSS_DESCRIPTION_VALUE,
        DCOSVariant.ENTERPRISE: VARIANT_ENTERPRISE_DESCRIPTION_VALUE,
    }[dcos_variant]

    description = {
        CLUSTER_ID_DESCRIPTION_KEY: cluster_id,
        WORKSPACE_DIR_DESCRIPTION_KEY: str(workspace_dir),
        VARIANT_DESCRIPTION_KEY: variant_label_value,
    }
    cluster_backend = Vagrant(
        workspace_dir=workspace_dir,
        virtualbox_description=json.dumps(obj=description),
    )
    cluster = create_cluster(
        cluster_backend=cluster_backend,
        masters=masters,
        agents=agents,
        public_agents=public_agents,
        sibling_ctx=ctx,
        doctor_command=doctor,
    )

    nodes = {*cluster.masters, *cluster.agents, *cluster.public_agents}
    for node in nodes:
        if enable_selinux_enforcing:
            node.run(args=['setenforce', '1'], sudo=True)

    for node in cluster.masters:
        for path_pair in copy_to_master:
            local_path, remote_path = path_pair
            node.send_file(
                local_path=local_path,
                remote_path=remote_path,
            )

    files_to_copy_to_genconf_dir = []
    if genconf_dir is not None:
        container_genconf_path = Path('/genconf')
        # ``glob('*')`` matches only the top level of ``genconf_dir``.
        for genconf_file in genconf_dir.glob('*'):
            genconf_relative = genconf_file.relative_to(genconf_dir)
            relative_path = container_genconf_path / genconf_relative
            files_to_copy_to_genconf_dir.append((genconf_file, relative_path))

    dcos_config = get_config(
        cluster=cluster,
        extra_config=extra_config,
        dcos_variant=dcos_variant,
        security_mode=security_mode,
        license_key=license_key,
    )

    install_dcos_from_path(
        cluster=cluster,
        dcos_config=dcos_config,
        ip_detect_path=cluster_backend.ip_detect_path,
        files_to_copy_to_genconf_dir=files_to_copy_to_genconf_dir,
        doctor_command=doctor,
        sibling_ctx=ctx,
        installer=installer_path,
    )

    superuser_username = dcos_config.get(
        'superuser_username',
        DEFAULT_SUPERUSER_USERNAME,
    )
    superuser_password = dcos_config.get(
        'superuser_password',
        DEFAULT_SUPERUSER_PASSWORD,
    )

    if wait_for_dcos:
        dcos_e2e_cli.common.wait.wait_for_dcos(
            dcos_variant=dcos_variant,
            cluster=cluster,
            superuser_username=superuser_username,
            superuser_password=superuser_password,
            http_checks=True,
            doctor_command=doctor,
            sibling_ctx=ctx,
        )
        return

    show_cluster_started_message(
        # We work on the assumption that the ``wait`` command is a sibling
        # command of this one.
        sibling_ctx=ctx,
        wait_command=wait,
        cluster_id=cluster_id,
    )

    click.echo(cluster_id)
def create(
    ctx: click.core.Context,
    agents: int,
    installer: Path,
    extra_config: Dict[str, Any],
    masters: int,
    public_agents: int,
    variant: str,
    workspace_dir: Path,
    license_key: Optional[Path],
    security_mode: Optional[str],
    copy_to_master: List[Tuple[Path, Path]],
    cluster_id: str,
    enable_selinux_enforcing: bool,
    genconf_dir: Optional[Path],
    wait_for_dcos: bool,
) -> None:
    """
    Create a DC/OS cluster.
    """
    check_cluster_id_unique(
        new_cluster_id=cluster_id,
        existing_cluster_ids=existing_cluster_ids(),
    )
    doctor_command_name = command_path(sibling_ctx=ctx, command=doctor)
    wait_command_name = command_path(sibling_ctx=ctx, command=wait)
    doctor_message = get_doctor_message(
        doctor_command_name=doctor_command_name,
    )
    dcos_variant = get_install_variant(
        given_variant=variant,
        installer_path=installer,
        workspace_dir=workspace_dir,
        doctor_message=doctor_message,
    )
    description = {
        CLUSTER_ID_DESCRIPTION_KEY: cluster_id,
        WORKSPACE_DIR_DESCRIPTION_KEY: str(workspace_dir),
    }
    cluster_backend = Vagrant(
        workspace_dir=workspace_dir,
        virtualbox_description=json.dumps(obj=description),
    )
    cluster = create_cluster(
        cluster_backend=cluster_backend,
        masters=masters,
        agents=agents,
        public_agents=public_agents,
        doctor_message=doctor_message,
    )

    nodes = {*cluster.masters, *cluster.agents, *cluster.public_agents}
    for node in nodes:
        if enable_selinux_enforcing:
            node.run(args=['setenforce', '1'], sudo=True)

    for node in cluster.masters:
        for path_pair in copy_to_master:
            local_path, remote_path = path_pair
            node.send_file(
                local_path=local_path,
                remote_path=remote_path,
            )

    cluster_vms = ClusterVMs(cluster_id=cluster_id)
    dcos_config = get_config(
        cluster_representation=cluster_vms,
        extra_config=extra_config,
        dcos_variant=dcos_variant,
        security_mode=security_mode,
        license_key=license_key,
    )
    install_dcos_from_path(
        cluster_representation=cluster_vms,
        dcos_config=dcos_config,
        ip_detect_path=cluster_backend.ip_detect_path,
        doctor_message=doctor_message,
        dcos_installer=installer,
        local_genconf_dir=genconf_dir,
    )
    run_post_install_steps(
        cluster=cluster,
        cluster_id=cluster_id,
        dcos_config=dcos_config,
        doctor_command_name=doctor_command_name,
        http_checks=True,
        wait_command_name=wait_command_name,
        wait_for_dcos=wait_for_dcos,
    )
def upgrade(
    ctx: click.core.Context,
    cluster_id: str,
    extra_config: Dict[str, Any],
    security_mode: Optional[str],
    license_key: Optional[Path],
    variant: str,
    workspace_dir: Path,
    installer: Path,
    wait_for_dcos: bool,
    enable_spinner: bool,
    files_to_copy_to_genconf_dir: List[Tuple[Path, Path]],
) -> None:
    """
    Upgrade a cluster to a given version of DC/OS.
    """
    doctor_command_name = command_path(sibling_ctx=ctx, command=doctor)
    doctor_message = get_doctor_message(
        doctor_command_name=doctor_command_name,
    )
    check_cluster_id_exists(
        new_cluster_id=cluster_id,
        existing_cluster_ids=existing_cluster_ids(),
    )
    cluster_vms = ClusterVMs(cluster_id=cluster_id)
    cluster_backend = Vagrant()
    cluster = cluster_vms.cluster
    dcos_variant = get_install_variant(
        given_variant=variant,
        installer_path=installer,
        workspace_dir=workspace_dir,
        doctor_message=doctor_message,
        enable_spinner=enable_spinner,
    )
    dcos_config = get_config(
        cluster_representation=cluster_vms,
        extra_config=extra_config,
        dcos_variant=dcos_variant,
        security_mode=security_mode,
        license_key=license_key,
    )
    cluster_upgrade_dcos_from_path(
        cluster=cluster,
        cluster_representation=cluster_vms,
        dcos_installer=installer,
        dcos_config=dcos_config,
        ip_detect_path=cluster_backend.ip_detect_path,
        doctor_message=doctor_message,
        files_to_copy_to_genconf_dir=files_to_copy_to_genconf_dir,
        enable_spinner=enable_spinner,
    )
    wait_command_name = command_path(sibling_ctx=ctx, command=wait)
    run_post_install_steps(
        cluster=cluster,
        cluster_id=cluster_id,
        dcos_config=dcos_config,
        doctor_command_name=doctor_command_name,
        http_checks=True,
        wait_command_name=wait_command_name,
        wait_for_dcos=wait_for_dcos,
        enable_spinner=enable_spinner,
    )