def find_or_create_session(
    context: click.core.Context,
    session: Optional[str],
    build_name: Optional[str],
    flavor=None,
) -> Optional[str]:
    """Determine the test session ID to be used.

    1. If the user explicitly provides the session id via the `--session` option
    2. If the user gives no options, the current session ID is read from the
       session file tied to $PWD, or one is created from the current build
       name. See https://github.com/launchableinc/cli/pull/342
    3. The `--build` option is legacy compatible behaviour, in which case a
       session gets created and tied to the build.

    Args:
        context: click context used to invoke the `record session` command.
        session: The --session option value
        build_name: The --build option value
        flavor: Optional flavor key/value pairs forwarded to `record session`.

    Returns:
        The session ID to use, or None if one could not be read back after
        creating it.

    Raises:
        click.UsageError: if no build has been recorded, or the recorded
            build name conflicts with the given --build value.
    """
    from .record.session import session as session_command

    # FIX: the previous signature used a mutable default (`flavor=[]`), which
    # is shared across calls. Use a None sentinel and normalize here; the
    # downstream invoke still receives an empty list when nothing was given.
    if flavor is None:
        flavor = []

    # Case 1: explicit --session wins unconditionally.
    if session:
        return session

    saved_build_name = read_build()
    if not saved_build_name:
        raise click.UsageError(
            click.style(
                "Have you run `launchable record build`?\nIf not, please do. If it was run elsewhere/earlier, please use the --session option",
                fg="yellow"))

    # Case 3: a --build value must agree with the recorded build name.
    if build_name and saved_build_name != build_name:
        raise click.UsageError(
            click.style(
                "Given build name ({}) is different from when you ran `launchable record build --name {}`.\nMake sure to run `launchable record build --name {}` before."
                .format(build_name, saved_build_name, build_name),
                fg="yellow"))

    # Case 2: reuse the session tied to the saved build, or create one.
    session_id = read_session(saved_build_name)
    if session_id:
        return session_id

    context.invoke(
        session_command,
        build_name=saved_build_name,
        save_session_file=True,
        print_session=False,
        flavor=flavor)
    # The invoked command wrote the session file; read it back.
    return read_session(saved_build_name)
def destroy_list(
    ctx: click.core.Context,
    cluster_ids: List[str],
) -> None:
    """
    Destroy clusters.

    To destroy all clusters, run
    ``dcos-vagrant destroy $(dcos-vagrant list)``.
    """
    for cluster_id in cluster_ids:
        # Re-query on every iteration: each successful destroy changes the
        # set of existing clusters.
        if cluster_id in existing_cluster_ids():
            ctx.invoke(
                destroy,
                cluster_id=cluster_id,
            )
        else:
            message = 'Cluster "{cluster_id}" does not exist'.format(
                cluster_id=cluster_id,
            )
            click.echo(message, err=True)
def destroy_list(
    ctx: click.core.Context,
    cluster_ids: List[str],
    transport: Transport,
) -> None:
    """
    Destroy clusters.

    To destroy all clusters, run
    ``minidcos docker destroy $(minidcos docker list)``.
    """
    for cluster_id in cluster_ids:
        # Re-query on every iteration: each successful destroy changes the
        # set of existing clusters.
        if cluster_id in existing_cluster_ids():
            ctx.invoke(
                destroy,
                cluster_id=cluster_id,
                transport=transport,
            )
        else:
            message = 'Cluster "{cluster_id}" does not exist'.format(
                cluster_id=cluster_id,
            )
            click.echo(message, err=True)
def create(
    ctx: click.core.Context,
    agents: int,
    artifact: str,
    cluster_id: str,
    docker_storage_driver: str,
    docker_version: str,
    extra_config: Dict[str, Any],
    linux_distribution: str,
    masters: int,
    public_agents: int,
    license_key: Optional[str],
    security_mode: Optional[str],
    copy_to_master: List[Tuple[Path, Path]],
    genconf_dir: Optional[Path],
    workspace_dir: Optional[Path],
    custom_volume: List[Mount],
    custom_master_volume: List[Mount],
    custom_agent_volume: List[Mount],
    custom_public_agent_volume: List[Mount],
    variant: str,
    transport: Transport,
    wait_for_dcos: bool,
    network: Network,
) -> None:
    """
    Create a DC/OS cluster.

        DC/OS Enterprise

            \b
            DC/OS Enterprise clusters require different configuration variables to DC/OS OSS.
            For example, enterprise clusters require the following configuration parameters:

            ``superuser_username``, ``superuser_password_hash``, ``fault_domain_enabled``, ``license_key_contents``

            \b
            These can all be set in ``--extra-config``.
            However, some defaults are provided for all but the license key.

            \b
            The default superuser username is ``admin``.
            The default superuser password is ``admin``.
            The default ``fault_domain_enabled`` is ``false``.

            \b
            ``license_key_contents`` must be set for DC/OS Enterprise 1.11 and above.
            This is set to one of the following, in order:

            \b
            * The ``license_key_contents`` set in ``--extra-config``.
            * The contents of the path given with ``--license-key``.
            * The contents of the path set in the ``DCOS_LICENSE_KEY_PATH`` environment variable.

            \b
            If none of these are set, ``license_key_contents`` is not given.
    """  # noqa: E501
    # Each invocation gets a unique subdirectory of the workspace so that
    # multiple clusters can share a base workspace without clashing.
    base_workspace_dir = workspace_dir or Path(tempfile.gettempdir())
    workspace_dir = base_workspace_dir / uuid.uuid4().hex

    doctor_message = 'Try `dcos-docker doctor` for troubleshooting help.'

    # Generate a fresh SSH key pair in the workspace; the public key is
    # later appended to each node's authorized_keys so SSH access works.
    ssh_keypair_dir = workspace_dir / 'ssh'
    ssh_keypair_dir.mkdir(parents=True)
    public_key_path = ssh_keypair_dir / 'id_rsa.pub'
    private_key_path = ssh_keypair_dir / 'id_rsa'
    _write_key_pair(
        public_key_path=public_key_path,
        private_key_path=private_key_path,
    )

    artifact_path = Path(artifact).resolve()

    # 'auto' means: inspect the installer artifact to decide OSS vs
    # enterprise.
    if variant == 'auto':
        variant = get_variant(
            artifact_path=artifact_path,
            workspace_dir=workspace_dir,
            doctor_message=doctor_message,
        )

    enterprise = bool(variant == 'enterprise')

    if enterprise:
        # Defaults documented in the command help above; the user's
        # --extra-config values take precedence over these (it is spread
        # last in the merge below).
        superuser_username = '******'
        superuser_password = '******'

        enterprise_extra_config = {
            'superuser_username': superuser_username,
            'superuser_password_hash': sha512_crypt.hash(superuser_password),
            'fault_domain_enabled': False,
        }
        if license_key is not None:
            key_contents = Path(license_key).read_text()
            enterprise_extra_config['license_key_contents'] = key_contents

        extra_config = {**enterprise_extra_config, **extra_config}
        if security_mode is not None:
            extra_config['security'] = security_mode

    # Mirror the genconf directory layout into /genconf on the installer.
    files_to_copy_to_installer = []
    if genconf_dir is not None:
        container_genconf_path = Path('/genconf')
        for genconf_file in genconf_dir.glob('*'):
            genconf_relative = genconf_file.relative_to(genconf_dir)
            relative_path = container_genconf_path / genconf_relative
            files_to_copy_to_installer.append((genconf_file, relative_path))

    # Container labels are how other commands (list/wait/destroy) later
    # find this cluster and its metadata.
    cluster_backend = Docker(
        custom_container_mounts=custom_volume,
        custom_master_mounts=custom_master_volume,
        custom_agent_mounts=custom_agent_volume,
        custom_public_agent_mounts=custom_public_agent_volume,
        linux_distribution=LINUX_DISTRIBUTIONS[linux_distribution],
        docker_version=DOCKER_VERSIONS[docker_version],
        storage_driver=DOCKER_STORAGE_DRIVERS.get(docker_storage_driver),
        docker_container_labels={
            CLUSTER_ID_LABEL_KEY: cluster_id,
            WORKSPACE_DIR_LABEL_KEY: str(workspace_dir),
            VARIANT_LABEL_KEY: 'ee' if enterprise else '',
        },
        docker_master_labels={'node_type': 'master'},
        docker_agent_labels={'node_type': 'agent'},
        docker_public_agent_labels={'node_type': 'public_agent'},
        workspace_dir=workspace_dir,
        transport=transport,
        network=network,
    )

    try:
        cluster = Cluster(
            cluster_backend=cluster_backend,
            masters=masters,
            agents=agents,
            public_agents=public_agents,
            files_to_copy_to_installer=files_to_copy_to_installer,
        )
    except CalledProcessError as exc:
        click.echo('Error creating cluster.', err=True)
        click.echo(doctor_message)
        sys.exit(exc.returncode)

    nodes = {
        *cluster.masters,
        *cluster.agents,
        *cluster.public_agents,
    }

    # Install the generated public key on every node. The first `echo`
    # appends a blank line so the key starts on a fresh line regardless of
    # the file's current ending.
    for node in nodes:
        node.run(
            args=['echo', '', '>>', '/root/.ssh/authorized_keys'],
            shell=True,
        )
        node.run(
            args=[
                'echo',
                public_key_path.read_text(),
                '>>',
                '/root/.ssh/authorized_keys',
            ],
            shell=True,
        )

    # --copy-to-master file pairs are pushed to every master node.
    for node in cluster.masters:
        for path_pair in copy_to_master:
            local_path, remote_path = path_pair
            node.send_file(
                local_path=local_path,
                remote_path=remote_path,
            )

    try:
        with click_spinner.spinner():
            # User-provided extra_config overrides the backend's base
            # config keys on conflict.
            cluster.install_dcos_from_path(
                build_artifact=artifact_path,
                dcos_config={
                    **cluster.base_config,
                    **extra_config,
                },
            )
    except CalledProcessError as exc:
        click.echo('Error installing DC/OS.', err=True)
        click.echo(doctor_message)
        # Installation failed: tear the cluster down rather than leaving
        # half-provisioned containers around.
        cluster.destroy()
        sys.exit(exc.returncode)

    # The cluster ID goes to stdout so it can be captured by scripts;
    # status messages below go to stderr.
    click.echo(cluster_id)

    if wait_for_dcos:
        ctx.invoke(
            wait,
            cluster_id=cluster_id,
            transport=transport,
            skip_http_checks=bool(transport == Transport.DOCKER_EXEC),
        )
        return

    started_message = (
        'Cluster "{cluster_id}" has started. '
        'Run "dcos-docker wait --cluster-id {cluster_id}" to wait for DC/OS '
        'to become ready.').format(cluster_id=cluster_id)
    click.echo(started_message, err=True)
def run(
    ctx: click.core.Context,
    cluster_id: str,
    node_args: Tuple[str],
    sync_dir: Optional[Path],
    dcos_login_uname: str,
    dcos_login_pw: str,
    no_test_env: bool,
    node: Node,
    env: Dict[str, str],
    transport: Transport,
) -> None:
    """
    Run an arbitrary command on a node.

    This command sets up the environment so that ``pytest`` can be run.

    For example, run
    ``dcos-docker run --cluster-id 1231599 pytest -k test_tls.py``.

    Or, with sync:
    ``dcos-docker run --sync-dir . --cluster-id 1231599 pytest -k test_tls.py``.

    To use special characters such as single quotes in your command, wrap the
    whole command in double quotes.
    """  # noqa: E501
    # Optionally push local code to the cluster before running anything.
    if sync_dir is not None:
        ctx.invoke(
            sync_code,
            cluster_id=cluster_id,
            dcos_checkout_dir=str(sync_dir),
            transport=transport,
        )

    if transport == Transport.DOCKER_EXEC:
        # Propagate the terminal size so interactive programs render
        # correctly; see https://github.com/moby/moby/issues/35407.
        columns, rows = click.get_terminal_size()
        terminal_env = {
            'COLUMNS': str(columns),
            'LINES': str(rows),
        }
        env = {**terminal_env, **env}

    command = list(node_args)

    if no_test_env:
        # Bare run: no test environment setup, just execute on the node.
        try:
            node.run(
                args=command,
                log_output_live=False,
                tty=True,
                shell=True,
                env=env,
                transport=transport,
            )
        except subprocess.CalledProcessError as exc:
            sys.exit(exc.returncode)
        return

    cluster = ClusterContainers(
        cluster_id=cluster_id,
        transport=transport,
    ).cluster

    # Login credentials are defaults here: anything already in `env`
    # overrides them.
    login_env = {
        'DCOS_LOGIN_UNAME': dcos_login_uname,
        'DCOS_LOGIN_PW': dcos_login_pw,
    }
    env = {**login_env, **env}

    try:
        cluster.run_integration_tests(
            pytest_command=command,
            tty=True,
            env=env,
            test_host=node,
            transport=transport,
        )
    except subprocess.CalledProcessError as exc:
        sys.exit(exc.returncode)
def restart(ctx: click.core.Context, status: Status, **kwargs: Any) -> None:  # pylint: disable=unused-argument
    # Restart = stop then start. `status` and `**kwargs` exist only so this
    # command accepts the same options as `start`; `ctx.forward(start)`
    # re-sends this invocation's parameters to the `start` command, while
    # `ctx.invoke(stop)` calls `stop` with its defaults.
    ctx.invoke(stop)
    ctx.forward(start)