def install_dcos(
    ctx: click.core.Context,
    installer: Path,
    extra_config: Dict[str, Any],
    variant: str,
    workspace_dir: Path,
    license_key: Optional[Path],
    security_mode: Optional[str],
    cluster_id: str,
    genconf_dir: Optional[Path],
    wait_for_dcos: bool,
) -> None:
    """
    Install DC/OS on a provisioned Vagrant cluster.
    """
    # The VM representation and backend for the already-provisioned cluster.
    vms = ClusterVMs(cluster_id=cluster_id)
    backend = Vagrant()

    # Resolve sibling command names so messages can point users at them.
    doctor_command_name = command_path(sibling_ctx=ctx, command=doctor)
    wait_command_name = command_path(sibling_ctx=ctx, command=wait)
    message_for_doctor = get_doctor_message(
        doctor_command_name=doctor_command_name,
    )

    chosen_variant = get_install_variant(
        given_variant=variant,
        installer_path=installer,
        workspace_dir=workspace_dir,
        doctor_message=message_for_doctor,
    )
    config = get_config(
        cluster_representation=vms,
        extra_config=extra_config,
        dcos_variant=chosen_variant,
        security_mode=security_mode,
        license_key=license_key,
    )
    install_dcos_from_path(
        cluster_representation=vms,
        dcos_config=config,
        ip_detect_path=backend.ip_detect_path,
        doctor_message=message_for_doctor,
        dcos_installer=installer,
        local_genconf_dir=genconf_dir,
    )
    run_post_install_steps(
        cluster=vms.cluster,
        cluster_id=cluster_id,
        dcos_config=config,
        doctor_command_name=doctor_command_name,
        http_checks=True,
        wait_command_name=wait_command_name,
        wait_for_dcos=wait_for_dcos,
    )
def wait(
    ctx: click.core.Context,
    cluster_id: str,
    superuser_username: str,
    superuser_password: str,
    transport: Transport,
    skip_http_checks: bool,
    enable_spinner: bool,
) -> None:
    """
    Wait for DC/OS to start.
    """
    # Fail fast if there is no cluster with the given ID.
    check_cluster_id_exists(
        new_cluster_id=cluster_id,
        existing_cluster_ids=existing_cluster_ids(),
    )
    doctor_name = command_path(sibling_ctx=ctx, command=doctor)
    containers = ClusterContainers(
        cluster_id=cluster_id,
        transport=transport,
    )
    wait_for_dcos(
        cluster=containers.cluster,
        superuser_username=superuser_username,
        superuser_password=superuser_password,
        # HTTP checks are skipped only when explicitly requested.
        http_checks=not skip_http_checks,
        doctor_command_name=doctor_name,
        enable_spinner=enable_spinner,
    )
def wait(
    ctx: click.core.Context,
    cluster_id: str,
    superuser_username: str,
    superuser_password: str,
    enable_spinner: bool,
) -> None:
    """
    Wait for DC/OS to start.
    """
    # Fail fast if there is no cluster with the given ID.
    check_cluster_id_exists(
        new_cluster_id=cluster_id,
        existing_cluster_ids=existing_cluster_ids(),
    )
    doctor_name = command_path(sibling_ctx=ctx, command=doctor)
    vms = ClusterVMs(cluster_id=cluster_id)
    wait_for_dcos(
        cluster=vms.cluster,
        superuser_username=superuser_username,
        superuser_password=superuser_password,
        http_checks=True,
        doctor_command_name=doctor_name,
        enable_spinner=enable_spinner,
    )
def wait(
    ctx: click.core.Context,
    cluster_id: str,
    superuser_username: str,
    superuser_password: str,
    aws_region: str,
) -> None:
    """
    Wait for DC/OS to start.
    """
    # Fail fast if there is no cluster with the given ID in this region.
    check_cluster_id_exists(
        new_cluster_id=cluster_id,
        existing_cluster_ids=existing_cluster_ids(aws_region=aws_region),
    )
    doctor_name = command_path(sibling_ctx=ctx, command=doctor)
    instances = ClusterInstances(
        cluster_id=cluster_id,
        aws_region=aws_region,
    )
    wait_for_dcos(
        cluster=instances.cluster,
        superuser_username=superuser_username,
        superuser_password=superuser_password,
        http_checks=True,
        doctor_command_name=doctor_name,
    )
def run(
    ctx: click.core.Context,
    cluster_id: str,
    node_args: Tuple[str],
    sync_dir: Tuple[Path],
    dcos_login_uname: str,
    dcos_login_pw: str,
    test_env: bool,
    node: Tuple[str],
    env: Dict[str, str],
    transport: Transport,
) -> None:
    """
    Run an arbitrary command on a node or multiple nodes.

    To use special characters such as single quotes in your command, wrap the
    whole command in double quotes.
    """
    # Fail fast if there is no cluster with the given ID.
    check_cluster_id_exists(
        new_cluster_id=cluster_id,
        existing_cluster_ids=existing_cluster_ids(),
    )
    representation = ClusterContainers(
        cluster_id=cluster_id,
        transport=transport,
    )
    cluster = representation.cluster

    # Optionally push local DC/OS checkouts to the masters first.
    for checkout in sync_dir:
        sync_code_to_masters(
            cluster=cluster,
            dcos_checkout_dir=checkout,
            sudo=False,
        )

    inspect_name = command_path(
        sibling_ctx=ctx,
        command=inspect_cluster,
    )
    targets = get_nodes(
        cluster_id=cluster_id,
        cluster_representation=representation,
        node_references=node,
        inspect_command_name=inspect_name,
    )
    for target in targets:
        run_command(
            # A fresh list per node, in case the callee mutates it.
            args=list(node_args),
            cluster=cluster,
            host=target,
            use_test_env=test_env,
            dcos_login_uname=dcos_login_uname,
            dcos_login_pw=dcos_login_pw,
            env=env,
            transport=transport,
        )
def provision(
    ctx: click.core.Context,
    agents: int,
    masters: int,
    public_agents: int,
    workspace_dir: Path,
    cluster_id: str,
    enable_selinux_enforcing: bool,
    vm_memory_mb: int,
    enable_spinner: bool,
    vagrant_box_url: str,
    vagrant_box_version: str,
) -> None:
    """
    Provision a Vagrant cluster for installing DC/OS.

    Creates the requested number of master, agent and public agent VMs and,
    if requested, puts SELinux into enforcing mode on every node.
    """
    # Fail fast if a cluster with this ID already exists.
    check_cluster_id_unique(
        new_cluster_id=cluster_id,
        existing_cluster_ids=existing_cluster_ids(),
    )
    doctor_command_name = command_path(sibling_ctx=ctx, command=doctor)
    doctor_message = get_doctor_message(
        doctor_command_name=doctor_command_name,
    )
    # Attached to the VirtualBox VMs so the cluster and its workspace can be
    # identified later.
    description = {
        CLUSTER_ID_DESCRIPTION_KEY: cluster_id,
        WORKSPACE_DIR_DESCRIPTION_KEY: str(workspace_dir),
    }
    cluster_backend = Vagrant(
        workspace_dir=workspace_dir,
        virtualbox_description=json.dumps(obj=description),
        vm_memory_mb=vm_memory_mb,
        vagrant_box_url=vagrant_box_url,
        vagrant_box_version=vagrant_box_version,
    )
    cluster = create_cluster(
        cluster_backend=cluster_backend,
        masters=masters,
        agents=agents,
        public_agents=public_agents,
        doctor_message=doctor_message,
        enable_spinner=enable_spinner,
    )
    # The flag is loop-invariant: test it once instead of once per node, and
    # skip building the node set entirely when it is off.
    if enable_selinux_enforcing:
        nodes = {*cluster.masters, *cluster.agents, *cluster.public_agents}
        for node in nodes:
            node.run(args=['setenforce', '1'], sudo=True)
def send_file(
    ctx: click.core.Context,
    cluster_id: str,
    node: Tuple[str],
    transport: Transport,
    source: str,
    destination: str,
) -> None:
    """
    Send a file to a node or multiple nodes.
    """
    # Fail fast if there is no cluster with the given ID.
    check_cluster_id_exists(
        new_cluster_id=cluster_id,
        existing_cluster_ids=existing_cluster_ids(),
    )
    representation = ClusterContainers(
        cluster_id=cluster_id,
        transport=transport,
    )
    inspect_name = command_path(
        sibling_ctx=ctx,
        command=inspect_cluster,
    )
    targets = get_nodes(
        cluster_id=cluster_id,
        cluster_representation=representation,
        node_references=node,
        inspect_command_name=inspect_name,
    )
    # Convert once; ``Path`` objects are immutable so sharing them is safe.
    local = Path(source)
    remote = Path(destination)
    for target in targets:
        target.send_file(
            local_path=local,
            remote_path=remote,
            transport=transport,
            sudo=False,
        )
def send_file(
    ctx: click.core.Context,
    cluster_id: str,
    node: Tuple[str],
    source: Path,
    destination: Path,
) -> None:
    """
    Send a file to a node or multiple nodes.
    """
    # Fail fast if there is no cluster with the given ID.
    check_cluster_id_exists(
        new_cluster_id=cluster_id,
        existing_cluster_ids=existing_cluster_ids(),
    )
    vms = ClusterVMs(cluster_id=cluster_id)
    inspect_name = command_path(
        sibling_ctx=ctx,
        command=inspect_cluster,
    )
    targets = get_nodes(
        cluster_id=cluster_id,
        cluster_representation=vms,
        node_references=node,
        inspect_command_name=inspect_name,
    )
    for target in targets:
        target.send_file(
            local_path=source,
            remote_path=destination,
            transport=Transport.SSH,
            sudo=True,
        )
def create(
    ctx: click.core.Context,
    agents: int,
    installer: Path,
    cluster_id: str,
    docker_storage_driver: Optional[DockerStorageDriver],
    docker_version: DockerVersion,
    extra_config: Dict[str, Any],
    linux_distribution: Distribution,
    masters: int,
    public_agents: int,
    license_key: Optional[Path],
    security_mode: Optional[str],
    copy_to_master: List[Tuple[Path, Path]],
    genconf_dir: Optional[Path],
    workspace_dir: Path,
    custom_volume: List[Mount],
    custom_master_volume: List[Mount],
    custom_agent_volume: List[Mount],
    custom_public_agent_volume: List[Mount],
    variant: str,
    transport: Transport,
    wait_for_dcos: bool,
    network: Network,
    one_master_host_port_map: Dict[str, int],
    mount_sys_fs_cgroup: bool,
) -> None:
    """
    Create a DC/OS cluster.

    Provisions Docker containers for the requested node counts, generates
    and installs an SSH key pair, copies any requested files to the
    masters, installs DC/OS from ``installer`` and then runs the
    post-install steps (optionally waiting for DC/OS to come up).
    """
    # Fail fast if a cluster with this ID already exists.
    check_cluster_id_unique(
        new_cluster_id=cluster_id,
        existing_cluster_ids=existing_cluster_ids(),
    )
    # HTTP checks only work when talking to the nodes over SSH.
    http_checks = bool(transport == Transport.SSH)
    # Resolve sibling command names so messages can point users at them.
    wait_command_name = command_path(sibling_ctx=ctx, command=wait)
    doctor_command_name = command_path(sibling_ctx=ctx, command=doctor)
    doctor_message = get_doctor_message(
        doctor_command_name=doctor_command_name,
    )
    # Generate a throwaway key pair in the workspace; the private key is
    # moved into the cluster representation's expected location below.
    public_key_path = workspace_dir / 'id_rsa.pub'
    private_key_path = workspace_dir / 'id_rsa'
    write_key_pair(
        public_key_path=public_key_path,
        private_key_path=private_key_path,
    )
    dcos_variant = get_install_variant(
        given_variant=variant,
        installer_path=installer,
        workspace_dir=workspace_dir,
        doctor_message=doctor_message,
    )
    # This is useful for some people to identify containers.
    container_name_prefix = Docker().container_name_prefix + '-' + cluster_id
    cluster_backend = Docker(
        container_name_prefix=container_name_prefix,
        custom_container_mounts=custom_volume,
        custom_master_mounts=custom_master_volume,
        custom_agent_mounts=custom_agent_volume,
        custom_public_agent_mounts=custom_public_agent_volume,
        linux_distribution=linux_distribution,
        docker_version=docker_version,
        storage_driver=docker_storage_driver,
        # Labels let later commands find this cluster's containers and tell
        # node types apart.
        docker_container_labels={
            CLUSTER_ID_LABEL_KEY: cluster_id,
            WORKSPACE_DIR_LABEL_KEY: str(workspace_dir),
        },
        docker_master_labels={
            NODE_TYPE_LABEL_KEY: NODE_TYPE_MASTER_LABEL_VALUE,
        },
        docker_agent_labels={NODE_TYPE_LABEL_KEY: NODE_TYPE_AGENT_LABEL_VALUE},
        docker_public_agent_labels={
            NODE_TYPE_LABEL_KEY: NODE_TYPE_PUBLIC_AGENT_LABEL_VALUE,
        },
        workspace_dir=workspace_dir,
        transport=transport,
        network=network,
        one_master_host_port_map=one_master_host_port_map,
        mount_sys_fs_cgroup=mount_sys_fs_cgroup,
    )
    cluster = create_cluster(
        cluster_backend=cluster_backend,
        masters=masters,
        agents=agents,
        public_agents=public_agents,
        doctor_message=doctor_message,
    )
    cluster_containers = ClusterContainers(
        cluster_id=cluster_id,
        transport=transport,
    )
    # Move (not copy) the private key to where the representation expects it.
    # NOTE(review): ``mkdir(parents=True)`` has no ``exist_ok`` — presumably
    # the directory never pre-exists for a fresh cluster ID; verify.
    private_ssh_key_path = cluster_containers.ssh_key_path
    private_ssh_key_path.parent.mkdir(parents=True)
    private_key_path.replace(private_ssh_key_path)
    add_authorized_key(cluster=cluster, public_key_path=public_key_path)
    # Copy any user-requested (local, remote) file pairs to every master.
    for node in cluster.masters:
        for path_pair in copy_to_master:
            local_path, remote_path = path_pair
            node.send_file(
                local_path=local_path,
                remote_path=remote_path,
            )
    dcos_config = get_config(
        cluster_representation=cluster_containers,
        extra_config=extra_config,
        dcos_variant=dcos_variant,
        security_mode=security_mode,
        license_key=license_key,
    )
    install_dcos_from_path(
        cluster_representation=cluster_containers,
        dcos_config=dcos_config,
        ip_detect_path=cluster_backend.ip_detect_path,
        doctor_message=doctor_message,
        dcos_installer=installer,
        local_genconf_dir=genconf_dir,
    )
    run_post_install_steps(
        cluster=cluster,
        cluster_id=cluster_id,
        dcos_config=dcos_config,
        doctor_command_name=doctor_command_name,
        http_checks=http_checks,
        wait_command_name=wait_command_name,
        wait_for_dcos=wait_for_dcos,
    )
def provision(
    ctx: click.core.Context,
    agents: int,
    masters: int,
    public_agents: int,
    workspace_dir: Path,
    copy_to_master: List[Tuple[Path, Path]],
    aws_instance_type: str,
    aws_region: str,
    linux_distribution: str,
    cluster_id: str,
    enable_selinux_enforcing: bool,
    custom_tag: Dict[str, str],
    enable_spinner: bool,
) -> None:
    """
    Provision an AWS cluster to install DC/OS.

    Generates an SSH key pair, imports it into EC2, launches the requested
    node counts, optionally puts SELinux into enforcing mode on every node
    and copies any requested files to the masters.
    """
    # Fail fast if a cluster with this ID already exists in this region.
    check_cluster_id_unique(
        new_cluster_id=cluster_id,
        existing_cluster_ids=existing_cluster_ids(aws_region=aws_region),
    )
    # Generate a throwaway key pair and register it with EC2 under a
    # random name so launched instances accept our SSH key.
    ssh_keypair_dir = workspace_dir / 'ssh'
    ssh_keypair_dir.mkdir(parents=True)
    key_name = 'key-{random}'.format(random=uuid.uuid4().hex)
    public_key_path = ssh_keypair_dir / 'id_rsa.pub'
    private_key_path = ssh_keypair_dir / 'id_rsa'
    write_key_pair(
        public_key_path=public_key_path,
        private_key_path=private_key_path,
    )
    ec2 = boto3.resource('ec2', region_name=aws_region)
    ec2.import_key_pair(
        KeyName=key_name,
        PublicKeyMaterial=public_key_path.read_bytes(),
    )
    doctor_command_name = command_path(sibling_ctx=ctx, command=doctor)
    doctor_message = get_doctor_message(
        doctor_command_name=doctor_command_name,
    )
    # Default SSH user per supported distribution AMI.
    ssh_user = {
        Distribution.CENTOS_7: 'centos',
        Distribution.UBUNTU_16_04: 'ubuntu',
        Distribution.RHEL_7: 'ec2-user',
    }
    distribution = LINUX_DISTRIBUTIONS[linux_distribution]
    default_user = ssh_user[distribution]
    # Tags let later commands find the cluster's instances and tell node
    # types apart; user-supplied tags are merged in last.
    cluster_tags = {
        SSH_USER_TAG_KEY: default_user,
        CLUSTER_ID_TAG_KEY: cluster_id,
        WORKSPACE_DIR_TAG_KEY: str(workspace_dir),
        KEY_NAME_TAG_KEY: key_name,
        **custom_tag,
    }
    master_tags = {NODE_TYPE_TAG_KEY: NODE_TYPE_MASTER_TAG_VALUE}
    agent_tags = {NODE_TYPE_TAG_KEY: NODE_TYPE_AGENT_TAG_VALUE}
    public_agent_tags = {NODE_TYPE_TAG_KEY: NODE_TYPE_PUBLIC_AGENT_TAG_VALUE}
    cluster_backend = AWS(
        aws_key_pair=(key_name, private_key_path),
        workspace_dir=workspace_dir,
        aws_instance_type=aws_instance_type,
        aws_region=aws_region,
        linux_distribution=distribution,
        ec2_instance_tags=cluster_tags,
        master_ec2_instance_tags=master_tags,
        agent_ec2_instance_tags=agent_tags,
        public_agent_ec2_instance_tags=public_agent_tags,
        aws_cloudformation_stack_name=cluster_id,
    )
    cluster = create_cluster(
        cluster_backend=cluster_backend,
        masters=masters,
        agents=agents,
        public_agents=public_agents,
        doctor_message=doctor_message,
        enable_spinner=enable_spinner,
    )
    # The flag is loop-invariant: test it once instead of once per node, and
    # skip building the node set entirely when it is off.
    if enable_selinux_enforcing:
        nodes = {*cluster.masters, *cluster.agents, *cluster.public_agents}
        for node in nodes:
            node.run(args=['setenforce', '1'], sudo=True)
    # Copy any user-requested (local, remote) file pairs to every master.
    for node in cluster.masters:
        for path_pair in copy_to_master:
            local_path, remote_path = path_pair
            node.send_file(
                local_path=local_path,
                remote_path=remote_path,
                sudo=True,
            )
def provision(
    ctx: click.core.Context,
    agents: int,
    cluster_id: str,
    docker_storage_driver: Optional[DockerStorageDriver],
    docker_version: DockerVersion,
    linux_distribution: Distribution,
    masters: int,
    public_agents: int,
    workspace_dir: Path,
    custom_volume: List[Mount],
    custom_master_volume: List[Mount],
    custom_agent_volume: List[Mount],
    custom_public_agent_volume: List[Mount],
    transport: Transport,
    network: Network,
    one_master_host_port_map: Dict[str, int],
    mount_sys_fs_cgroup: bool,
    enable_spinner: bool,
) -> None:
    """
    Provision Docker containers to install a DC/OS cluster.
    """
    # Fail fast if a cluster with this ID already exists.
    check_cluster_id_unique(
        new_cluster_id=cluster_id,
        existing_cluster_ids=existing_cluster_ids(),
    )
    doctor_name = command_path(sibling_ctx=ctx, command=doctor)
    message_for_doctor = get_doctor_message(
        doctor_command_name=doctor_name,
    )
    # Generate a throwaway key pair; the private key is moved into place
    # after the containers exist.
    public_key_path = workspace_dir / 'id_rsa.pub'
    private_key_path = workspace_dir / 'id_rsa'
    write_key_pair(
        public_key_path=public_key_path,
        private_key_path=private_key_path,
    )
    # This is useful for some people to identify containers.
    name_prefix = Docker().container_name_prefix + '-' + cluster_id
    # Labels let later commands find this cluster's containers and tell
    # node types apart.
    shared_labels = {
        CLUSTER_ID_LABEL_KEY: cluster_id,
        WORKSPACE_DIR_LABEL_KEY: str(workspace_dir),
    }
    master_labels = {NODE_TYPE_LABEL_KEY: NODE_TYPE_MASTER_LABEL_VALUE}
    agent_labels = {NODE_TYPE_LABEL_KEY: NODE_TYPE_AGENT_LABEL_VALUE}
    public_agent_labels = {
        NODE_TYPE_LABEL_KEY: NODE_TYPE_PUBLIC_AGENT_LABEL_VALUE,
    }
    backend = Docker(
        container_name_prefix=name_prefix,
        custom_container_mounts=custom_volume,
        custom_master_mounts=custom_master_volume,
        custom_agent_mounts=custom_agent_volume,
        custom_public_agent_mounts=custom_public_agent_volume,
        linux_distribution=linux_distribution,
        docker_version=docker_version,
        storage_driver=docker_storage_driver,
        docker_container_labels=shared_labels,
        docker_master_labels=master_labels,
        docker_agent_labels=agent_labels,
        docker_public_agent_labels=public_agent_labels,
        workspace_dir=workspace_dir,
        transport=transport,
        network=network,
        one_master_host_port_map=one_master_host_port_map,
        mount_sys_fs_cgroup=mount_sys_fs_cgroup,
    )
    cluster = create_cluster(
        cluster_backend=backend,
        masters=masters,
        agents=agents,
        public_agents=public_agents,
        doctor_message=message_for_doctor,
        enable_spinner=enable_spinner,
    )
    representation = ClusterContainers(
        cluster_id=cluster_id,
        transport=transport,
    )
    # Move (not copy) the private key to where the representation expects
    # it, then authorize the matching public key on all nodes.
    key_destination = representation.ssh_key_path
    key_destination.parent.mkdir(parents=True)
    private_key_path.replace(key_destination)
    add_authorized_key(cluster=cluster, public_key_path=public_key_path)
def upgrade(
    ctx: click.core.Context,
    cluster_id: str,
    transport: Transport,
    extra_config: Dict[str, Any],
    security_mode: Optional[str],
    license_key: Optional[Path],
    variant: str,
    workspace_dir: Path,
    installer: Path,
    wait_for_dcos: bool,
    enable_spinner: bool,
    files_to_copy_to_genconf_dir: List[Tuple[Path, Path]],
) -> None:
    """
    Upgrade a cluster to a given version of DC/OS.
    """
    # Resolve sibling command names so messages can point users at them.
    doctor_name = command_path(sibling_ctx=ctx, command=doctor)
    wait_name = command_path(sibling_ctx=ctx, command=wait)
    message_for_doctor = get_doctor_message(
        doctor_command_name=doctor_name,
    )
    # Fail fast if there is no cluster with the given ID.
    check_cluster_id_exists(
        new_cluster_id=cluster_id,
        existing_cluster_ids=existing_cluster_ids(),
    )
    representation = ClusterContainers(
        cluster_id=cluster_id,
        transport=transport,
    )
    cluster = representation.cluster
    backend = Docker()
    chosen_variant = get_install_variant(
        given_variant=variant,
        installer_path=installer,
        workspace_dir=workspace_dir,
        doctor_message=message_for_doctor,
        enable_spinner=enable_spinner,
    )
    config = get_config(
        cluster_representation=representation,
        extra_config=extra_config,
        dcos_variant=chosen_variant,
        security_mode=security_mode,
        license_key=license_key,
    )
    cluster_upgrade_dcos_from_path(
        cluster=cluster,
        cluster_representation=representation,
        dcos_installer=installer,
        dcos_config=config,
        ip_detect_path=backend.ip_detect_path,
        doctor_message=message_for_doctor,
        files_to_copy_to_genconf_dir=files_to_copy_to_genconf_dir,
        enable_spinner=enable_spinner,
    )
    run_post_install_steps(
        cluster=cluster,
        cluster_id=cluster_id,
        dcos_config=config,
        doctor_command_name=doctor_name,
        # HTTP checks only work when talking to the nodes over SSH.
        http_checks=bool(transport == Transport.SSH),
        wait_command_name=wait_name,
        wait_for_dcos=wait_for_dcos,
        enable_spinner=enable_spinner,
    )
def create(
    ctx: click.core.Context,
    agents: int,
    installer_url: str,
    extra_config: Dict[str, Any],
    masters: int,
    public_agents: int,
    variant: str,
    workspace_dir: Path,
    license_key: Optional[Path],
    security_mode: Optional[str],
    copy_to_master: List[Tuple[Path, Path]],
    aws_region: str,
    linux_distribution: str,
    cluster_id: str,
    enable_selinux_enforcing: bool,
    genconf_dir: Optional[Path],
    custom_tag: Dict[str, str],
    wait_for_dcos: bool,
) -> None:
    """
    Create a DC/OS cluster.

    Launches EC2 instances for the requested node counts, optionally puts
    SELinux into enforcing mode, copies any requested files to the masters,
    installs DC/OS from ``installer_url`` and runs the post-install steps.
    """
    # Fail fast if a cluster with this ID already exists in this region.
    check_cluster_id_unique(
        new_cluster_id=cluster_id,
        existing_cluster_ids=existing_cluster_ids(aws_region=aws_region),
    )
    # Generate a throwaway key pair and register it with EC2 under a
    # random name so launched instances accept our SSH key.
    ssh_keypair_dir = workspace_dir / 'ssh'
    ssh_keypair_dir.mkdir(parents=True)
    key_name = 'key-{random}'.format(random=uuid.uuid4().hex)
    public_key_path = ssh_keypair_dir / 'id_rsa.pub'
    private_key_path = ssh_keypair_dir / 'id_rsa'
    write_key_pair(
        public_key_path=public_key_path,
        private_key_path=private_key_path,
    )
    ec2 = boto3.resource('ec2', region_name=aws_region)
    ec2.import_key_pair(
        KeyName=key_name,
        PublicKeyMaterial=public_key_path.read_bytes(),
    )
    # Resolve sibling command names so messages can point users at them.
    doctor_command_name = command_path(sibling_ctx=ctx, command=doctor)
    wait_command_name = command_path(sibling_ctx=ctx, command=wait)
    doctor_message = get_doctor_message(
        doctor_command_name=doctor_command_name,
    )
    # No local installer path here: DC/OS is installed from a URL.
    dcos_variant = get_install_variant(
        given_variant=variant,
        installer_path=None,
        workspace_dir=workspace_dir,
        doctor_message=doctor_message,
    )
    # Default SSH user per supported distribution AMI.
    ssh_user = {
        Distribution.CENTOS_7: 'centos',
        Distribution.COREOS: 'core',
        Distribution.UBUNTU_16_04: 'ubuntu',
        Distribution.RHEL_7: 'ec2-user',
    }
    distribution = LINUX_DISTRIBUTIONS[linux_distribution]
    default_user = ssh_user[distribution]
    # Tags let later commands find the cluster's instances and tell node
    # types apart; user-supplied tags are merged in last.
    cluster_tags = {
        SSH_USER_TAG_KEY: default_user,
        CLUSTER_ID_TAG_KEY: cluster_id,
        WORKSPACE_DIR_TAG_KEY: str(workspace_dir),
        KEY_NAME_TAG_KEY: key_name,
        **custom_tag,
    }
    master_tags = {NODE_TYPE_TAG_KEY: NODE_TYPE_MASTER_TAG_VALUE}
    agent_tags = {NODE_TYPE_TAG_KEY: NODE_TYPE_AGENT_TAG_VALUE}
    public_agent_tags = {NODE_TYPE_TAG_KEY: NODE_TYPE_PUBLIC_AGENT_TAG_VALUE}
    cluster_backend = AWS(
        aws_key_pair=(key_name, private_key_path),
        workspace_dir=workspace_dir,
        aws_region=aws_region,
        linux_distribution=distribution,
        ec2_instance_tags=cluster_tags,
        master_ec2_instance_tags=master_tags,
        agent_ec2_instance_tags=agent_tags,
        public_agent_ec2_instance_tags=public_agent_tags,
    )
    cluster = create_cluster(
        cluster_backend=cluster_backend,
        masters=masters,
        agents=agents,
        public_agents=public_agents,
        doctor_message=doctor_message,
    )
    # The flag is loop-invariant: test it once instead of once per node, and
    # skip building the node set entirely when it is off.
    if enable_selinux_enforcing:
        nodes = {*cluster.masters, *cluster.agents, *cluster.public_agents}
        for node in nodes:
            node.run(args=['setenforce', '1'], sudo=True)
    # Copy any user-requested (local, remote) file pairs to every master.
    for node in cluster.masters:
        for path_pair in copy_to_master:
            local_path, remote_path = path_pair
            node.send_file(
                local_path=local_path,
                remote_path=remote_path,
                sudo=True,
            )
    cluster_instances = ClusterInstances(
        cluster_id=cluster_id,
        aws_region=aws_region,
    )
    dcos_config = get_config(
        cluster_representation=cluster_instances,
        extra_config=extra_config,
        dcos_variant=dcos_variant,
        security_mode=security_mode,
        license_key=license_key,
    )
    install_dcos_from_url(
        cluster_representation=cluster_instances,
        dcos_config=dcos_config,
        dcos_installer_url=installer_url,
        doctor_message=doctor_message,
        local_genconf_dir=genconf_dir,
        ip_detect_path=cluster_backend.ip_detect_path,
    )
    run_post_install_steps(
        cluster=cluster,
        cluster_id=cluster_id,
        dcos_config=dcos_config,
        doctor_command_name=doctor_command_name,
        http_checks=True,
        wait_command_name=wait_command_name,
        wait_for_dcos=wait_for_dcos,
    )
def install_dcos(
    ctx: click.core.Context,
    cluster_id: str,
    genconf_dir: Optional[Path],
    installer: Path,
    license_key: Optional[Path],
    extra_config: Dict[str, Any],
    security_mode: Optional[str],
    variant: str,
    workspace_dir: Path,
    transport: Transport,
    wait_for_dcos: bool,
) -> None:
    """
    Install DC/OS on the given Docker cluster.
    """
    # Fail fast if there is no cluster with the given ID.
    check_cluster_id_exists(
        new_cluster_id=cluster_id,
        existing_cluster_ids=existing_cluster_ids(),
    )
    representation = ClusterContainers(
        cluster_id=cluster_id,
        transport=transport,
    )
    cluster = representation.cluster

    # Resolve sibling command names so messages can point users at them.
    doctor_name = command_path(sibling_ctx=ctx, command=doctor)
    wait_name = command_path(sibling_ctx=ctx, command=wait)
    message_for_doctor = get_doctor_message(
        doctor_command_name=doctor_name,
    )

    chosen_variant = get_install_variant(
        given_variant=variant,
        installer_path=installer,
        workspace_dir=workspace_dir,
        doctor_message=message_for_doctor,
    )
    config = get_config(
        cluster_representation=representation,
        extra_config=extra_config,
        dcos_variant=chosen_variant,
        security_mode=security_mode,
        license_key=license_key,
    )
    install_dcos_from_path(
        cluster_representation=representation,
        dcos_config=config,
        ip_detect_path=Docker().ip_detect_path,
        doctor_message=message_for_doctor,
        dcos_installer=installer,
        local_genconf_dir=genconf_dir,
    )
    run_post_install_steps(
        cluster=cluster,
        cluster_id=cluster_id,
        dcos_config=config,
        doctor_command_name=doctor_name,
        # HTTP checks only work when talking to the nodes over SSH.
        http_checks=bool(transport == Transport.SSH),
        wait_command_name=wait_name,
        wait_for_dcos=wait_for_dcos,
    )
def install_dcos(
    ctx: click.core.Context,
    installer_url: str,
    extra_config: Dict[str, Any],
    variant: str,
    workspace_dir: Path,
    license_key: Optional[Path],
    security_mode: Optional[str],
    aws_region: str,
    cluster_id: str,
    files_to_copy_to_genconf_dir: List[Tuple[Path, Path]],
    wait_for_dcos: bool,
    enable_spinner: bool,
) -> None:
    """
    Install DC/OS on a provisioned AWS cluster.
    """
    # Fail fast if there is no cluster with the given ID in this region.
    check_cluster_id_exists(
        new_cluster_id=cluster_id,
        existing_cluster_ids=existing_cluster_ids(aws_region=aws_region),
    )
    instances = ClusterInstances(
        cluster_id=cluster_id,
        aws_region=aws_region,
    )
    cluster = instances.cluster

    # Resolve sibling command names so messages can point users at them.
    doctor_name = command_path(sibling_ctx=ctx, command=doctor)
    wait_name = command_path(sibling_ctx=ctx, command=wait)
    message_for_doctor = get_doctor_message(
        doctor_command_name=doctor_name,
    )

    # No local installer path here: DC/OS is installed from a URL.
    chosen_variant = get_install_variant(
        given_variant=variant,
        installer_path=None,
        workspace_dir=workspace_dir,
        doctor_message=message_for_doctor,
        enable_spinner=enable_spinner,
    )
    config = get_config(
        cluster_representation=instances,
        extra_config=extra_config,
        dcos_variant=chosen_variant,
        security_mode=security_mode,
        license_key=license_key,
    )
    cluster_install_dcos_from_url(
        cluster=cluster,
        cluster_representation=instances,
        dcos_config=config,
        dcos_installer=installer_url,
        doctor_message=message_for_doctor,
        files_to_copy_to_genconf_dir=files_to_copy_to_genconf_dir,
        ip_detect_path=AWS().ip_detect_path,
        enable_spinner=enable_spinner,
    )
    run_post_install_steps(
        cluster=cluster,
        cluster_id=cluster_id,
        dcos_config=config,
        doctor_command_name=doctor_name,
        http_checks=True,
        wait_command_name=wait_name,
        wait_for_dcos=wait_for_dcos,
        enable_spinner=enable_spinner,
    )
def create(
    ctx: click.core.Context,
    agents: int,
    installer: Path,
    extra_config: Dict[str, Any],
    masters: int,
    public_agents: int,
    variant: str,
    workspace_dir: Path,
    license_key: Optional[Path],
    security_mode: Optional[str],
    copy_to_master: List[Tuple[Path, Path]],
    cluster_id: str,
    enable_selinux_enforcing: bool,
    genconf_dir: Optional[Path],
    wait_for_dcos: bool,
) -> None:
    """
    Create a DC/OS cluster.

    Provisions Vagrant VMs for the requested node counts, optionally puts
    SELinux into enforcing mode, copies any requested files to the masters,
    installs DC/OS from ``installer`` and runs the post-install steps.
    """
    # Fail fast if a cluster with this ID already exists.
    check_cluster_id_unique(
        new_cluster_id=cluster_id,
        existing_cluster_ids=existing_cluster_ids(),
    )
    # Resolve sibling command names so messages can point users at them.
    doctor_command_name = command_path(sibling_ctx=ctx, command=doctor)
    wait_command_name = command_path(sibling_ctx=ctx, command=wait)
    doctor_message = get_doctor_message(
        doctor_command_name=doctor_command_name,
    )
    dcos_variant = get_install_variant(
        given_variant=variant,
        installer_path=installer,
        workspace_dir=workspace_dir,
        doctor_message=doctor_message,
    )
    # Attached to the VirtualBox VMs so the cluster and its workspace can be
    # identified later.
    description = {
        CLUSTER_ID_DESCRIPTION_KEY: cluster_id,
        WORKSPACE_DIR_DESCRIPTION_KEY: str(workspace_dir),
    }
    cluster_backend = Vagrant(
        workspace_dir=workspace_dir,
        virtualbox_description=json.dumps(obj=description),
    )
    cluster = create_cluster(
        cluster_backend=cluster_backend,
        masters=masters,
        agents=agents,
        public_agents=public_agents,
        doctor_message=doctor_message,
    )
    # The flag is loop-invariant: test it once instead of once per node, and
    # skip building the node set entirely when it is off.
    if enable_selinux_enforcing:
        nodes = {*cluster.masters, *cluster.agents, *cluster.public_agents}
        for node in nodes:
            node.run(args=['setenforce', '1'], sudo=True)
    # Copy any user-requested (local, remote) file pairs to every master.
    for node in cluster.masters:
        for path_pair in copy_to_master:
            local_path, remote_path = path_pair
            node.send_file(
                local_path=local_path,
                remote_path=remote_path,
            )
    cluster_vms = ClusterVMs(cluster_id=cluster_id)
    dcos_config = get_config(
        cluster_representation=cluster_vms,
        extra_config=extra_config,
        dcos_variant=dcos_variant,
        security_mode=security_mode,
        license_key=license_key,
    )
    install_dcos_from_path(
        cluster_representation=cluster_vms,
        dcos_config=dcos_config,
        ip_detect_path=cluster_backend.ip_detect_path,
        doctor_message=doctor_message,
        dcos_installer=installer,
        local_genconf_dir=genconf_dir,
    )
    run_post_install_steps(
        cluster=cluster,
        cluster_id=cluster_id,
        dcos_config=dcos_config,
        doctor_command_name=doctor_command_name,
        http_checks=True,
        wait_command_name=wait_command_name,
        wait_for_dcos=wait_for_dcos,
    )