Example #1
def _oss_distribution_test(
    distribution: Distribution,
    oss_installer_url: str,
) -> None:
    """
    Assert that given a ``linux_distribution``, an open source DC/OS
    ``Cluster`` with the Linux distribution is started.

    We use this rather than pytest parameterization so that we can separate
    the tests in ``.travis.yml``.
    """
    cluster_backend = AWS(linux_distribution=distribution)
    with Cluster(
            cluster_backend=cluster_backend,
            masters=1,
            agents=0,
            public_agents=0,
    ) as cluster:
        cluster.install_dcos_from_url(
            dcos_installer=oss_installer_url,
            dcos_config=cluster.base_config,
            output=Output.CAPTURE,
            ip_detect_path=cluster_backend.ip_detect_path,
        )
        cluster.wait_for_dcos_oss()
        (master, ) = cluster.masters
        node_distribution = _get_node_distribution(node=master)

    assert node_distribution == distribution
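
These excerpts omit their module headers. Assuming they come from the dcos-e2e test suite, a header along these lines would cover the names used on this page; the exact module paths are an assumption based on the public dcos-e2e package layout and may differ between library versions:

# Hypothetical header: module paths follow the public dcos-e2e layout and
# may differ between library versions.
import uuid
from pathlib import Path

import pytest
from passlib.hash import sha512_crypt

from dcos_e2e.backends import AWS
from dcos_e2e.cluster import Cluster
from dcos_e2e.distributions import Distribution
from dcos_e2e.node import Node, Output, Role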
Example #2
    def test_custom(self) -> None:
        """
        It is possible to set node EC2 instance tags.
        """
        cluster_key = uuid.uuid4().hex
        cluster_value = uuid.uuid4().hex
        cluster_tags = {cluster_key: cluster_value}

        master_key = uuid.uuid4().hex
        master_value = uuid.uuid4().hex
        master_tags = {master_key: master_value}

        agent_key = uuid.uuid4().hex
        agent_value = uuid.uuid4().hex
        agent_tags = {agent_key: agent_value}

        public_agent_key = uuid.uuid4().hex
        public_agent_value = uuid.uuid4().hex
        public_agent_tags = {public_agent_key: public_agent_value}

        cluster_backend = AWS(
            ec2_instance_tags=cluster_tags,
            master_ec2_instance_tags=master_tags,
            agent_ec2_instance_tags=agent_tags,
            public_agent_ec2_instance_tags=public_agent_tags,
        )

        with Cluster(cluster_backend=cluster_backend) as cluster:
            for node in cluster.masters:
                node_instance = _get_ec2_instance_from_node(
                    node=node,
                    aws_region=cluster_backend.aws_region,
                )
                node_tags = _tag_dict(instance=node_instance)
                assert node_tags[cluster_key] == cluster_value
                assert node_tags[master_key] == master_value
                assert agent_key not in node_tags
                assert public_agent_key not in node_tags

            for node in cluster.agents:
                node_instance = _get_ec2_instance_from_node(
                    node=node,
                    aws_region=cluster_backend.aws_region,
                )
                node_tags = _tag_dict(instance=node_instance)
                assert node_tags[cluster_key] == cluster_value
                assert node_tags[agent_key] == agent_value
                assert master_key not in node_tags
                assert public_agent_key not in node_tags

            for node in cluster.public_agents:
                node_instance = _get_ec2_instance_from_node(
                    node=node,
                    aws_region=cluster_backend.aws_region,
                )
                node_tags = _tag_dict(instance=node_instance)
                assert node_tags[cluster_key] == cluster_value
                assert node_tags[public_agent_key] == public_agent_value
                assert master_key not in node_tags
                assert agent_key not in node_tags
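
The helpers ``_get_ec2_instance_from_node`` and ``_tag_dict`` are not shown in this excerpt. A minimal sketch of what they could look like, assuming a node can be matched to its EC2 instance by public IP address via boto3:

from typing import Any, Dict

import boto3

from dcos_e2e.node import Node


def _get_ec2_instance_from_node(node: Node, aws_region: str) -> Any:
    # Hypothetical helper: match the node to its EC2 instance by public IP.
    ec2 = boto3.resource('ec2', region_name=aws_region)
    [instance] = list(
        ec2.instances.filter(
            Filters=[{
                'Name': 'ip-address',
                'Values': [str(node.public_ip_address)],
            }],
        ),
    )
    return instance


def _tag_dict(instance: Any) -> Dict[str, str]:
    # Flatten the instance's list of {'Key': ..., 'Value': ...} tag pairs.
    return {tag['Key']: tag['Value'] for tag in instance.tags or []}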
Example #3
    def test_destroy_node(self) -> None:
        """
        Destroying a particular node is not supported on the AWS backend.
        """
        with Cluster(cluster_backend=AWS()) as cluster:
            (agent, ) = cluster.agents
            with pytest.raises(NotImplementedError):
                cluster.destroy_node(node=agent)
Example #4
    def base_config(self) -> Dict[str, Any]:
        """
        Return a base configuration for installing DC/OS OSS.
        """
        backend = AWS()

        return {
            **self.cluster.base_config,
            **backend.base_config,
        }
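
Because later entries win in a ``dict`` unpacking merge, keys from ``backend.base_config`` override any duplicates from ``self.cluster.base_config``. A tiny illustration with made-up keys:

# Made-up config values, purely to show merge precedence.
cluster_defaults = {'resolvers': ['8.8.8.8'], 'ssh_port': 22}
backend_defaults = {'ssh_port': 2222}

merged = {**cluster_defaults, **backend_defaults}
assert merged == {'resolvers': ['8.8.8.8'], 'ssh_port': 2222}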
Example #5
    def test_install_dcos_from_path(self, oss_artifact: Path) -> None:
        """
        It is possible to install DC/OS on an AWS cluster from a local path.
        """
        with Cluster(cluster_backend=AWS()) as cluster:
            cluster.install_dcos_from_path(
                build_artifact=oss_artifact,
                dcos_config=cluster.base_config,
            )

            cluster.wait_for_dcos_oss()
Example #6
    def test_linux_distribution_coreos(self) -> None:
        """
        The AWS backend does not support the COREOS Linux distribution.
        """
        with pytest.raises(NotImplementedError) as excinfo:
            AWS(linux_distribution=Distribution.COREOS)

        expected_error = (
            'The COREOS Linux distribution is currently not supported by '
            'the AWS backend.')

        assert str(excinfo.value) == expected_error
Example #7
    def test_run_enterprise_integration_test(
        self,
        ee_installer_url: str,
        license_key_contents: str,
        linux_distribution: Distribution,
    ) -> None:
        """
        It is possible to run DC/OS integration tests on AWS.
        This test module only requires a single master node.
        """
        superuser_username = str(uuid.uuid4())
        superuser_password = str(uuid.uuid4())
        config = {
            'superuser_username': superuser_username,
            'superuser_password_hash': sha512_crypt.hash(superuser_password),
            'fault_domain_enabled': False,
            'license_key_contents': license_key_contents,
            'security': 'strict',
        }

        cluster_backend = AWS(linux_distribution=linux_distribution)

        with Cluster(
                cluster_backend=cluster_backend,
                masters=1,
        ) as cluster:

            cluster.install_dcos_from_url(
                dcos_installer=ee_installer_url,
                dcos_config={
                    **cluster.base_config,
                    **config,
                },
                output=Output.LOG_AND_CAPTURE,
                ip_detect_path=cluster_backend.ip_detect_path,
            )

            cluster.wait_for_dcos_ee(
                superuser_username=superuser_username,
                superuser_password=superuser_password,
            )

            # No error is raised with a successful command.
            # We choose a test file which runs very quickly.
            fast_test_file = 'test_marathon_authn_authz.py'
            cluster.run_with_test_environment(
                args=['pytest', '-vvv', '-s', '-x', fast_test_file],
                env={
                    'DCOS_LOGIN_UNAME': superuser_username,
                    'DCOS_LOGIN_PW': superuser_password,
                },
                output=Output.LOG_AND_CAPTURE,
            )
Example #8
    def test_linux_distribution_ubuntu(self) -> None:
        """
        The AWS backend does not support the Ubuntu Linux distribution.
        """
        with pytest.raises(NotImplementedError) as excinfo:
            AWS(linux_distribution=Distribution.UBUNTU_16_04)

        expected_error = (
            'The UBUNTU_16_04 Linux distribution is currently not supported '
            'by the AWS backend.')

        assert str(excinfo.value) == expected_error
Example #9
    def destroy(self) -> None:
        """
        Destroy this cluster.
        """
        backend = AWS()
        deployment_name = self._cluster_id
        masters = len(self.masters)
        agents = len(self.agents)
        public_agents = len(self.public_agents)

        # We need this to be set but not necessarily correct.
        aws_instance_type = backend.aws_instance_type

        launch_config = {
            'admin_location': backend.admin_location,
            'aws_region': self._aws_region,
            'deployment_name': deployment_name,
            'installer_url': 'https://example.com',
            'instance_type': aws_instance_type,
            'launch_config_version': 1,
            'num_masters': masters,
            'num_private_agents': agents,
            'num_public_agents': public_agents,
            'platform': 'aws',
            'provider': 'onprem',
        }

        launch_config['dcos_config'] = backend.base_config
        validated_launch_config = config.get_validated_config(
            user_config=launch_config,
            config_dir=str(self._workspace_dir),
        )
        cloudformation = boto3.resource(
            'cloudformation',
            region_name=self._aws_region,
        )
        stack_filter = cloudformation.stacks.filter(StackName=self._cluster_id)
        filtered_stacks = stack_filter.all()
        [stack] = list(filtered_stacks)
        stack_id = stack.stack_id
        launcher = get_launcher(  # type: ignore
            config=validated_launch_config,
        )
        # This matches what happens in
        # ``dcos_launch.aws.DcosCloudformationLauncher.create``.
        launcher.config['stack_id'] = stack_id
        key_helper_details = launcher.key_helper()
        zen_helper_details = launcher.zen_helper()
        launcher.config['temp_resources'] = {
            **key_helper_details,
            **zen_helper_details,
        }
        launcher.delete()
Example #10
    def test_install_dcos_from_path(self, oss_artifact: Path) -> None:
        """
        It is possible to install DC/OS on an AWS cluster from a local path.
        """
        cluster_backend = AWS()
        with Cluster(cluster_backend=cluster_backend) as cluster:
            cluster.install_dcos_from_path(
                build_artifact=oss_artifact,
                dcos_config=cluster.base_config,
                ip_detect_path=cluster_backend.ip_detect_path,
                log_output_live=True,
            )
            cluster.wait_for_dcos_oss()
Example #11
    def test_install_dcos_from_path(self, oss_installer: Path) -> None:
        """
        It is possible to install DC/OS on an AWS cluster from a local path.
        """
        cluster_backend = AWS()
        with Cluster(cluster_backend=cluster_backend) as cluster:
            cluster.install_dcos_from_path(
                dcos_installer=oss_installer,
                dcos_config=cluster.base_config,
                ip_detect_path=cluster_backend.ip_detect_path,
                output=Output.LOG_AND_CAPTURE,
            )
            cluster.wait_for_dcos_oss()
Example #12
    def test_run_enterprise_integration_test(
        self,
        ee_artifact_url: str,
        license_key_contents: str,
        linux_distribution: Distribution,
    ) -> None:
        """
        It is possible to run DC/OS integration tests on AWS.
        This test module only requires a single master node.
        """
        superuser_username = str(uuid.uuid4())
        superuser_password = str(uuid.uuid4())
        config = {
            'superuser_username': superuser_username,
            'superuser_password_hash': sha512_crypt.hash(superuser_password),
            'fault_domain_enabled': False,
            'license_key_contents': license_key_contents,
            'security': 'strict',
        }

        cluster_backend = AWS(linux_distribution=linux_distribution)

        with Cluster(
            cluster_backend=cluster_backend,
            masters=1,
        ) as cluster:

            cluster.install_dcos_from_url(
                build_artifact=ee_artifact_url,
                dcos_config={
                    **cluster.base_config,
                    **config,
                },
                log_output_live=True,
                ip_detect_path=cluster_backend.ip_detect_path,
            )

            cluster.wait_for_dcos_ee(
                superuser_username=superuser_username,
                superuser_password=superuser_password,
            )

            # No error is raised with a successful command.
            cluster.run_integration_tests(
                pytest_command=['pytest', '-vvv', '-s', '-x', 'test_tls.py'],
                env={
                    'DCOS_LOGIN_UNAME': superuser_username,
                    'DCOS_LOGIN_PW': superuser_password,
                },
                log_output_live=True,
            )
Example #13
def aws_instance_type_option(
    command: Callable[..., None],
) -> Callable[..., None]:
    """
    An option decorator for AWS instance types.
    """
    default_instance_type = AWS().aws_instance_type
    function = click.option(
        '--aws-instance-type',
        type=str,
        default=default_instance_type,
        show_default=True,
        help='The AWS instance type to use.',
    )(command)  # type: Callable[..., None]
    return function
Example #14
def aws_region_option(command: Callable[..., None]) -> Callable[..., None]:
    """
    An option decorator for AWS regions.
    """
    default_region = AWS().aws_region

    function = click.option(
        '--aws-region',
        type=str,
        default=default_region,
        show_default=True,
        help='The AWS region to use.',
    )(command)  # type: Callable[..., None]
    return function
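
For context, these decorators attach ``--aws-instance-type`` and ``--aws-region`` options to a click command. A hypothetical command wiring them together:

import click


@click.command()
@aws_instance_type_option
@aws_region_option
def show_defaults(aws_instance_type: str, aws_region: str) -> None:
    # Hypothetical command, not part of the excerpt.
    click.echo(
        'Instance type {instance_type} in region {region}.'.format(
            instance_type=aws_instance_type,
            region=aws_region,
        ),
    )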
Example #15
    def test_copy_to_installer_not_supported(self) -> None:
        """
        The AWS backend does not support copying files to the installer.
        """
        with pytest.raises(NotImplementedError) as excinfo:
            Cluster(
                cluster_backend=AWS(),
                files_to_copy_to_installer={Path('/'): Path('/')},
            )

        expected_error = (
            'Copying files to the installer is currently not supported by the '
            'AWS backend.')

        assert str(excinfo.value) == expected_error
Example #16
def _enterprise_distribution_test(
    distribution: Distribution,
    ee_installer_url: str,
    license_key_contents: str,
) -> None:
    """
    Assert that given a ``linux_distribution``, a DC/OS Enterprise ``Cluster``
    with the Linux distribution is started.

    We use this rather than pytest parameterization so that we can separate
    the tests in ``.travis.yml``.
    """
    superuser_username = str(uuid.uuid4())
    superuser_password = str(uuid.uuid4())
    config = {
        'superuser_username': superuser_username,
        'superuser_password_hash': sha512_crypt.hash(superuser_password),
        'fault_domain_enabled': False,
        'license_key_contents': license_key_contents,
    }

    cluster_backend = AWS(linux_distribution=distribution)
    with Cluster(
            cluster_backend=cluster_backend,
            masters=1,
            agents=0,
            public_agents=0,
    ) as cluster:
        cluster.install_dcos_from_url(
            dcos_installer=ee_installer_url,
            dcos_config={
                **cluster.base_config,
                **config,
            },
            ip_detect_path=cluster_backend.ip_detect_path,
            output=Output.CAPTURE,
        )
        cluster.wait_for_dcos_ee(
            superuser_username=superuser_username,
            superuser_password=superuser_password,
        )
        (master, ) = cluster.masters
        node_distribution = _get_node_distribution(node=master)

    assert node_distribution == distribution
Example #17
    def test_default_distribution(self) -> None:
        """
        The default Linux distribution is CentOS 7.

        This test does not wait for DC/OS and we do not test DC/OS Enterprise
        because these are covered by other tests which use the default
        settings.
        """
        with Cluster(
                cluster_backend=AWS(),
                masters=1,
                agents=0,
                public_agents=0,
        ) as cluster:
            (master, ) = cluster.masters
            node_distribution = _get_node_distribution(node=master)

        assert node_distribution == Distribution.CENTOS_7
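
Several examples call ``_get_node_distribution``, which is not shown. One plausible implementation, assuming the distribution can be recognized from ``/etc/os-release`` on the node:

from dcos_e2e.distributions import Distribution
from dcos_e2e.node import Node


def _get_node_distribution(node: Node) -> Distribution:
    # Hypothetical helper: inspect /etc/os-release to identify the OS.
    cat_result = node.run(args=['cat', '/etc/os-release'])
    contents = cat_result.stdout.decode()
    markers = {
        'CentOS Linux 7': Distribution.CENTOS_7,
        'Red Hat Enterprise Linux': Distribution.RHEL_7,
        'Ubuntu 16.04': Distribution.UBUNTU_16_04,
    }
    for marker, distribution in markers.items():
        if marker in contents:
            return distribution
    raise ValueError('Unrecognized distribution:\n' + contents)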
Example #18
    def test_install_dcos_with_custom_genconf(
        self,
        oss_installer_url: str,
        tmp_path: Path,
    ) -> None:
        """
        It is possible to install DC/OS on an AWS including
        custom files in the ``genconf`` directory.
        """
        cluster_backend = AWS()
        with Cluster(
            cluster_backend=cluster_backend,
            agents=0,
            public_agents=0,
        ) as cluster:
            (master, ) = cluster.masters
            ip_detect_file = tmp_path / 'ip-detect'
            ip_detect_contents = dedent(
                """\
                #!/bin/bash
                echo {ip_address}
                """,
            ).format(ip_address=master.private_ip_address)
            ip_detect_file.write_text(ip_detect_contents)

            cluster.install_dcos_from_url(
                dcos_installer=oss_installer_url,
                dcos_config=cluster.base_config,
                output=Output.LOG_AND_CAPTURE,
                ip_detect_path=cluster_backend.ip_detect_path,
                files_to_copy_to_genconf_dir=[
                    (ip_detect_file, Path('/genconf/ip-detect')),
                ],
            )
            cluster.wait_for_dcos_oss()
            cat_result = master.run(
                args=['cat', '/opt/mesosphere/bin/detect_ip'],
            )
            node_script_contents = cat_result.stdout.decode()
            assert node_script_contents == ip_detect_contents
            backend_script_path = cluster_backend.ip_detect_path
            backend_script_contents = backend_script_path.read_text()
            assert node_script_contents != backend_script_contents
Example #19
    def test_install_dcos_with_custom_ip_detect(
        self,
        oss_artifact_url: str,
        tmpdir: local,
    ) -> None:
        """
        It is possible to install DC/OS on an AWS with a custom IP detect
        script.
        """
        cluster_backend = AWS()
        with Cluster(
            cluster_backend=cluster_backend,
            agents=0,
            public_agents=0,
        ) as cluster:
            (master, ) = cluster.masters
            ip_detect_file = tmpdir.join('ip-detect')
            ip_detect_contents = dedent(
                """\
                #!/bin/bash
                echo {ip_address}
                """,
            ).format(ip_address=master.private_ip_address)
            ip_detect_file.write(ip_detect_contents)

            cluster.install_dcos_from_url(
                build_artifact=oss_artifact_url,
                dcos_config=cluster.base_config,
                log_output_live=True,
                ip_detect_path=Path(str(ip_detect_file)),
            )
            cluster.wait_for_dcos_oss()
            cat_result = master.run(
                args=['cat', '/opt/mesosphere/bin/detect_ip'],
            )
            node_script_contents = cat_result.stdout.decode()
            assert node_script_contents == ip_detect_contents
            backend_script_path = cluster_backend.ip_detect_path
            backend_script_contents = backend_script_path.read_text()
            assert node_script_contents != backend_script_contents
Example #20
    def test_install_dcos_from_node(
        self,
        oss_installer_url: str,
    ) -> None:
        """
        It is possible to install DC/OS on an AWS cluster node by node.
        """
        cluster_backend = AWS()
        with Cluster(
                cluster_backend=cluster_backend,
                agents=0,
                public_agents=0,
        ) as cluster:
            (master, ) = cluster.masters
            master.install_dcos_from_url(
                dcos_installer=oss_installer_url,
                dcos_config=cluster.base_config,
                role=Role.MASTER,
                output=Output.LOG_AND_CAPTURE,
                ip_detect_path=cluster_backend.ip_detect_path,
            )
            cluster.wait_for_dcos_oss()
Example #21
    def test_install_dcos_from_node(
        self,
        oss_artifact_url: str,
    ) -> None:
        """
        It is possible to install DC/OS on an AWS cluster node by node.
        """
        cluster_backend = AWS()
        with Cluster(
            cluster_backend=cluster_backend,
            agents=0,
            public_agents=0,
        ) as cluster:
            (master, ) = cluster.masters
            master.install_dcos_from_url(
                build_artifact=oss_artifact_url,
                dcos_config=cluster.base_config,
                role=Role.MASTER,
                log_output_live=True,
                ip_detect_path=cluster_backend.ip_detect_path,
            )
            cluster.wait_for_dcos_oss()
Example #22
    def test_install_dcos_from_path(self) -> None:
        """
        The AWS backend requires a build artifact URL in order to launch a
        DC/OS cluster.
        """
        with Cluster(
                cluster_backend=AWS(),
                masters=1,
                agents=0,
                public_agents=0,
        ) as cluster:
            with pytest.raises(NotImplementedError) as excinfo:
                cluster.install_dcos_from_path(
                    build_artifact=Path('/foo'),
                    dcos_config=cluster.base_config,
                )

        expected_error = (
            'The AWS backend does not support the installation of build '
            'artifacts passed via path. This is because a more efficient '
            'installation method exists in ``install_dcos_from_url``.')

        assert str(excinfo.value) == expected_error
Example #23
    def test_custom_key_pair(self, tmp_path: Path) -> None:
        """
        It is possible to pass a custom key pair to the AWS backend.
        """
        key_name = 'e2e-test-{random}'.format(random=uuid.uuid4().hex)
        private_key_path = tmp_path / 'private_key'
        public_key_path = tmp_path / 'public_key'
        _write_key_pair(
            public_key_path=public_key_path,
            private_key_path=private_key_path,
        )
        backend = AWS(aws_key_pair=(key_name, private_key_path))
        region_name = backend.aws_region
        ec2 = boto3.client('ec2', region_name=region_name)
        ec2.import_key_pair(
            KeyName=key_name,
            PublicKeyMaterial=public_key_path.read_bytes(),
        )

        try:
            with Cluster(
                    cluster_backend=backend,
                    agents=0,
                    public_agents=0,
            ) as cluster:
                (master, ) = cluster.masters
                node = Node(
                    public_ip_address=master.public_ip_address,
                    private_ip_address=master.private_ip_address,
                    default_user=master.default_user,
                    ssh_key_path=private_key_path,
                )

                node.run(args=['echo', '1'])
        finally:
            ec2.delete_key_pair(KeyName=key_name)
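
``_write_key_pair`` is not part of the excerpt. A minimal sketch using the ``cryptography`` package, assuming an unencrypted RSA key pair in OpenSSH-compatible formats:

from pathlib import Path

from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa


def _write_key_pair(public_key_path: Path, private_key_path: Path) -> None:
    # Hypothetical helper: generate an RSA key pair for SSH access.
    key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
    private_key_path.write_bytes(
        key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.TraditionalOpenSSL,
            encryption_algorithm=serialization.NoEncryption(),
        ),
    )
    public_key_path.write_bytes(
        key.public_key().public_bytes(
            encoding=serialization.Encoding.OpenSSH,
            format=serialization.PublicFormat.OpenSSH,
        ),
    )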
Example #24
    def test_admin_location(self) -> None:
        """
        The default ``admin_location`` is correct.
        """
        assert AWS().admin_location == '0.0.0.0/0'
Example #25
    def test_aws_instance_type(self) -> None:
        """
        The default ``aws_instance_type`` is correct.
        """
        assert AWS().aws_instance_type == 'm4.large'
Example #26
def provision(
    ctx: click.core.Context,
    agents: int,
    masters: int,
    public_agents: int,
    workspace_dir: Path,
    copy_to_master: List[Tuple[Path, Path]],
    aws_instance_type: str,
    aws_region: str,
    linux_distribution: str,
    cluster_id: str,
    enable_selinux_enforcing: bool,
    custom_tag: Dict[str, str],
    enable_spinner: bool,
) -> None:
    """
    Provision an AWS cluster to install DC/OS.
    """
    check_cluster_id_unique(
        new_cluster_id=cluster_id,
        existing_cluster_ids=existing_cluster_ids(aws_region=aws_region),
    )
    ssh_keypair_dir = workspace_dir / 'ssh'
    ssh_keypair_dir.mkdir(parents=True)
    key_name = 'key-{random}'.format(random=uuid.uuid4().hex)
    public_key_path = ssh_keypair_dir / 'id_rsa.pub'
    private_key_path = ssh_keypair_dir / 'id_rsa'
    write_key_pair(
        public_key_path=public_key_path,
        private_key_path=private_key_path,
    )

    ec2 = boto3.resource('ec2', region_name=aws_region)
    ec2.import_key_pair(
        KeyName=key_name,
        PublicKeyMaterial=public_key_path.read_bytes(),
    )

    doctor_command_name = command_path(sibling_ctx=ctx, command=doctor)
    doctor_message = get_doctor_message(
        doctor_command_name=doctor_command_name,
    )
    ssh_user = {
        Distribution.CENTOS_7: 'centos',
        Distribution.UBUNTU_16_04: 'ubuntu',
        Distribution.RHEL_7: 'ec2-user',
    }

    distribution = LINUX_DISTRIBUTIONS[linux_distribution]

    default_user = ssh_user[distribution]

    cluster_tags = {
        SSH_USER_TAG_KEY: default_user,
        CLUSTER_ID_TAG_KEY: cluster_id,
        WORKSPACE_DIR_TAG_KEY: str(workspace_dir),
        KEY_NAME_TAG_KEY: key_name,
        **custom_tag,
    }

    master_tags = {NODE_TYPE_TAG_KEY: NODE_TYPE_MASTER_TAG_VALUE}
    agent_tags = {NODE_TYPE_TAG_KEY: NODE_TYPE_AGENT_TAG_VALUE}
    public_agent_tags = {NODE_TYPE_TAG_KEY: NODE_TYPE_PUBLIC_AGENT_TAG_VALUE}
    cluster_backend = AWS(
        aws_key_pair=(key_name, private_key_path),
        workspace_dir=workspace_dir,
        aws_instance_type=aws_instance_type,
        aws_region=aws_region,
        linux_distribution=distribution,
        ec2_instance_tags=cluster_tags,
        master_ec2_instance_tags=master_tags,
        agent_ec2_instance_tags=agent_tags,
        public_agent_ec2_instance_tags=public_agent_tags,
        aws_cloudformation_stack_name=cluster_id,
    )

    cluster = create_cluster(
        cluster_backend=cluster_backend,
        masters=masters,
        agents=agents,
        public_agents=public_agents,
        doctor_message=doctor_message,
        enable_spinner=enable_spinner,
    )

    nodes = {*cluster.masters, *cluster.agents, *cluster.public_agents}
    for node in nodes:
        if enable_selinux_enforcing:
            node.run(args=['setenforce', '1'], sudo=True)

    for node in cluster.masters:
        for path_pair in copy_to_master:
            local_path, remote_path = path_pair
            node.send_file(
                local_path=local_path,
                remote_path=remote_path,
                sudo=True,
            )
Example #27
def run_tests(e2e_backend, installer_url, dcos_license, dcos_url,
              admin_username, admin_password, ssh_user, ssh_key_path):

    os.environ["CLI_TEST_SSH_USER"] = ssh_user
    os.environ["CLI_TEST_MASTER_PROXY"] = "1"
    os.environ["CLI_TEST_SSH_KEY_PATH"] = ssh_key_path

    # extra dcos_config (for dcos_launch and dcos_docker backends)
    extra_config = {
        'superuser_username': admin_username,
        'superuser_password_hash': sha512_crypt.hash(admin_password),
        'fault_domain_enabled': False,
        'license_key_contents': dcos_license,
    }

    if e2e_backend == 'dcos_launch':
        cluster_backend = AWS()

        with Cluster(cluster_backend=cluster_backend, agents=1) as cluster:
            dcos_config = {**cluster.base_config, **extra_config}

            cluster.install_dcos_from_url(
                build_artifact=installer_url,
                dcos_config=dcos_config,
                log_output_live=True,
            )

            os.environ["CLI_TEST_SSH_KEY_PATH"] = str(
                cluster._cluster._ssh_key_path)

            _run_tests(cluster, admin_username, admin_password)
    elif e2e_backend == 'dcos_docker':
        dcos_ee_installer_filename = 'dcos_generate_config.ee.sh'
        dcos_ee_installer_path = Path.cwd() / Path(dcos_ee_installer_filename)

        if not dcos_ee_installer_path.exists():
            urllib.request.urlretrieve(installer_url,
                                       dcos_ee_installer_filename)

        with Cluster(cluster_backend=Docker(), agents=1) as cluster:
            dcos_config = {**cluster.base_config, **extra_config}

            cluster.install_dcos_from_path(
                build_artifact=dcos_ee_installer_path,
                dcos_config=dcos_config,
                log_output_live=True,
            )

            _run_tests(cluster, admin_username, admin_password)
    elif e2e_backend == 'existing':
        try:
            dcos_ip = IPv4Address(dcos_url)
        except ValueError:
            parsed_dcos_url = urlparse(dcos_url)
            dcos_hostname = parsed_dcos_url.hostname
            dcos_ip = IPv4Address(socket.gethostbyname(dcos_hostname))

        masters = set([
            Node(
                public_ip_address=dcos_ip,
                private_ip_address=dcos_ip,
                ssh_key_path=Path(ssh_key_path),
                default_ssh_user=ssh_user,
            )
        ])

        cluster = Cluster.from_nodes(
            masters=masters,
            agents=set(),
            public_agents=set(),
        )

        _run_tests(cluster, admin_username, admin_password)
Example #28
    def test_aws_region(self) -> None:
        """
        The default ``aws_region`` is correct.
        """
        assert AWS().aws_region == 'us-west-2'
Example #29
    def test_linux_distribution(self) -> None:
        """
        The default ``linux_distribution`` is correct.
        """
        assert AWS().linux_distribution == Distribution.CENTOS_7
Example #30
def create(
    ctx: click.core.Context,
    agents: int,
    installer_url: str,
    extra_config: Dict[str, Any],
    masters: int,
    public_agents: int,
    variant: str,
    workspace_dir: Optional[Path],
    license_key: Optional[str],
    security_mode: Optional[str],
    copy_to_master: List[Tuple[Path, Path]],
    verbose: int,
    aws_region: str,
    linux_distribution: str,
    cluster_id: str,
    enable_selinux_enforcing: bool,
    genconf_dir: Optional[Path],
    custom_tag: Dict[str, str],
    wait_for_dcos: bool,
) -> None:
    """
    Create a DC/OS cluster.

        DC/OS Enterprise

            \b
            DC/OS Enterprise clusters require different configuration variables from DC/OS OSS.
            For example, enterprise clusters require the following configuration parameters:

            ``superuser_username``, ``superuser_password_hash``, ``fault_domain_enabled``, ``license_key_contents``

            \b
            These can all be set in ``--extra-config``.
            However, some defaults are provided for all but the license key.

            \b
            The default superuser username is ``admin``.
            The default superuser password is ``admin``.
            The default ``fault_domain_enabled`` is ``false``.

            \b
            ``license_key_contents`` must be set for DC/OS Enterprise 1.11 and above.
            This is set to one of the following, in order:

            \b
            * The ``license_key_contents`` set in ``--extra-config``.
            * The contents of the path given with ``--license-key``.
            * The contents of the path set in the ``DCOS_LICENSE_KEY_PATH`` environment variable.

            \b
            If none of these are set, ``license_key_contents`` is not given.
    """  # noqa: E501
    set_logging(verbosity_level=verbose)
    check_cluster_id_unique(
        new_cluster_id=cluster_id,
        existing_cluster_ids=existing_cluster_ids(aws_region=aws_region),
    )
    base_workspace_dir = workspace_dir or Path(tempfile.gettempdir())
    workspace_dir = base_workspace_dir / uuid.uuid4().hex
    workspace_dir.mkdir(parents=True)
    ssh_keypair_dir = workspace_dir / 'ssh'
    ssh_keypair_dir.mkdir(parents=True)
    key_name = 'key-{random}'.format(random=uuid.uuid4().hex)
    public_key_path = ssh_keypair_dir / 'id_rsa.pub'
    private_key_path = ssh_keypair_dir / 'id_rsa'
    write_key_pair(
        public_key_path=public_key_path,
        private_key_path=private_key_path,
    )

    ec2 = boto3.resource('ec2', region_name=aws_region)
    ec2.import_key_pair(
        KeyName=key_name,
        PublicKeyMaterial=public_key_path.read_bytes(),
    )

    doctor_message = get_doctor_message(sibling_ctx=ctx, doctor_command=doctor)
    dcos_variant = get_variant(
        given_variant=variant,
        installer_path=None,
        workspace_dir=workspace_dir,
        doctor_message=doctor_message,
    )
    variant_tag_value = {
        DCOSVariant.OSS: VARIANT_OSS_TAG_VALUE,
        DCOSVariant.ENTERPRISE: VARIANT_ENTERPRISE_TAG_VALUE,
    }[dcos_variant]

    ssh_user = {
        Distribution.CENTOS_7: 'centos',
        Distribution.COREOS: 'core',
        Distribution.UBUNTU_16_04: 'ubuntu',
        Distribution.RHEL_7: 'ec2-user',
    }

    distribution = LINUX_DISTRIBUTIONS[linux_distribution]

    default_user = ssh_user[distribution]

    cluster_tags = {
        SSH_USER_TAG_KEY: default_user,
        CLUSTER_ID_TAG_KEY: cluster_id,
        WORKSPACE_DIR_TAG_KEY: str(workspace_dir),
        KEY_NAME_TAG_KEY: key_name,
        VARIANT_TAG_KEY: variant_tag_value,
        **custom_tag,
    }

    master_tags = {NODE_TYPE_TAG_KEY: NODE_TYPE_MASTER_TAG_VALUE}
    agent_tags = {NODE_TYPE_TAG_KEY: NODE_TYPE_AGENT_TAG_VALUE}
    public_agent_tags = {NODE_TYPE_TAG_KEY: NODE_TYPE_PUBLIC_AGENT_TAG_VALUE}
    cluster_backend = AWS(
        aws_key_pair=(key_name, private_key_path),
        workspace_dir=workspace_dir,
        aws_region=aws_region,
        linux_distribution=distribution,
        ec2_instance_tags=cluster_tags,
        master_ec2_instance_tags=master_tags,
        agent_ec2_instance_tags=agent_tags,
        public_agent_ec2_instance_tags=public_agent_tags,
    )

    cluster = create_cluster(
        cluster_backend=cluster_backend,
        masters=masters,
        agents=agents,
        public_agents=public_agents,
        sibling_ctx=ctx,
        doctor_command=doctor,
    )

    nodes = {*cluster.masters, *cluster.agents, *cluster.public_agents}
    for node in nodes:
        if enable_selinux_enforcing:
            node.run(args=['setenforce', '1'], sudo=True)

    for node in cluster.masters:
        for path_pair in copy_to_master:
            local_path, remote_path = path_pair
            node.send_file(
                local_path=local_path,
                remote_path=remote_path,
                sudo=True,
            )

    files_to_copy_to_genconf_dir = []
    if genconf_dir is not None:
        container_genconf_path = Path('/genconf')
        for genconf_file in genconf_dir.glob('*'):
            genconf_relative = genconf_file.relative_to(genconf_dir)
            relative_path = container_genconf_path / genconf_relative
            files_to_copy_to_genconf_dir.append((genconf_file, relative_path))

    dcos_config = get_config(
        cluster=cluster,
        extra_config=extra_config,
        dcos_variant=dcos_variant,
        security_mode=security_mode,
        license_key=license_key,
    )

    try:
        with click_spinner.spinner():
            cluster.install_dcos_from_url(
                dcos_installer=installer_url,
                dcos_config=dcos_config,
                ip_detect_path=cluster_backend.ip_detect_path,
                files_to_copy_to_genconf_dir=files_to_copy_to_genconf_dir,
            )
    except CalledProcessError as exc:
        click.echo('Error installing DC/OS.', err=True)
        click.echo(doctor_message)
        cluster.destroy()
        sys.exit(exc.returncode)

    superuser_username = dcos_config.get(
        'superuser_username',
        DEFAULT_SUPERUSER_USERNAME,
    )

    superuser_password = dcos_config.get(
        'superuser_password',
        DEFAULT_SUPERUSER_PASSWORD,
    )

    if wait_for_dcos:
        dcos_e2e_cli.common.wait.wait_for_dcos(
            dcos_variant=dcos_variant,
            cluster=cluster,
            superuser_username=superuser_username,
            superuser_password=superuser_password,
            http_checks=True,
            doctor_command=doctor,
            sibling_ctx=ctx,
        )
        return

    show_cluster_started_message(
        # We work on the assumption that the ``wait`` command is a sibling
        # command of this one.
        sibling_ctx=ctx,
        wait_command=wait,
        cluster_id=cluster_id,
    )

    click.echo(cluster_id)