Example #1
    def test_default(self) -> None:
        """
        The default Linux distribution is CentOS 7.

        This test does not wait for DC/OS and we do not test DC/OS Enterprise
        because these are covered by other tests which use the default
        settings.
        """
        with Cluster(
                cluster_backend=Docker(),
                masters=1,
                agents=0,
                public_agents=0,
        ) as cluster:
            (master, ) = cluster.masters
            node_distribution = _get_node_distribution(node=master)

        assert node_distribution == Distribution.CENTOS_7

        with Cluster(
                # The distribution is also CentOS 7 if it is explicitly set.
                cluster_backend=Docker(
                    linux_distribution=Distribution.CENTOS_7),
                masters=1,
                agents=0,
                public_agents=0,
        ) as cluster:
            (master, ) = cluster.masters
            node_distribution = _get_node_distribution(node=master)

        assert node_distribution == Distribution.CENTOS_7
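
The helper ``_get_node_distribution`` is referenced above but not shown. A minimal sketch, assuming the ``Node`` and ``Distribution`` types already imported by the test module and that reading ``/etc/os-release`` over ``Node.run`` is an acceptable way to detect the distribution:

def _get_node_distribution(node: Node) -> Distribution:
    # Hypothetical helper: read /etc/os-release on the node and map the
    # ID/VERSION_ID pair onto a ``Distribution`` member.
    result = node.run(args=['cat', '/etc/os-release'])
    fields = dict(
        line.split('=', 1)
        for line in result.stdout.decode().splitlines()
        if '=' in line
    )
    key = (
        fields.get('ID', '').strip('"'),
        fields.get('VERSION_ID', '').strip('"'),
    )
    return {('centos', '7'): Distribution.CENTOS_7}[key]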
Example #2
    def test_two_clusters(self, cluster_backend: ClusterBackend) -> None:
        """
        It is possible to start two clusters.
        """
        # What is not tested here is that two cluster installations of DC/OS
        # can be started at the same time.
        with Cluster(cluster_backend=cluster_backend):
            with Cluster(cluster_backend=cluster_backend):
                pass
Example #3
    def test_two_clusters(
        self,
        cluster_backend: ClusterBackend,
        oss_artifact: Path,
    ) -> None:
        """
        It is possible to start two clusters.
        """
        with Cluster(cluster_backend=cluster_backend) as cluster:
            cluster.install_dcos_from_path(oss_artifact)
            with Cluster(cluster_backend=cluster_backend) as cluster:
                cluster.install_dcos_from_path(oss_artifact)
Example #4
    def test_existing_cluster(self, oss_artifact: Path) -> None:
        """
        It is possible to create a cluster from existing nodes, but not to
        destroy it.
        """
        with Cluster(
                cluster_backend=DCOS_Docker(),
                generate_config_path=oss_artifact,
                masters=1,
                agents=1,
                public_agents=1,
                destroy_on_success=False,
        ) as cluster:
            (master, ) = cluster.masters
            (agent, ) = cluster.agents
            (public_agent, ) = cluster.public_agents

            existing_cluster = ExistingCluster(
                masters=cluster.masters,
                agents=cluster.agents,
                public_agents=cluster.public_agents,
            )

            with Cluster(
                    cluster_backend=existing_cluster,
                    masters=len(cluster.masters),
                    agents=len(cluster.agents),
                    public_agents=len(cluster.public_agents),
                    destroy_on_success=False,
                    destroy_on_error=False,
            ) as duplicate_cluster:
                (duplicate_master, ) = duplicate_cluster.masters
                (duplicate_agent, ) = duplicate_cluster.agents
                (duplicate_public_agent, ) = duplicate_cluster.public_agents

                duplicate_master.run_as_root(
                    args=['touch', 'example_master_file'], )
                duplicate_agent.run_as_root(
                    args=['touch', 'example_agent_file'], )
                duplicate_public_agent.run_as_root(
                    args=['touch', 'example_public_agent_file'], )

                master.run_as_root(args=['test', '-f', 'example_master_file'])
                agent.run_as_root(args=['test', '-f', 'example_agent_file'])
                public_agent.run_as_root(
                    args=['test', '-f', 'example_public_agent_file'], )

            with pytest.raises(NotImplementedError):
                duplicate_cluster.destroy()

            cluster.destroy()
Example #5
    def test_custom(self) -> None:
        """
        It is possible to set node EC2 instance tags.
        """
        cluster_key = uuid.uuid4().hex
        cluster_value = uuid.uuid4().hex
        cluster_tags = {cluster_key: cluster_value}

        master_key = uuid.uuid4().hex
        master_value = uuid.uuid4().hex
        master_tags = {master_key: master_value}

        agent_key = uuid.uuid4().hex
        agent_value = uuid.uuid4().hex
        agent_tags = {agent_key: agent_value}

        public_agent_key = uuid.uuid4().hex
        public_agent_value = uuid.uuid4().hex
        public_agent_tags = {public_agent_key: public_agent_value}

        cluster_backend = AWS(
            ec2_instance_tags=cluster_tags,
            master_ec2_instance_tags=master_tags,
            agent_ec2_instance_tags=agent_tags,
            public_agent_ec2_instance_tags=public_agent_tags,
        )

        with Cluster(cluster_backend=cluster_backend) as cluster:
            for node in cluster.masters:
                node_instance = _get_ec2_instance_from_node(
                    node=node,
                    aws_region=cluster_backend.aws_region,
                )
                node_tags = _tag_dict(instance=node_instance)
                assert node_tags[cluster_key] == cluster_value
                assert node_tags[master_key] == master_value
                assert agent_key not in node_tags
                assert public_agent_key not in node_tags

            for node in cluster.agents:
                node_instance = _get_ec2_instance_from_node(
                    node=node,
                    aws_region=cluster_backend.aws_region,
                )
                node_tags = _tag_dict(instance=node_instance)
                assert node_tags[cluster_key] == cluster_value
                assert node_tags[agent_key] == agent_value
                assert master_key not in node_tags
                assert public_agent_key not in node_tags

            for node in cluster.public_agents:
                node_instance = _get_ec2_instance_from_node(
                    node=node,
                    aws_region=cluster_backend.aws_region,
                )
                node_tags = _tag_dict(instance=node_instance)
                assert node_tags[cluster_key] == cluster_value
                assert node_tags[public_agent_key] == public_agent_value
                assert master_key not in node_tags
                assert agent_key not in node_tags
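
The helpers ``_get_ec2_instance_from_node`` and ``_tag_dict`` are not shown in this snippet. ``_tag_dict`` can be sketched as follows, assuming ``_get_ec2_instance_from_node`` returns a boto3 ``ec2.Instance``, whose ``tags`` attribute is a list of ``{'Key': ..., 'Value': ...}`` mappings:

from typing import Dict


def _tag_dict(instance) -> Dict[str, str]:
    # Flatten the boto3 tag list into a plain dictionary so that tags can be
    # looked up by key in the assertions above.
    return {tag['Key']: tag['Value'] for tag in instance.tags or []}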
Example #6
def _oss_distribution_test(
    distribution: Distribution,
    oss_artifact: Path,
) -> None:
    """
    Assert that, given a ``linux_distribution``, an open source DC/OS
    ``Cluster`` is started with that Linux distribution.

    We use this rather than pytest parameterization so that we can separate
    the tests in ``.travis.yml``.
    """
    with Cluster(
            cluster_backend=Docker(linux_distribution=distribution),
            masters=1,
            agents=0,
            public_agents=0,
    ) as cluster:
        cluster.install_dcos_from_path(
            build_artifact=oss_artifact,
            dcos_config=cluster.base_config,
            log_output_live=True,
        )
        cluster.wait_for_dcos_oss()
        (master, ) = cluster.masters
        node_distribution = _get_node_distribution(node=master)

    assert node_distribution == distribution
Example #7
def test_calico_disabled(docker_backend: Docker, artifact_path: Path,
                         request: SubRequest, log_dir: Path) -> None:
    """
    When ``calico_enabled`` is set to ``"false"``, the Calico-related systemd
    units are not active on any node.
    """
    with Cluster(
            cluster_backend=docker_backend,
            masters=1,
            agents=1,
            public_agents=1,
    ) as cluster:
        config = {"calico_enabled": "false"}
        cluster.install_dcos_from_path(
            dcos_installer=artifact_path,
            dcos_config={
                **cluster.base_config,
                **config,
            },
            output=Output.LOG_AND_CAPTURE,
            ip_detect_path=docker_backend.ip_detect_path,
        )
        wait_for_dcos_oss(
            cluster=cluster,
            request=request,
            log_dir=log_dir,
        )

        calico_units = [
            "dcos-calico-felix", "dcos-calico-bird", "dcos-calico-confd",
            "dcos-calico-libnetwork-plugin", "dcos-etcd"
        ]
        for node in cluster.masters | cluster.agents | cluster.public_agents:
            for unit_name in calico_units:
                assert_system_unit_state(node, unit_name, active=False)
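
``assert_system_unit_state`` is not defined in this snippet. A plausible sketch, assuming ``Node.run`` raises ``subprocess.CalledProcessError`` on a non-zero exit status:

import subprocess


def assert_system_unit_state(
    node: Node,
    unit_name: str,
    active: bool = True,
) -> None:
    # ``systemctl is-active`` exits zero only when the unit is active, so a
    # raised ``CalledProcessError`` means the unit is inactive (or missing).
    try:
        node.run(args=['systemctl', 'is-active', unit_name])
        unit_active = True
    except subprocess.CalledProcessError:
        unit_active = False
    assert unit_active == active, unit_name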
Example #8
def create_cluster(
    cluster_backend: ClusterBackend,
    masters: int,
    agents: int,
    public_agents: int,
    sibling_ctx: click.core.Context,
    doctor_command: click.core.Command,
) -> Cluster:
    """
    Create a cluster.
    """
    doctor_message = get_doctor_message(
        sibling_ctx=sibling_ctx,
        doctor_command=doctor_command,
    )
    try:
        return Cluster(
            cluster_backend=cluster_backend,
            masters=masters,
            agents=agents,
            public_agents=public_agents,
        )
    except CalledProcessError as exc:
        click.echo('Error creating cluster.', err=True)
        click.echo(doctor_message)
        sys.exit(exc.returncode)
Example #9
    def test_one_master_host_port_map(self) -> None:
        """
        It is possible to expose admin router to a host port.
        """

        with Cluster(
                cluster_backend=Docker(
                    one_master_host_port_map={'80/tcp': 8000}),
                masters=3,
                agents=0,
                public_agents=0,
        ) as cluster:
            masters_containers = [
                _get_container_from_node(node=node) for node in cluster.masters
            ]

            masters_ports_settings = [
                container.attrs['HostConfig']['PortBindings']
                for container in masters_containers
            ]

            # Two of the three masters have no host port bindings, so their
            # settings are ``None``; remove those to leave the mapped master.
            masters_ports_settings.remove(None)
            masters_ports_settings.remove(None)

            [master_port_settings] = masters_ports_settings
            expected_master_port_settings = {
                '80/tcp': [{
                    'HostIp': '',
                    'HostPort': '8000',
                }],
            }
            assert master_port_settings == expected_master_port_settings
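
``_get_container_from_node`` is not shown in this snippet either. One way to sketch it, assuming the Docker SDK for Python (``docker``) and matching containers on the node's private IP address:

import docker
from docker.models.containers import Container


def _get_container_from_node(node: Node) -> Container:
    # Hypothetical helper: find the container whose network settings include
    # the node's private IP address.
    client = docker.from_env()
    for container in client.containers.list():
        networks = container.attrs['NetworkSettings']['Networks']
        if any(
            network['IPAddress'] == str(node.private_ip_address)
            for network in networks.values()
        ):
            return container
    raise LookupError('No container found for node {node}'.format(node=node))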
Example #10
    def test_run_integration_test(
        self,
        oss_artifact: Path,
    ) -> None:
        """
        It is possible to run DC/OS integration tests on Vagrant.
        This test module only requires a single master node.
        """
        cluster_backend = Vagrant()
        with Cluster(
                cluster_backend=cluster_backend,
                masters=1,
                agents=1,
                public_agents=1,
        ) as cluster:
            cluster.install_dcos_from_path(
                build_artifact=oss_artifact,
                dcos_config=cluster.base_config,
                log_output_live=True,
                ip_detect_path=cluster_backend.ip_detect_path,
            )

            cluster.wait_for_dcos_oss()

            # No error is raised with a successful command.
            cluster.run_integration_tests(
                pytest_command=['pytest', '-vvv', '-s', '-x', 'test_units.py'],
                log_output_live=True,
            )
Example #11
def _oss_distribution_test(
    distribution: Distribution,
    oss_installer_url: str,
) -> None:
    """
    Assert that, given a ``linux_distribution``, an open source DC/OS
    ``Cluster`` is started with that Linux distribution.

    We use this rather than pytest parameterization so that we can separate
    the tests in ``.travis.yml``.
    """
    cluster_backend = AWS(linux_distribution=distribution)
    with Cluster(
            cluster_backend=cluster_backend,
            masters=1,
            agents=0,
            public_agents=0,
    ) as cluster:
        cluster.install_dcos_from_url(
            dcos_installer=oss_installer_url,
            dcos_config=cluster.base_config,
            output=Output.CAPTURE,
            ip_detect_path=cluster_backend.ip_detect_path,
        )
        cluster.wait_for_dcos_oss()
        (master, ) = cluster.masters
        node_distribution = _get_node_distribution(node=master)

    assert node_distribution == distribution
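
The docstring above mentions using wrapper functions rather than ``pytest`` parameterization so that each distribution can be run as a separate CI job. Hypothetical callers could look like this (``Distribution.COREOS`` is assumed here purely for illustration):

def test_centos_7(oss_installer_url: str) -> None:
    # One thin wrapper per distribution lets ``.travis.yml`` select each
    # cluster configuration as a separate job, e.g. ``pytest -k test_centos_7``.
    _oss_distribution_test(
        distribution=Distribution.CENTOS_7,
        oss_installer_url=oss_installer_url,
    )


def test_coreos(oss_installer_url: str) -> None:
    _oss_distribution_test(
        distribution=Distribution.COREOS,
        oss_installer_url=oss_installer_url,
    )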
Example #12
    def test_oss(
        self,
        cluster_backend: ClusterBackend,
        oss_1_9_installer: Path,
    ) -> None:
        """
        An open source DC/OS 1.9 cluster can be started.
        """
        with Cluster(cluster_backend=cluster_backend) as cluster:
            cluster.install_dcos_from_path(
                dcos_installer=oss_1_9_installer,
                dcos_config=cluster.base_config,
                output=Output.CAPTURE,
                ip_detect_path=cluster_backend.ip_detect_path,
            )
            cluster.wait_for_dcos_oss()
            # We check that the user created with the special credentials does
            # not exist after ``wait_for_dcos_oss``.
            email = '*****@*****.**'
            path = '/dcos/users/{email}'.format(email=email)
            (master, ) = cluster.masters
            zk_client_port = '2181'
            zk_host = str(master.public_ip_address)
            zk_client = KazooClient(hosts=zk_host + ':' + zk_client_port)
            zk_client.start()
            zk_user_exists = zk_client.exists(path=path)
            zk_client.stop()
            assert not zk_user_exists
Example #13
    def test_install_dcos_from_path(
        self,
        oss_installer: Path,
        cluster_backend: ClusterBackend,
    ) -> None:
        """
        DC/OS can be installed on an existing cluster from a path.
        """
        with Cluster(
                cluster_backend=cluster_backend,
                masters=1,
                agents=0,
                public_agents=0,
        ) as original_cluster:
            cluster = Cluster.from_nodes(
                masters=original_cluster.masters,
                agents=original_cluster.agents,
                public_agents=original_cluster.public_agents,
            )

            cluster.install_dcos_from_path(
                dcos_installer=oss_installer,
                dcos_config=original_cluster.base_config,
                ip_detect_path=cluster_backend.ip_detect_path,
            )
            cluster.wait_for_dcos_oss()
            for node in {
                    *cluster.masters,
                    *cluster.agents,
                    *cluster.public_agents,
            }:
                build = node.dcos_build_info()
                assert build.version.startswith('2.')
                assert build.commit
                assert build.variant == DCOSVariant.OSS
Example #14
    def test_run_pytest(self, cluster_backend: ClusterBackend,
                        oss_artifact: Path) -> None:
        """
        Integration tests can be run with `pytest`.
        Errors are raised from `pytest`.
        """
        with Cluster(cluster_backend=cluster_backend) as cluster:
            cluster.install_dcos_from_path(oss_artifact, log_output_live=True)
            cluster.wait_for_dcos_oss()
            # No error is raised with a successful command.
            pytest_command = ['pytest', '-vvv', '-s', '-x', 'test_auth.py']
            cluster.run_integration_tests(
                pytest_command=pytest_command,
                log_output_live=True,
            )

            # An error is raised with an unsuccessful command.
            with pytest.raises(CalledProcessError) as excinfo:
                pytest_command = ['pytest', 'test_no_such_file.py']
                result = cluster.run_integration_tests(
                    pytest_command=pytest_command,
                    log_output_live=True,
                )
                # This result will not be printed if the test passes, but it
                # may provide useful debugging information.
                print(result)  # pragma: no cover

            # `pytest` results in an exit code of 4 when no tests are
            # collected.
            # See https://docs.pytest.org/en/latest/usage.html.
            assert excinfo.value.returncode == 4
Example #15
    def test_install_dcos_from_path(
        self,
        oss_artifact: Path,
        cluster_backend: ClusterBackend,
    ) -> None:
        """
        DC/OS can be installed on an existing cluster from a path.
        """
        with Cluster(
            cluster_backend=cluster_backend,
            masters=1,
            agents=0,
            public_agents=0,
        ) as original_cluster:
            cluster = Cluster.from_nodes(
                masters=original_cluster.masters,
                agents=original_cluster.agents,
                public_agents=original_cluster.public_agents,
            )

            cluster.install_dcos_from_path(
                build_artifact=oss_artifact,
                dcos_config=original_cluster.base_config,
            )

            cluster.wait_for_dcos_oss()
Example #16
    def test_extend_config(
        self,
        path: str,
        cluster_backend: ClusterBackend,
        oss_artifact: Path,
    ) -> None:
        """
        This example demonstrates that it is possible to create a cluster
        with an extended configuration file.

        See ``test_default`` for evidence that the custom configuration is
        used.
        """
        config = {
            'cluster_docker_credentials': {
                'auths': {
                    'https://index.docker.io/v1/': {
                        'auth': 'redacted'
                    },
                },
            },
            'cluster_docker_credentials_enabled': True,
        }

        with Cluster(
                generate_config_path=oss_artifact,
                extra_config=config,
                agents=0,
                public_agents=0,
                cluster_backend=cluster_backend,
        ) as cluster:
            cluster.wait_for_dcos()
            (master, ) = cluster.masters
            master.run_as_root(args=['test', '-f', path])
Example #17
    def test_copy_files(
        self,
        cluster_backend: ClusterBackend,
        tmpdir: local,
        oss_artifact: Path,
    ) -> None:
        """
        Files can be copied from the host to master nodes and the installer
        node at creation time.
        """
        content = str(uuid.uuid4())
        local_file = tmpdir.join('example_file.txt')
        local_file.write(content)
        source_path = Path(str(local_file))
        master_destination_path = Path('/etc/on_master_nodes.txt')
        files_to_copy_to_masters = {source_path: master_destination_path}
        # We currently do not have a way of testing that this works without
        # using custom CA certificates on an enterprise cluster.
        # We add it to the test to at least exercise the code which uses this,
        # but this is insufficient.
        files_to_copy_to_installer = {
            source_path: Path('/genconf/on_installer.txt'),
        }
        with Cluster(
                cluster_backend=cluster_backend,
                generate_config_path=oss_artifact,
                files_to_copy_to_masters=files_to_copy_to_masters,
                files_to_copy_to_installer=files_to_copy_to_installer,
                agents=0,
                public_agents=0,
        ) as cluster:
            (master, ) = cluster.masters
            args = ['cat', str(master_destination_path)]
            result = master.run_as_root(args=args)
            assert result.stdout.decode() == content
Example #18
    def test_custom(
        self,
        cluster_backend: ClusterBackend,
        oss_artifact: Path,
    ) -> None:
        """
        It is possible to create a cluster with a custom number of nodes.
        """
        # These are chosen to be low numbers which are not the defaults.
        # They are also different from one another to make sure that they are
        # not mixed up.
        # Low numbers are chosen to keep the resource usage low.
        masters = 3
        agents = 0
        public_agents = 2

        with Cluster(
                generate_config_path=oss_artifact,
                masters=masters,
                agents=agents,
                public_agents=public_agents,
                cluster_backend=cluster_backend,
        ) as cluster:
            assert len(cluster.masters) == masters
            assert len(cluster.agents) == agents
            assert len(cluster.public_agents) == public_agents
Example #19
def create_cluster(
    cluster_backend: ClusterBackend,
    masters: int,
    agents: int,
    public_agents: int,
    doctor_message: str,
) -> Cluster:
    """
    Create a cluster.
    """
    spinner = halo.Halo(enabled=sys.stdout.isatty())  # type: ignore
    spinner.start(text='Creating cluster')
    try:
        cluster = Cluster(
            cluster_backend=cluster_backend,
            masters=masters,
            agents=agents,
            public_agents=public_agents,
        )
    except CalledProcessError as exc:
        spinner.stop()
        click.echo('Error creating cluster.', err=True)
        click.echo(click.style('Full error:', fg='yellow'))
        click.echo(click.style(textwrap.indent(str(exc), '  '), fg='yellow'))
        click.echo(doctor_message, err=True)

        sys.exit(exc.returncode)

    spinner.succeed()
    return cluster
Example #20
    def test_enterprise(
        self,
        cluster_backend: ClusterBackend,
        enterprise_1_9_installer: Path,
    ) -> None:
        """
        A DC/OS Enterprise 1.9 cluster can be started.
        """
        superuser_username = str(uuid.uuid4())
        superuser_password = str(uuid.uuid4())
        config = {
            'superuser_username': superuser_username,
            'superuser_password_hash': sha512_crypt.hash(superuser_password),
        }

        with Cluster(cluster_backend=cluster_backend) as cluster:
            cluster.install_dcos_from_path(
                dcos_installer=enterprise_1_9_installer,
                dcos_config={
                    **cluster.base_config,
                    **config,
                },
                output=Output.CAPTURE,
                ip_detect_path=cluster_backend.ip_detect_path,
            )
            cluster.wait_for_dcos_ee(
                superuser_username=superuser_username,
                superuser_password=superuser_password,
            )
Example #21
def calico_ipip_cluster(docker_backend: Docker, artifact_path: Path,
                        request: SubRequest, log_dir: Path) -> Iterator[Cluster]:
    """
    Spin up a DC/OS cluster with Calico VXLAN encapsulation disabled.
    """
    with Cluster(
            cluster_backend=docker_backend,
            masters=1,
            agents=2,
            public_agents=1,
    ) as cluster:

        # ``superuser_username`` and ``superuser_password`` are not defined in
        # this snippet; they are assumed to be set at module scope.
        config = {
            "superuser_username": superuser_username,
            # We can hash the password with any `passlib`-based method here.
            # We choose `sha512_crypt` arbitrarily.
            "superuser_password_hash": sha512_crypt.hash(superuser_password),
            "calico_vxlan_enabled": "false",
            "calico_network_cidr": "192.168.128.0/17",
        }
        cluster.install_dcos_from_path(
            dcos_installer=artifact_path,
            dcos_config={
                **cluster.base_config,
                **config,
            },
            output=Output.LOG_AND_CAPTURE,
            ip_detect_path=docker_backend.ip_detect_path,
        )
        wait_for_dcos_oss(
            cluster=cluster,
            request=request,
            log_dir=log_dir,
        )
        yield cluster
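
As the comment in the configuration notes, the superuser password can be hashed with any ``passlib``-based method; the ``sha512_crypt`` used throughout these snippets comes from ``passlib``. For example:

from passlib.hash import sha256_crypt, sha512_crypt

# Either handler yields a crypt-format string that can be used as
# ``superuser_password_hash``; ``sha512_crypt`` is simply the arbitrary
# choice made in these tests.
sha512_crypt.hash('example-password')
sha256_crypt.hash('example-password')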
Example #22
    def test_install_dcos(
        self,
        oss_artifact: Path,
        oss_artifact_url: str,
        cluster_backend: ClusterBackend,
    ) -> None:
        """
        If a user attempts to install DC/OS on a `Cluster` created from
        existing nodes, a `NotImplementedError` is raised.
        """
        with Cluster(
                cluster_backend=cluster_backend,
                masters=1,
                agents=0,
                public_agents=0,
        ) as cluster:
            cluster = Cluster.from_nodes(
                masters=cluster.masters,
                agents=cluster.agents,
                public_agents=cluster.public_agents,
                default_ssh_user=cluster_backend.default_ssh_user,
            )

            with pytest.raises(NotImplementedError):
                cluster.install_dcos_from_url(build_artifact=oss_artifact_url)

            with pytest.raises(NotImplementedError):
                cluster.install_dcos_from_path(build_artifact=oss_artifact)
Example #23
    def test_install_dcos_from_url(
        self,
        oss_installer_url: str,
        cluster_backend: ClusterBackend,
    ) -> None:
        """
        DC/OS can be installed on an existing cluster from a URL.
        """
        with Cluster(
                cluster_backend=cluster_backend,
                masters=1,
                agents=0,
                public_agents=0,
        ) as original_cluster:
            cluster = Cluster.from_nodes(
                masters=original_cluster.masters,
                agents=original_cluster.agents,
                public_agents=original_cluster.public_agents,
            )

            cluster.install_dcos_from_url(
                dcos_installer=oss_installer_url,
                dcos_config=original_cluster.base_config,
                ip_detect_path=cluster_backend.ip_detect_path,
            )

            cluster.wait_for_dcos_oss()
Example #24
    def test_custom_docker_network(
        self,
        docker_network: Network,
    ) -> None:
        """
        When a network is specified on the Docker backend, each container is
        connected to the default bridge network ``docker0`` and is also
        connected to the custom network.

        The ``Node``'s IP addresses correspond to the custom network.
        """
        with Cluster(
            cluster_backend=Docker(
                network=docker_network,
                transport=Transport.DOCKER_EXEC,
            ),
            agents=0,
            public_agents=0,
        ) as cluster:
            (master, ) = cluster.masters
            container = _get_container_from_node(node=master)
            networks = container.attrs['NetworkSettings']['Networks']
            assert networks.keys() == set(['bridge', docker_network.name])
            custom_network_ip = networks[docker_network.name]['IPAddress']
            assert custom_network_ip == str(master.public_ip_address)
            assert custom_network_ip == str(master.private_ip_address)
Example #25
    def test_custom_version(self, docker_version: DockerVersion) -> None:
        """
        It is possible to set a custom version of Docker.

        Running this test requires ``aufs`` to be available.
        Depending on your system, it may be possible to make ``aufs`` available
        using the following commands:

        .. code

           $ apt-get install linux-image-extra-$(uname -r)
           $ modprobe aufs
        """
        # We specify the storage driver because `overlay2` is not compatible
        # with old versions of Docker.
        with Cluster(
                cluster_backend=Docker(
                    docker_version=docker_version,
                    storage_driver=DockerStorageDriver.AUFS,
                ),
                masters=1,
                agents=0,
                public_agents=0,
        ) as cluster:
            (master, ) = cluster.masters
            node_docker_version = self._get_docker_version(node=master)

        assert docker_version == node_docker_version
Example #26
    def test_docker_exec_transport(
        self,
        docker_network: Network,
        tmpdir: local,
    ) -> None:
        """
        ``Node`` operations with the Docker exec transport work even if the
        node is on a custom network.
        """
        with Cluster(
            cluster_backend=Docker(
                network=docker_network,
                transport=Transport.DOCKER_EXEC,
            ),
            agents=0,
            public_agents=0,
        ) as cluster:
            (master, ) = cluster.masters
            content = str(uuid.uuid4())
            local_file = tmpdir.join('example_file.txt')
            local_file.write(content)
            random = uuid.uuid4().hex
            master_destination_dir = '/etc/{random}'.format(random=random)
            master_destination_path = Path(master_destination_dir) / 'file.txt'
            master.send_file(
                local_path=Path(str(local_file)),
                remote_path=master_destination_path,
                transport=Transport.DOCKER_EXEC,
            )
            args = ['cat', str(master_destination_path)]
            result = master.run(args=args, transport=Transport.DOCKER_EXEC)
            assert result.stdout.decode() == content
Example #27
    def test_oss(
        self,
        cluster_backend: ClusterBackend,
        oss_1_10_installer: Path,
    ) -> None:
        """
        An open source DC/OS 1.10 cluster can be started.
        """
        with Cluster(cluster_backend=cluster_backend) as cluster:
            cluster.install_dcos_from_path(
                dcos_installer=oss_1_10_installer,
                dcos_config=cluster.base_config,
                output=Output.LOG_AND_CAPTURE,
                ip_detect_path=cluster_backend.ip_detect_path,
            )
            cluster.wait_for_dcos_oss()
            for node in {
                    *cluster.masters,
                    *cluster.agents,
                    *cluster.public_agents,
            }:
                build = node.dcos_build_info()
                assert build.version.startswith('1.10')
                assert build.commit
                assert build.variant == DCOSVariant.OSS
Example #28
def three_master_cluster(
    artifact_path: Path,
    docker_backend: Docker,
    request: SubRequest,
    log_dir: Path,
) -> Iterator[Cluster]:
    """
    Spin up a highly-available DC/OS cluster with three master nodes.
    """
    with Cluster(
            cluster_backend=docker_backend,
            masters=3,
            agents=0,
            public_agents=0,
    ) as cluster:
        cluster.install_dcos_from_path(
            dcos_installer=artifact_path,
            dcos_config=cluster.base_config,
            ip_detect_path=docker_backend.ip_detect_path,
        )
        wait_for_dcos_oss(
            cluster=cluster,
            request=request,
            log_dir=log_dir,
        )
        yield cluster
Example #29
    def test_run_integration_test(
        self,
        oss_installer: Path,
    ) -> None:
        """
        It is possible to run DC/OS integration tests on Vagrant.
        This test module only requires a single master node.
        """
        cluster_backend = Vagrant()
        with Cluster(
                cluster_backend=cluster_backend,
                masters=1,
                agents=1,
                public_agents=1,
        ) as cluster:
            cluster.install_dcos_from_path(
                dcos_installer=oss_installer,
                dcos_config=cluster.base_config,
                output=Output.CAPTURE,
                ip_detect_path=cluster_backend.ip_detect_path,
            )

            cluster.wait_for_dcos_oss()

            # No error is raised with a successful command.
            cluster.run_with_test_environment(
                args=['pytest', '-vvv', '-s', '-x', 'test_units.py'],
                output=Output.CAPTURE,
            )
Example #30
    def test_enterprise(
        self,
        cluster_backend: ClusterBackend,
        enterprise_1_10_artifact: Path,
        license_key_contents: str,
    ) -> None:
        """
        A DC/OS Enterprise 1.10 cluster can be started.
        """
        superuser_username = str(uuid.uuid4())
        superuser_password = str(uuid.uuid4())
        config = {
            'superuser_username': superuser_username,
            'superuser_password_hash': sha512_crypt.hash(superuser_password),
            'fault_domain_enabled': False,
            'license_key_contents': license_key_contents,
        }

        with Cluster(cluster_backend=cluster_backend) as cluster:
            cluster.install_dcos_from_path(
                build_artifact=enterprise_1_10_artifact,
                dcos_config={
                    **cluster.base_config,
                    **config,
                },
                log_output_live=True,
            )
            cluster.wait_for_dcos_ee(
                superuser_username=superuser_username,
                superuser_password=superuser_password,
            )