Example #1
def three_master_cluster(
    artifact_path: Path,
    docker_backend: Docker,
    request: SubRequest,
    log_dir: Path,
) -> Iterator[Cluster]:
    """
    Spin up a highly-available DC/OS cluster with three master nodes.
    """
    with Cluster(
            cluster_backend=docker_backend,
            masters=3,
            agents=0,
            public_agents=0,
    ) as cluster:
        cluster.install_dcos_from_path(
            dcos_installer=artifact_path,
            dcos_config=cluster.base_config,
            ip_detect_path=docker_backend.ip_detect_path,
        )
        wait_for_dcos_oss(
            cluster=cluster,
            request=request,
            log_dir=log_dir,
        )
        yield cluster
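A minimal usage sketch, assuming `three_master_cluster` is registered as a pytest fixture (the registration and the test below are hypothetical, not part of the original code):

import pytest

# Hypothetical registration of the generator function as a pytest fixture.
three_master_cluster = pytest.fixture(three_master_cluster)

def test_three_masters(three_master_cluster: Cluster) -> None:
    # The fixture yields a running cluster and tears it down afterwards.
    assert len(three_master_cluster.masters) == 3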
Example #2
def calico_ipip_cluster(docker_backend: Docker, artifact_path: Path,
                        request: SubRequest, log_dir: Path) -> Iterator[Cluster]:
    with Cluster(
            cluster_backend=docker_backend,
            masters=1,
            agents=2,
            public_agents=1,
    ) as cluster:

        config = {
            "superuser_username": superuser_username,
            # We can hash the password with any `passlib`-based method here.
            # We choose `sha512_crypt` arbitrarily.
            "superuser_password_hash": sha512_crypt.hash(superuser_password),
            "calico_vxlan_enabled": "false",
            "calico_network_cidr": "192.168.128.0/17",
        }
        cluster.install_dcos_from_path(
            dcos_installer=artifact_path,
            dcos_config={
                **cluster.base_config,
                **config,
            },
            output=Output.LOG_AND_CAPTURE,
            ip_detect_path=docker_backend.ip_detect_path,
        )
        wait_for_dcos_oss(
            cluster=cluster,
            request=request,
            log_dir=log_dir,
        )
        yield cluster
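`superuser_username` and `superuser_password` are referenced but not defined in this excerpt; a plausible sketch is that they are module-level constants, for example:

# Hypothetical module-level constants assumed by the config above.
superuser_username = 'admin'
superuser_password = str(uuid.uuid4())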
Example #3
def test_calico_disabled(docker_backend: Docker, artifact_path: Path,
                         request: SubRequest, log_dir: Path) -> None:
    with Cluster(
            cluster_backend=docker_backend,
            masters=1,
            agents=1,
            public_agents=1,
    ) as cluster:
        config = {"calico_enabled": "false"}
        cluster.install_dcos_from_path(
            dcos_installer=artifact_path,
            dcos_config={
                **cluster.base_config,
                **config,
            },
            output=Output.LOG_AND_CAPTURE,
            ip_detect_path=docker_backend.ip_detect_path,
        )
        wait_for_dcos_oss(
            cluster=cluster,
            request=request,
            log_dir=log_dir,
        )

        calico_units = [
            "dcos-calico-felix", "dcos-calico-bird", "dcos-calico-confd",
            "dcos-calico-libnetwork-plugin", "dcos-etcd"
        ]
        for node in cluster.masters | cluster.agents | cluster.public_agents:
            for unit_name in calico_units:
                assert_system_unit_state(node, unit_name, active=False)
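`assert_system_unit_state` is not defined in this excerpt. A minimal sketch, assuming `Node.run` raises `subprocess.CalledProcessError` on a non-zero exit status:

import subprocess

from dcos_e2e.node import Node

def assert_system_unit_state(node: Node, unit_name: str, active: bool = True) -> None:
    # Hypothetical helper: `systemctl is-active` exits 0 only for active units.
    try:
        node.run(args=['systemctl', 'is-active', unit_name])
        is_active = True
    except subprocess.CalledProcessError:
        is_active = False
    assert is_active == active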
Example #4
def test_superuser_service_account_login(
    docker_backend: ClusterBackend,
    artifact_path: Path,
    request: SubRequest,
    log_dir: Path,
) -> None:
    """
    Tests for successful superuser service account login which asserts
    that the default user has been created during cluster start.
    """
    superuser_uid = str(uuid.uuid4())
    superuser_private_key, superuser_public_key = generate_rsa_keypair()
    config = {
        'superuser_service_account_uid': superuser_uid,
        'superuser_service_account_public_key': superuser_public_key,
    }
    with Cluster(
            cluster_backend=docker_backend,
            agents=0,
            public_agents=0,
    ) as cluster:
        cluster.install_dcos_from_path(
            dcos_installer=artifact_path,
            dcos_config={
                **cluster.base_config,
                **config,
            },
            output=Output.LOG_AND_CAPTURE,
            ip_detect_path=docker_backend.ip_detect_path,
        )
        wait_for_dcos_oss(
            cluster=cluster,
            request=request,
            log_dir=log_dir,
        )
        master = next(iter(cluster.masters))
        master_url = 'http://' + str(master.public_ip_address)
        login_endpoint = master_url + '/acs/api/v1/auth/login'

        service_login_token = jwt.encode(
            {
                'uid': superuser_uid,
                'exp': time.time() + 30
            },
            superuser_private_key,
            algorithm='RS256').decode('ascii')

        response = requests.post(login_endpoint,
                                 json={
                                     'uid': superuser_uid,
                                     'token': service_login_token
                                 })
        assert response.status_code == 200
Example #5
def test_superuser_service_account_login(
    docker_backend: ClusterBackend,
    artifact_path: Path,
    request: SubRequest,
    log_dir: Path,
) -> None:
    """
    Tests for successful superuser service account login which asserts
    that the default user has been created during cluster start.
    """
    superuser_uid = str(uuid.uuid4())
    superuser_private_key, superuser_public_key = generate_rsa_keypair()
    config = {
        'superuser_service_account_uid': superuser_uid,
        'superuser_service_account_public_key': superuser_public_key,
    }
    with Cluster(
        cluster_backend=docker_backend,
        agents=0,
        public_agents=0,
    ) as cluster:
        cluster.install_dcos_from_path(
            dcos_installer=artifact_path,
            dcos_config={
                **cluster.base_config,
                **config,
            },
            output=Output.LOG_AND_CAPTURE,
            ip_detect_path=docker_backend.ip_detect_path,
        )
        wait_for_dcos_oss(
            cluster=cluster,
            request=request,
            log_dir=log_dir,
        )
        master = next(iter(cluster.masters))
        master_url = 'http://' + str(master.public_ip_address)
        login_endpoint = master_url + '/acs/api/v1/auth/login'

        service_login_token = jwt.encode(
            {'uid': superuser_uid, 'exp': time.time() + 30},
            superuser_private_key,
            algorithm='RS256'
        ).decode('ascii')

        response = requests.post(
            login_endpoint,
            json={'uid': superuser_uid, 'token': service_login_token}
        )
        assert response.status_code == 200
Example #6
def dynamic_three_master_cluster(
    artifact_path: Path,
    docker_backend: Docker,
    zookeeper_backend: Container,
    request: SubRequest,
    log_dir: Path,
) -> Generator[Cluster, None, None]:
    """Spin up a dynamic DC/OS cluster with three master nodes."""
    exhibitor_zk_port = 2181
    exhibitor_zk_ip_address = zookeeper_backend.attrs['NetworkSettings'][
        'IPAddress']
    exhibitor_zk_host = '{ip_address}:{port}'.format(
        ip_address=exhibitor_zk_ip_address,
        port=exhibitor_zk_port,
    )
    dynamic_ee_config = {
        'exhibitor_storage_backend': 'zookeeper',
        'exhibitor_zk_hosts': exhibitor_zk_host,
        'exhibitor_zk_path': '/zk-example',
        'master_discovery': 'master_http_loadbalancer',
        # `exhibitor_address` is required for a `zookeeper` based cluster, but
        # does not need to be valid because the cluster has no agents.
        'exhibitor_address': 'none',
        'num_masters': '3',
    }

    with Cluster(
            cluster_backend=docker_backend,
            masters=3,
            agents=0,
            public_agents=0,
    ) as cluster:
        dcos_config = {
            **cluster.base_config,
            **dynamic_ee_config,
        }
        dcos_config.pop('master_list')
        cluster.install_dcos_from_path(
            dcos_installer=artifact_path,
            dcos_config=dcos_config,
            ip_detect_path=docker_backend.ip_detect_path,
        )
        wait_for_dcos_oss(
            cluster=cluster,
            request=request,
            log_dir=log_dir,
        )
        yield cluster
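The `zookeeper_backend` fixture is not shown. A sketch using docker-py, where the image tag and fixture body are assumptions:

from typing import Iterator

import docker
import pytest
from docker.models.containers import Container

@pytest.fixture
def zookeeper_backend() -> Iterator[Container]:
    # Hypothetical fixture: run a throwaway ZooKeeper container.
    client = docker.from_env()
    container = client.containers.run(image='zookeeper:3.4', detach=True)
    container.reload()  # populate `attrs`, including NetworkSettings
    yield container
    container.remove(force=True)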
Example #7
def test_superuser_service_account_login(
    docker_backend: ClusterBackend,
    artifact_path: Path,
    request: SubRequest,
    log_dir: Path,
    rsa_keypair: Tuple[str, str],
    jwt_token: Callable[[str, str, int], str]
) -> None:
    """
    Tests for successful superuser service account login which asserts
    that the default user has been created during cluster start.
    """
    superuser_uid = str(uuid.uuid4())
    config = {
        'superuser_service_account_uid': superuser_uid,
        'superuser_service_account_public_key': rsa_keypair[1],
    }
    with Cluster(
        cluster_backend=docker_backend,
        agents=0,
        public_agents=0,
    ) as cluster:
        cluster.install_dcos_from_path(
            dcos_installer=artifact_path,
            dcos_config={
                **cluster.base_config,
                **config,
            },
            output=Output.LOG_AND_CAPTURE,
            ip_detect_path=docker_backend.ip_detect_path,
        )
        wait_for_dcos_oss(
            cluster=cluster,
            request=request,
            log_dir=log_dir,
        )
        master = next(iter(cluster.masters))
        master_url = 'http://' + str(master.public_ip_address)
        login_endpoint = master_url + '/acs/api/v1/auth/login'

        service_login_token = jwt_token(superuser_uid, rsa_keypair[0], 30)

        response = requests.post(
            login_endpoint,
            json={'uid': superuser_uid, 'token': service_login_token}
        )
        assert response.status_code == 200
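The `rsa_keypair` and `jwt_token` fixtures factor out the inline key generation and `jwt.encode` call from Examples #4 and #5; a sketch consistent with those examples (the fixture bodies themselves are assumptions):

import time
from typing import Callable, Tuple

import jwt
import pytest

@pytest.fixture
def rsa_keypair() -> Tuple[str, str]:
    # (private_key, public_key) PEM pair, as generated inline in Example #4.
    return generate_rsa_keypair()

@pytest.fixture
def jwt_token() -> Callable[[str, str, int], str]:
    def _token(uid: str, private_key: str, exp_seconds: int) -> str:
        # Mirrors the inline `jwt.encode` call from Examples #4 and #5.
        return jwt.encode(
            {'uid': uid, 'exp': time.time() + exp_seconds},
            private_key,
            algorithm='RS256',
        ).decode('ascii')
    return _token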
Example #8
def test_windows_agents(
    workspace_dir: Path,
    artifact_path: Path,
    request: SubRequest,
    log_dir: Path,
) -> None:
    """
    Enabling Windows agents creates additional configuration package
    and does not break Linux installation.
    """
    docker_backend = Docker(workspace_dir=workspace_dir)

    config = {
        'enable_windows_agents': True,
    }
    with Cluster(
            cluster_backend=docker_backend,
            agents=0,
            public_agents=0,
    ) as cluster:
        cluster.install_dcos_from_path(
            dcos_installer=artifact_path,
            dcos_config={
                **cluster.base_config,
                **config,
            },
            output=Output.LOG_AND_CAPTURE,
            ip_detect_path=docker_backend.ip_detect_path,
        )

        # Check that exactly one dcos-config-win setup package was created.
        paths = []
        for root, _, files in os.walk(str(workspace_dir)):
            for file in files:
                if file.startswith('dcos-config-win--setup_'):
                    paths.append(Path(root) / file)
        assert len(paths) == 1

        wait_for_dcos_oss(
            cluster=cluster,
            request=request,
            log_dir=log_dir,
        )
Example #9
    def test_transaction_log_backup_and_restore(
        self,
        static_three_master_cluster: Cluster,
        zk_client: KazooClient,
        tmp_path: Path,
        request: SubRequest,
        log_dir: Path,
    ) -> None:
        """
        In a 3-master cluster, backing up the transaction log of ZooKeeper on
        one node and restoring from the backup on all masters results in a
        functioning DC/OS cluster with previously backed up Znodes restored.
        """
        # Write to ZooKeeper before backup
        persistent_flag = _zk_set_flag(zk_client)
        ephemeral_flag = _zk_set_flag(zk_client, ephemeral=True)

        random = uuid.uuid4().hex
        backup_name = 'zk-backup-{random}.tar.gz'.format(random=random)
        backup_local_path = tmp_path / backup_name

        # Take ZooKeeper backup from one master node.
        _do_backup(next(iter(static_three_master_cluster.masters)),
                   backup_local_path)

        # Store a datapoint which we expect to get lost.
        not_backed_up_flag = _zk_set_flag(zk_client)

        # Restore ZooKeeper from backup on all master nodes.
        _do_restore(static_three_master_cluster.masters, backup_local_path)

        # Read from ZooKeeper after restore
        assert _zk_flag_exists(zk_client, persistent_flag)
        assert _zk_flag_exists(zk_client, ephemeral_flag)
        assert not _zk_flag_exists(zk_client, not_backed_up_flag)

        # Assert that DC/OS is intact.
        wait_for_dcos_oss(
            cluster=static_three_master_cluster,
            request=request,
            log_dir=log_dir,
        )
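The `_zk_set_flag` and `_zk_flag_exists` helpers are not part of this excerpt. A minimal sketch against the kazoo client API, where the Znode path layout is an assumption:

import uuid

from kazoo.client import KazooClient

def _zk_set_flag(zk_client: KazooClient, ephemeral: bool = False) -> str:
    # Hypothetical helper: create a uniquely named Znode, return its path.
    path = '/dcos-e2e-flags/{}'.format(uuid.uuid4().hex)
    zk_client.ensure_path('/dcos-e2e-flags')
    zk_client.create(path, ephemeral=ephemeral)
    return path

def _zk_flag_exists(zk_client: KazooClient, path: str) -> bool:
    # Hypothetical helper: check whether a Znode survived the restore.
    return zk_client.exists(path) is not None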
Example #10
def calico_ipip_cluster(docker_backend: Docker, artifact_path: Path,
                        request: SubRequest,
                        log_dir: Path) -> Iterator[Cluster]:
    # Create a relatively large test cluster, since we've seen problems
    # when many agents attempt to create the Docker network. See
    # https://jira.d2iq.com/browse/D2IQ-70674
    with Cluster(
            cluster_backend=docker_backend,
            masters=3,
            agents=8,
            public_agents=8,
    ) as cluster:

        config = {
            "superuser_username": superuser_username,
            # We can hash the password with any `passlib`-based method here.
            # We choose `sha512_crypt` arbitrarily.
            "superuser_password_hash": sha512_crypt.hash(superuser_password),
            "calico_vxlan_enabled": "false",
            "calico_network_cidr": "192.168.128.0/17",
        }
        cluster.install_dcos_from_path(
            dcos_installer=artifact_path,
            dcos_config={
                **cluster.base_config,
                **config,
            },
            output=Output.LOG_AND_CAPTURE,
            ip_detect_path=docker_backend.ip_detect_path,
        )
        wait_for_dcos_oss(
            cluster=cluster,
            request=request,
            log_dir=log_dir,
        )
        yield cluster

        dump_cluster_journals(
            cluster=cluster,
            target_dir=log_dir / artifact_dir_format(request.node.name),
        )
Example #11
    def test_snapshot_backup_and_restore(
        self,
        static_three_master_cluster: Cluster,
        zk_client: KazooClient,
        tmp_path: Path,
        request: SubRequest,
        log_dir: Path,
    ) -> None:
        """
        In a 3-master cluster, backing up a snapshot of ZooKeeper on
        one node and restoring from the backup on all masters results in a
        functioning DC/OS cluster with previously backed up Znodes restored.
        """
        # Modify the Exhibitor config that generates the ZooKeeper config
        # (set `snapCount=1`). This instructs ZooKeeper to write a snapshot
        # after every transaction. Adding `snapCount` here only works as
        # long as DC/OS does not set it itself.
        args = [
            'sed',
            '-i',
            "'s/zoo-cfg-extra=/zoo-cfg-extra=snapCount\\\\=1\\&/'",
            '/opt/mesosphere/active/exhibitor/usr/exhibitor/start_exhibitor.py',
        ]
        for master in static_three_master_cluster.masters:
            master.run(
                args=args,
                shell=True,
                output=Output.LOG_AND_CAPTURE,
            )
        for master in static_three_master_cluster.masters:
            master.run(['systemctl', 'restart', 'dcos-exhibitor'])

        wait_for_dcos_oss(
            cluster=static_three_master_cluster,
            request=request,
            log_dir=log_dir,
        )

        # Write to ZooKeeper multiple times before backup
        persistent_flag = _zk_set_flag(zk_client)
        ephemeral_flag = _zk_set_flag(zk_client, ephemeral=True)

        # Extra ZooKeeper write, triggering snapshot creation due to
        # `snapCount=1`. After this we can be sure the previous writes are
        # contained in at least one of the generated snapshots.
        _zk_set_flag(zk_client)

        random = uuid.uuid4().hex
        backup_name = 'zk-backup-{random}.tar.gz'.format(random=random)
        backup_local_path = tmp_path / backup_name

        # Take ZooKeeper backup from one master node.
        _do_backup(next(iter(static_three_master_cluster.masters)),
                   backup_local_path)

        # Store a datapoint which we expect to be lost.
        not_backed_up_flag = _zk_set_flag(zk_client)

        # Restore ZooKeeper from backup on all master nodes.
        _do_restore(static_three_master_cluster.masters, backup_local_path)

        # Read from ZooKeeper after restore
        assert _zk_flag_exists(zk_client, persistent_flag)
        assert _zk_flag_exists(zk_client, ephemeral_flag)
        assert not _zk_flag_exists(zk_client, not_backed_up_flag)

        # Assert DC/OS is intact.
        wait_for_dcos_oss(
            cluster=static_three_master_cluster,
            request=request,
            log_dir=log_dir,
        )
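`_do_backup` and `_do_restore`, used in Examples #9 and #11, are also not shown. A hypothetical sketch, assuming the dcos_e2e `Node.download_file`/`Node.send_file` transfer methods and a ZooKeeper data directory under `/var/lib/dcos/exhibitor`:

from pathlib import Path
from typing import Set

from dcos_e2e.node import Node

def _do_backup(master: Node, backup_local_path: Path) -> None:
    # Hypothetical: archive the ZooKeeper data directory while ZooKeeper
    # is stopped, then fetch the archive to the test machine.
    master.run(args=['systemctl', 'stop', 'dcos-exhibitor'])
    remote_backup = Path('/tmp') / backup_local_path.name
    master.run(args=[
        'tar', '-czf', str(remote_backup),
        '-C', '/var/lib/dcos/exhibitor', 'zookeeper',
    ])
    master.run(args=['systemctl', 'start', 'dcos-exhibitor'])
    master.download_file(remote_path=remote_backup, local_path=backup_local_path)

def _do_restore(masters: Set[Node], backup_local_path: Path) -> None:
    # Hypothetical: replace the data directory on every master, then restart.
    remote_backup = Path('/tmp') / backup_local_path.name
    for master in masters:
        master.run(args=['systemctl', 'stop', 'dcos-exhibitor'])
        master.send_file(local_path=backup_local_path, remote_path=remote_backup)
        master.run(args=['rm', '-rf', '/var/lib/dcos/exhibitor/zookeeper'])
        master.run(args=['tar', '-xzf', str(remote_backup), '-C', '/var/lib/dcos/exhibitor'])
    for master in masters:
        master.run(args=['systemctl', 'start', 'dcos-exhibitor'])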
Example #12
def test_replace_all_static(
    artifact_path: Path,
    docker_network_three_available_addresses: Network,
    tmp_path: Path,
    request: SubRequest,
    log_dir: Path,
) -> None:
    """
    In a cluster with an Exhibitor backend consisting of a static ZooKeeper
    ensemble, after removing one master, and then adding another master with
    the same IP address, the cluster will get to a healthy state. This is
    repeated until all masters in the original cluster have been replaced.
    The purpose of this test is to assert that the ``node-poststart``
    procedure correctly prevents a master node replacement from being performed
    too quickly. A new master node should only become part of the cluster if
    there are no more underreplicated ranges reported by CockroachDB.

    Permanent CockroachDB data loss and a potential breakage of DC/OS occurs
    when a second master node is taken down for replacement while CockroachDB
    is recovering and there are still underreplicated ranges due to a recent
    other master node replacement.
    """
    docker_backend = Docker(network=docker_network_three_available_addresses)

    with Cluster(
            cluster_backend=docker_backend,
            # Allocate all 3 available IP addresses in the subnet.
            masters=3,
            agents=0,
            public_agents=0,
    ) as original_cluster:
        master = next(iter(original_cluster.masters))
        result = master.run(
            args=[
                'ifconfig',
                '|',
                'grep',
                '-B1',
                str(master.public_ip_address),
                '|',
                'grep',
                '-o',
                '"^\w*"',
            ],
            shell=True,
        )
        interface = result.stdout.strip().decode()
        ip_detect_contents = textwrap.dedent(
            """\
            #!/bin/bash -e
            if [ -f /sbin/ip ]; then
               IP_CMD=/sbin/ip
            else
               IP_CMD=/bin/ip
            fi

            $IP_CMD -4 -o addr show dev {interface} | awk '{{split($4,a,"/");print a[1]}}'
            """.format(interface=interface), )
        ip_detect_path = tmp_path / 'ip-detect'
        ip_detect_path.write_text(data=ip_detect_contents)
        static_config = {
            'master_discovery': 'static',
            'master_list': [
                str(master.private_ip_address)
                for master in original_cluster.masters
            ],
        }
        dcos_config = {
            **original_cluster.base_config,
            **static_config,
        }
        original_cluster.install_dcos_from_path(
            dcos_installer=artifact_path,
            dcos_config=dcos_config,
            ip_detect_path=ip_detect_path,
        )
        wait_for_dcos_oss(
            cluster=original_cluster,
            request=request,
            log_dir=log_dir,
        )
        current_cluster = original_cluster
        tmp_clusters = set()

        original_masters = original_cluster.masters

        try:
            for master_to_be_replaced in original_masters:
                # Destroy a master and free one IP address.
                original_cluster.destroy_node(node=master_to_be_replaced)

                temporary_cluster = Cluster(
                    cluster_backend=docker_backend,
                    # Allocate one container with the now free IP address.
                    masters=1,
                    agents=0,
                    public_agents=0,
                )
                tmp_clusters.add(temporary_cluster)

                # Install a new master on a new container with the same IP address.
                (new_master, ) = temporary_cluster.masters
                new_master.install_dcos_from_path(
                    dcos_installer=artifact_path,
                    dcos_config=dcos_config,
                    role=Role.MASTER,
                    ip_detect_path=ip_detect_path,
                )
                # Form a new cluster with the newly created master node.
                new_cluster = Cluster.from_nodes(
                    masters=current_cluster.masters.union({new_master}),
                    agents=current_cluster.agents,
                    public_agents=current_cluster.public_agents,
                )
                # The `wait_for_dcos_oss` function waits until the new master has
                # joined the cluster and all masters are healthy. Without the
                # cockroachdb check, this succeeds before all cockroachdb ranges
                # have finished replicating to the new master. That meant that the
                # next master would be replaced too quickly, while it had data that
                # was not present elsewhere in the cluster. This led to
                # irrecoverable data loss. This function waits until the
                # master node is "healthy". This is a requirement for replacing the
                # next master node.
                #
                # We don't call the cockroachdb ranges check directly as the
                # purpose of this test is to ensure that when an operator follows
                # our documented procedure for replacing a master node multiple
                # times in a row (e.g. during a cluster upgrade) then the cluster
                # remains healthy throughout and afterwards.
                #
                # If we called the check directly here, we would be
                # sure the check is being called, but we would not be sure that
                # "wait_for_dcos_oss", i.e., the standard procedure for determining
                # whether a node is healthy, is sufficient to prevent the cluster
                # from breaking.
                #
                # We perform this check after every master is replaced, as that is
                # what we tell operators to do: "After installing the new master
                # node, wait until it becomes healthy before proceeding to the
                # next."
                #
                # The procedure for replacing multiple masters is documented here:
                # https://docs.mesosphere.com/1.12/installing/production/upgrading/#dcos-masters
                wait_for_dcos_oss(
                    cluster=new_cluster,
                    request=request,
                    log_dir=log_dir,
                )
                # Use the new cluster object in the next replacement iteration.
                current_cluster = new_cluster

        finally:
            for cluster in tmp_clusters:
                cluster.destroy()
Example #13
def test_replace_all_static(
    artifact_path: Path,
    docker_network_three_available_addresses: Network,
    tmp_path: Path,
    request: SubRequest,
    log_dir: Path,
) -> None:
    """
    In a cluster with an Exhibitor backend consisting of a static ZooKeeper
    ensemble, after removing one master, and then adding another master with
    the same IP address, the cluster will get to a healthy state. This is
    repeated until all masters in the original cluster have been replaced.
    The purpose of this test is to assert that the ``node-poststart``
    procedure correctly prevents a master node replacement from being performed
    too quickly. A new master node should only become part of the cluster if
    there are no more underreplicated ranges reported by CockroachDB.

    Permanent CockroachDB data loss and a potential breakage of DC/OS occurs
    when a second master node is taken down for replacement while CockroachDB
    is recovering and there are still underreplicated ranges due to a recent
    other master node replacement.
    """
    docker_backend = Docker(network=docker_network_three_available_addresses)

    with Cluster(
        cluster_backend=docker_backend,
        # Allocate all 3 available IP addresses in the subnet.
        masters=3,
        agents=0,
        public_agents=0,
    ) as original_cluster:
        master = next(iter(original_cluster.masters))
        result = master.run(
            args=[
                'ifconfig',
                '|', 'grep', '-B1', str(master.public_ip_address),
                '|', 'grep', '-o', '"^\w*"',
            ],
            output=Output.LOG_AND_CAPTURE,
            shell=True,
        )
        interface = result.stdout.strip().decode()
        ip_detect_contents = textwrap.dedent(
            """\
            #!/bin/bash -e
            if [ -f /sbin/ip ]; then
               IP_CMD=/sbin/ip
            else
               IP_CMD=/bin/ip
            fi

            $IP_CMD -4 -o addr show dev {interface} | awk '{{split($4,a,"/");print a[1]}}'
            """.format(interface=interface),
        )
        ip_detect_path = tmp_path / 'ip-detect'
        ip_detect_path.write_text(data=ip_detect_contents)
        static_config = {
            'master_discovery': 'static',
            'master_list': [str(master.private_ip_address)
                            for master in original_cluster.masters],
        }
        dcos_config = {
            **original_cluster.base_config,
            **static_config,
        }
        original_cluster.install_dcos_from_path(
            dcos_installer=artifact_path,
            dcos_config=dcos_config,
            ip_detect_path=ip_detect_path,
        )
        wait_for_dcos_oss(
            cluster=original_cluster,
            request=request,
            log_dir=log_dir,
        )
        current_cluster = original_cluster
        tmp_clusters = set()

        original_masters = original_cluster.masters

        try:
            for master_to_be_replaced in original_masters:
                # Destroy a master and free one IP address.
                original_cluster.destroy_node(node=master_to_be_replaced)

                temporary_cluster = Cluster(
                    cluster_backend=docker_backend,
                    # Allocate one container with the now free IP address.
                    masters=1,
                    agents=0,
                    public_agents=0,
                )
                tmp_clusters.add(temporary_cluster)

                # Install a new master on a new container with the same IP address.
                (new_master, ) = temporary_cluster.masters
                new_master.install_dcos_from_path(
                    dcos_installer=artifact_path,
                    dcos_config=dcos_config,
                    role=Role.MASTER,
                    ip_detect_path=ip_detect_path,
                )
                # Form a new cluster with the newly created master node.
                new_cluster = Cluster.from_nodes(
                    masters=current_cluster.masters.union({new_master}),
                    agents=current_cluster.agents,
                    public_agents=current_cluster.public_agents,
                )
                # The `wait_for_dcos_oss` function waits until the new master has
                # joined the cluster and all masters are healthy. Without the
                # cockroachdb check, this succeeds before all cockroachdb ranges
                # have finished replicating to the new master. That meant that the
                # next master would be replaced too quickly, while it had data that
                # was not present elsewhere in the cluster. This led to
                # irrecoverable data loss. This function waits until the
                # master node is "healthy". This is a requirement for replacing the
                # next master node.
                #
                # We don't call the cockroachdb ranges check directly as the
                # purpose of this test is to ensure that when an operator follows
                # our documented procedure for replacing a master node multiple
                # times in a row (e.g. during a cluster upgrade) then the cluster
                # remains healthy throughout and afterwards.
                #
                # If we called the check directly here, we would be
                # sure the check is being called, but we would not be sure that
                # "wait_for_dcos_oss", i.e., the standard procedure for determining
                # whether a node is healthy, is sufficient to prevent the cluster
                # from breaking.
                #
                # We perform this check after every master is replaced, as that is
                # what we tell operators to do: "After installing the new master
                # node, wait until it becomes healthy before proceeding to the
                # next."
                #
                # The procedure for replacing multiple masters is documented here:
                # https://docs.mesosphere.com/1.12/installing/production/upgrading/#dcos-masters
                wait_for_dcos_oss(
                    cluster=new_cluster,
                    request=request,
                    log_dir=log_dir,
                )
                # Use the new cluster object in the next replacement iteration.
                current_cluster = new_cluster

        finally:
            for cluster in tmp_clusters:
                cluster.destroy()
Example #14
def test_access(
    docker_backend: Docker,
    artifact_path: Path,
    request: SubRequest,
    log_dir: Path,
    rsa_keypair: Tuple[str, str],
    jwt_token: Callable[[str, str, int], str],
    calicoctl: Callable[[List[str], Optional[dict]], dict],
) -> None:
    with Cluster(
            cluster_backend=docker_backend,
            masters=1,
            agents=0,
            public_agents=0,
    ) as cluster:
        uid = str(uuid.uuid4())

        config = {
            'superuser_service_account_uid': uid,
            'superuser_service_account_public_key': rsa_keypair[1],
        }

        cluster.install_dcos_from_path(
            dcos_installer=artifact_path,
            dcos_config={
                **cluster.base_config,
                **config,
            },
            output=Output.LOG_AND_CAPTURE,
            ip_detect_path=docker_backend.ip_detect_path,
        )

        wait_for_dcos_oss(
            cluster=cluster,
            request=request,
            log_dir=log_dir,
        )

        master = next(iter(cluster.masters))
        master_ip = master.public_ip_address
        login_endpoint = 'http://{}/acs/api/v1/auth/login'.format(master_ip)
        service_login_token = jwt_token(uid, rsa_keypair[0], 30)

        token_response = requests.post(login_endpoint,
                                       json={
                                           'uid': uid,
                                           'token': service_login_token
                                       })

        assert token_response.status_code == 200
        token = token_response.json().get('token')
        env = {
            'ETCD_ENDPOINTS': 'http://{}:12379'.format(master_ip),
        }

        result = calicoctl(['get', 'nodes'], env)
        assert 'access denied' in result['stderr']
        assert result['returncode'] != 0

        authorization = 'authorization:token={}'.format(token)
        env['ETCD_CUSTOM_GRPC_METADATA'] = authorization
        result = calicoctl(['get', 'nodes'], env)
        assert 'NAME' in result['stdout']
        assert result['returncode'] == 0
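The `calicoctl` fixture wraps the calicoctl binary. A hypothetical sketch using subprocess, returning the fields the test asserts on:

import os
import subprocess
from typing import Callable, List, Optional

import pytest

@pytest.fixture
def calicoctl() -> Callable[[List[str], Optional[dict]], dict]:
    def _run(args: List[str], env: Optional[dict] = None) -> dict:
        # Merge the extra environment (e.g. ETCD_ENDPOINTS) into the
        # inherited one and capture stdout/stderr for the assertions.
        result = subprocess.run(
            ['calicoctl'] + args,
            env={**os.environ, **(env or {})},
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        return {
            'stdout': result.stdout.decode(),
            'stderr': result.stderr.decode(),
            'returncode': result.returncode,
        }
    return _run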
Example #15
def test_adminrouter_grpc_proxy_port(docker_backend: Docker,
                                     artifact_path: Path,
                                     request: SubRequest,
                                     log_dir: Path,
                                     rsa_keypair: Tuple[str, str],
                                     jwt_token: Callable[[str, str, int], str]
                                     ) -> None:
    random_port = random.randint(63000, 64000)

    with Cluster(
            cluster_backend=docker_backend,
            masters=1,
            agents=0,
            public_agents=0,
    ) as cluster:
        uid = str(uuid.uuid4())

        config = {
            'superuser_service_account_uid': uid,
            'superuser_service_account_public_key': rsa_keypair[1],
            'adminrouter_grpc_proxy_port': '{}'.format(random_port),
        }

        cluster.install_dcos_from_path(
            dcos_installer=artifact_path,
            dcos_config={
                **cluster.base_config,
                **config,
            },
            output=Output.LOG_AND_CAPTURE,
            ip_detect_path=docker_backend.ip_detect_path,
        )

        wait_for_dcos_oss(
            cluster=cluster,
            request=request,
            log_dir=log_dir,
        )

        master = next(iter(cluster.masters))
        master_ip = master.public_ip_address
        login_endpoint = 'http://{}/acs/api/v1/auth/login'.format(master_ip)
        service_login_token = jwt_token(uid, rsa_keypair[0], 30)

        token_response = requests.post(
            login_endpoint,
            json={'uid': uid, 'token': service_login_token}
        )

        assert token_response.status_code == 200
        token = token_response.json().get('token')

        etcd = etcd3.Etcd3Client(
            host=str(master_ip),
            port=random_port,
            timeout=None,
        )
        etcd.metadata = (('authorization', 'token={}'.format(token)),)
        etcd.watcher = etcd3.watch.Watcher(
            etcd3.etcdrpc.WatchStub(etcd.channel),
            timeout=etcd.timeout,
            call_credentials=etcd.call_credentials,
            metadata=etcd.metadata
        )

        value, meta = etcd.get('probably-invalid-key')
        assert value is None
        assert meta is None
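As a hypothetical follow-up (not in the original test), the same authenticated client could round-trip a key to show that writes through the proxied gRPC port also work:

# Hypothetical extension, reusing the authenticated etcd3 client from above.
etcd.put('dcos-e2e-test-key', 'test-value')
value, _ = etcd.get('dcos-e2e-test-key')
assert value == b'test-value'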