Example #1
    def test_get_nodes(self):
        # GIVEN
        core_mock = Mock()
        nodes_list = V1NodeList(items=[
            V1Node(api_version='v1',
                   kind='Node',
                   metadata=V1ObjectMeta(
                       name='kip-node',
                       labels={KIP_NODE_LABEL_KEY: KIP_NODE_LABEL_VALUE})),
            V1Node(api_version='v1',
                   kind='Node',
                   metadata=V1ObjectMeta(
                       name='other-node',
                       labels={KIP_NODE_LABEL_KEY: 'other-value'})),
            V1Node(api_version='v1',
                   kind='Node',
                   metadata=V1ObjectMeta(name='other-node-2',
                                         labels={'other-key': 'other-value'})),
        ])
        core_mock.list_node.return_value = nodes_list
        cluster_cost = ClusterCost(core_mock, Mock())
        physical_nodes = ['other-node', 'other-node-2']

        # WHEN
        nodes = cluster_cost.get_nodes()

        # THEN
        for node in nodes:
            self.assertNotEqual(node.name, 'kip-node')
            self.assertIn(node.name, physical_nodes)
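
The ClusterCost class itself is not part of this listing. A minimal sketch of a get_nodes method that would satisfy this test might look like the following; only the class name and the KIP label constants are taken from the test above, everything else is an assumption.

# Hypothetical sketch -- not the real ClusterCost implementation.
class ClusterCost:
    def __init__(self, core_api, metrics_api):
        self._core_api = core_api
        self._metrics_api = metrics_api

    def get_nodes(self):
        # Return metadata of all physical nodes, skipping virtual kip nodes
        # identified by the KIP label.
        nodes = self._core_api.list_node().items
        return [
            node.metadata
            for node in nodes
            if (node.metadata.labels or {}).get(KIP_NODE_LABEL_KEY) != KIP_NODE_LABEL_VALUE
        ]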
Example #2
def mocked_k8s_CoreV1Api(mocker):
    mocked_coreV1Api_class = mocker.patch('kubernetes.client.CoreV1Api')
    mocker.patch('kubernetes.client.ApiClient')
    coreV1API_instance = mocked_coreV1Api_class.return_value

    pods_mock = MagicMock()
    pods_mock.items = [
        MagicMock(spec=V1Pod),
        MagicMock(spec=V1Pod),
        MagicMock(spec=V1Pod)
    ]
    coreV1API_instance.list_pod_for_all_namespaces.return_value = pods_mock

    services_mock = MagicMock()
    services_mock.items = [
        MagicMock(spec=V1Service),
        MagicMock(spec=V1Service),
        MagicMock(spec=V1Service)
    ]
    coreV1API_instance.list_service_for_all_namespaces.return_value = services_mock

    v1_namespace = V1Namespace()
    v1_metadata_namespace = V1ObjectMeta(name=test_namespace)
    v1_namespace.metadata = v1_metadata_namespace
    v1_namespace_status = V1NamespaceStatus(phase=NamespaceStatus.ACTIVE.value)
    v1_namespace.status = v1_namespace_status

    coreV1API_instance.read_namespace.return_value = v1_namespace
    coreV1API_instance.delete_namespace.return_value = V1Status(
        status="{'phase': 'Terminating'}")

    v1_config_map = V1ConfigMap(data=test_config_map_data())

    coreV1API_instance.read_namespaced_config_map.return_value = v1_config_map

    secret_data = {"token": TEST_TOKEN}
    v1_metadata_secret = V1ObjectMeta(name="default-token")
    v1_secret = V1Secret(metadata=v1_metadata_secret, data=secret_data)
    v1_secret_list = V1SecretList(items=[v1_secret])

    coreV1API_instance.list_namespaced_secret.return_value = v1_secret_list

    v1_pod_status = V1PodStatus(phase=K8S_RUNNING_POD_STATUS)
    v1_pod = V1Pod(status=v1_pod_status)
    v1_pod_lists = V1PodList(items=[v1_pod])

    coreV1API_instance.list_namespaced_pod.return_value = v1_pod_lists

    v1_metadata_event = V1ObjectMeta(name="default-name")
    v1_object = V1ObjectReference(name="pod_name")
    v1_event = V1Event(message="Insufficient cpu",
                       involved_object=v1_object,
                       metadata=v1_metadata_event)
    v1_event_list = V1EventList(items=[v1_event])

    coreV1API_instance.list_namespaced_event.return_value = v1_event_list

    return coreV1API_instance
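
When this helper is used from a pytest test, the patched class has to be resolved at call time so that kubernetes.client.CoreV1Api() returns the prepared mock. A usage sketch, assuming the pytest-mock plugin provides the mocker fixture; the fixture and test names below are placeholders.

# Hypothetical usage sketch; fixture and test names are assumptions.
import kubernetes.client
import pytest

@pytest.fixture
def core_v1_api_mock(mocker):
    return mocked_k8s_CoreV1Api(mocker)

def test_lists_three_pods(core_v1_api_mock):
    # The constructor is patched, so this returns the prepared mock instance.
    api = kubernetes.client.CoreV1Api()
    pods = api.list_pod_for_all_namespaces()
    assert len(pods.items) == 3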
Example #3
async def aw():
    await fw.add(V1Service(metadata=V1ObjectMeta(name="rejected")))
    await fw.delete(V1Pod(metadata=V1ObjectMeta(name="foo")))
    await fw.modify(
        V1Pod(metadata=V1ObjectMeta(name="bar", resource_version="55"))
    )
    await fw.add(
        V1Pod(metadata=V1ObjectMeta(name="baz", resource_version="32"))
    )
    await fw.stop()
Example #4
def test_get_names_in_job():

    pod_list = V1PodList(items=[
        V1Pod(metadata=V1ObjectMeta(name='foo')),
        V1Pod(metadata=V1ObjectMeta(name='bar'))
    ])
    mock_client = create_mocked_client()

    mock_client.core_api.list_namespaced_pod.side_effect = [pod_list]

    assert mock_client.get_pod_names_in_job('job',
                                            'namespace') == ['foo', 'bar']
Example #5
def test_get_names_in_job():

    pod_list = V1PodList(items=[
        V1Pod(metadata=V1ObjectMeta(name="foo")),
        V1Pod(metadata=V1ObjectMeta(name="bar"))
    ])
    mock_client = create_mocked_client()

    mock_client.core_api.list_namespaced_pod.side_effect = [pod_list]

    assert mock_client.get_pod_names_in_job("job",
                                            "namespace") == ["foo", "bar"]
Example #6
    async def test_listener_resync_periods(self):
        source = fake_controller_source.FakeControllerSource()
        await source.add(V1Pod(metadata=V1ObjectMeta(name="pod1")))
        await source.add(V1Pod(metadata=V1ObjectMeta(name="pod2")))

        informer = new_shared_informer(source, V1Pod, 1)

        clock_ = clock.FakeClock(time.time())
        informer._clock = clock_
        informer._processor._clock = clock_

        listener1 = TestListener("listener1", 0, "pod1", "pod2")
        await informer.add_event_handler(
            listener1, resync_period=listener1._resync_period)

        listener2 = TestListener("listener2", 2, "pod1", "pod2")
        await informer.add_event_handler(
            listener2, resync_period=listener2._resync_period)

        listener3 = TestListener("listener3", 3, "pod1", "pod2")
        await informer.add_event_handler(
            listener3, resync_period=listener3._resync_period)
        listeners = [listener1, listener2, listener3]

        try:
            task = asyncio.ensure_future(informer.run())

            for listener in listeners:
                self.assertTrue(await listener._ok())

            for listener in listeners:
                listener._received_item_names = []

            await clock_.step(2)
            self.assertTrue(await listener2._ok())

            await asyncio.sleep(1)
            self.assertEqual(len(listener1._received_item_names), 0)
            self.assertEqual(len(listener3._received_item_names), 0)

            for listener in listeners:
                listener._received_item_names = []

            await clock_.step(1)
            self.assertTrue(await listener3._ok())

            await asyncio.sleep(1)
            self.assertEqual(len(listener1._received_item_names), 0)
            self.assertEqual(len(listener2._received_item_names), 0)
        finally:
            task.cancel()
            await asyncio.gather(task, return_exceptions=True)
Example #7
def test_docker_pull_config_secret():
    pull_config_str = '{"auths":{"example.com":{"username":"******","password":"******",'\
                      '"email":"*****@*****.**","auth":"f00BA7"}}}'

    migrated_dcos_secret = V1Secret(
        kind='Secret',
        api_version='v1',
        metadata=V1ObjectMeta(name='nothing-depends-on-this-name'),
        data={'nothing-depends-on-the-name-of-this-key': pull_config_str})

    input_manifest_list = ManifestList()
    input_manifest_list.append(
        Manifest(pluginName="secret",
                 manifestName="foo.docker-c_nfig",
                 data=[migrated_dcos_secret]))

    app = {
        "id": "/foo/barify",
        "container": {
            "docker": {
                "pullConfig": {
                    "secret": "pull-config"
                }
            }
        },
        "env": {
            "BAR": {
                "secret": "pull-config"
            }
        },  # See the NOTE below
        "secrets": {
            "pull-config": {
                "source": "/foo/docker-c@nfig"
            },
            "unused": {
                "source": "unused"
            },
        },
    }

    migrator = MarathonMigrator(object=app, manifest_list=input_manifest_list)
    manifest = migrator.migrate()

    # NOTE: This test expects that two secrets will be created:
    # one for the image pull config and another for everything else.
    # This might not be the optimal migration strategy.
    [deployment] = [m for m in manifest if isinstance(m, V1Deployment)]

    [pull_secret] = [m for m in manifest \
        if isinstance(m, V1Secret) and m.type == "kubernetes.io/dockerconfigjson"]

    [generic_secret] = [m for m in manifest \
        if isinstance(m, V1Secret) and m.type != "kubernetes.io/dockerconfigjson"]

    assert deployment.spec.template.spec.image_pull_secrets[
        0].name == pull_secret.metadata.name

    assert pull_secret.data[".dockerconfigjson"] == pull_config_str

    assert generic_secret.data["foo.docker-c_nfig"] == pull_config_str
Example #8
def make_service(
    name,
    port,
    labels=None,
    annotations=None,
):
    """
    Make a k8s service specification for fronting the pod running a user notebook.

    Parameters
    ----------
    name:
        Name of the service. Must be a valid DNS label.
    port:
        The port to which the service binds.
    labels:
        Labels to add to the spawned service.
    annotations:
        Annotations to add to the spawned service.

    """
    return V1Service(
            kind='Service',
            metadata=V1ObjectMeta(name=name, labels=labels, annotations=annotations),
            spec=V1ServiceSpec(
                type='ClusterIP',
                selector={'hub.jupyter.org/server-name': name},
                ports=[V1ServicePort(port=port, target_port=port)]
            )
        )
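
A usage sketch for make_service; the namespace, port, and label values below are placeholders, and the service is submitted with CoreV1Api.create_namespaced_service.

# Usage sketch -- all argument values are assumptions.
from kubernetes import client, config

config.load_kube_config()
service = make_service(
    name="jupyter-example",
    port=8888,
    labels={"app": "jupyterhub"},
    annotations={"owner": "example"},
)
client.CoreV1Api().create_namespaced_service(namespace="default", body=service)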
Example #9
    def migrate(self, backupList: BackupList, manifestList: ManifestList,
                **kwargs: Any) -> ManifestList:
        ml = ManifestList()

        for ba in backupList.backups(pluginName='secret'):
            assert isinstance(ba, Backup)
            metadata = V1ObjectMeta()
            metadata.annotations = {}

            clusterMeta = manifestList.clusterMeta()
            if clusterMeta:
                metadata.annotations = clusterMeta.annotations

            logging.debug("Found backup {}".format(ba))
            b = ba.data
            fullPath = "/".join(filter(None, [b["path"], b["key"]]))
            name = b["key"]

            metadata.annotations[utils.namespace_path(
                "secret-path")] = fullPath
            metadata.name = utils.make_subdomain(name.split('/'))
            sec = V1Secret(metadata=metadata)
            sec.api_version = 'v1'
            sec.kind = 'Secret'
            # K8s requires secret values to be base64-encoded.  The secret value
            # is base64-encoded during backup so it can be passed as-is here.
            sec.data = {utils.dnsify(name): b['value']}

            manifest = Manifest(pluginName=self.plugin_name,
                                manifestName=utils.dnsify(fullPath))
            manifest.append(sec)

            ml.append(manifest)

        return ml
Example #10
def make_pvc(name, storage_class, access_modes, storage, labels):
    """
    Make a k8s pvc specification for running a user notebook.

    Parameters:
      - name:
        Name of the persistent volume claim. Must be unique within the namespace the
        object is going to be created in. Must be a valid DNS label.
      - storage_class:
        Name of the k8s storage class to use.
      - access_modes:
        A list specifying which access modes the pod should have towards the pvc.
      - storage:
        The amount of storage needed for the pvc.
      - labels:
        Labels to add to the pvc metadata.

    """
    pvc = V1PersistentVolumeClaim()
    pvc.kind = "PersistentVolumeClaim"
    pvc.api_version = "v1"
    pvc.metadata = V1ObjectMeta()
    pvc.metadata.name = name
    pvc.metadata.annotations = {}
    if storage_class:
        pvc.metadata.annotations.update(
            {"volume.beta.kubernetes.io/storage-class": storage_class})
    pvc.metadata.labels = {}
    pvc.metadata.labels.update(labels)
    pvc.spec = V1PersistentVolumeClaimSpec()
    pvc.spec.access_modes = access_modes
    pvc.spec.resources = V1ResourceRequirements()
    pvc.spec.resources.requests = {"storage": storage}

    return pvc
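
A usage sketch for make_pvc; every argument value is a placeholder, and the claim is submitted with CoreV1Api.create_namespaced_persistent_volume_claim.

# Usage sketch -- all argument values are assumptions.
from kubernetes import client, config

config.load_kube_config()
pvc = make_pvc(
    name="claim-example",
    storage_class="standard",
    access_modes=["ReadWriteOnce"],
    storage="10Gi",
    labels={"app": "jupyterhub"},
)
client.CoreV1Api().create_namespaced_persistent_volume_claim(namespace="default", body=pvc)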
Example #11
def test_long_running_job():
    mock_client = create_mocked_client()

    job_name = "a_job"
    namespace = "a_namespace"

    a_job_metadata = V1ObjectMeta(name=job_name)

    a_job_is_launched_list = V1JobList(items=[V1Job(metadata=a_job_metadata)])
    mock_client.batch_api.list_namespaced_job.side_effect = [
        a_job_is_launched_list
    ]

    # running job
    running_job = V1Job(metadata=a_job_metadata,
                        status=V1JobStatus(failed=0, succeeded=0))
    completed_job = V1Job(metadata=a_job_metadata,
                          status=V1JobStatus(failed=0, succeeded=1))

    mock_client.batch_api.read_namespaced_job_status.side_effect = [
        running_job, completed_job
    ]

    mock_client.wait_for_job_success(job_name, namespace)

    # slept once waiting for job to complete
    assert len(mock_client.sleeper.mock_calls) == 1
Example #12
def test_wait_for_job_success():
    mock_client = create_mocked_client()

    job_name = "a_job"
    namespace = "a_namespace"

    a_job_metadata = V1ObjectMeta(name=job_name)

    a_job_is_launched_list = V1JobList(items=[V1Job(metadata=a_job_metadata)])
    mock_client.batch_api.list_namespaced_job.side_effect = [
        a_job_is_launched_list
    ]

    completed_job = V1Job(metadata=a_job_metadata,
                          status=V1JobStatus(failed=0, succeeded=1))
    mock_client.batch_api.read_namespaced_job_status.side_effect = [
        completed_job
    ]

    mock_client.wait_for_job_success(job_name, namespace)

    # logger should not have been called
    assert not mock_client.logger.mock_calls
    # sleeper should not have been called
    assert not mock_client.sleeper.mock_calls
Example #13
def test_wait_for_job_with_api_errors_retry_limit_exceeded():
    mock_client = create_mocked_client()

    job_name = "a_job"

    a_job_metadata = V1ObjectMeta(name=job_name)

    not_launched_yet_list = V1JobList(items=[])
    a_job_is_launched_list = V1JobList(items=[V1Job(metadata=a_job_metadata)])
    mock_client.batch_api.list_namespaced_job.side_effect = [
        kubernetes.client.rest.ApiException(status=504,
                                            reason="Gateway Timeout"),
        kubernetes.client.rest.ApiException(status=504,
                                            reason="Gateway Timeout"),
        kubernetes.client.rest.ApiException(status=504,
                                            reason="Gateway Timeout"),
        kubernetes.client.rest.ApiException(status=504,
                                            reason="Gateway Timeout"),
        not_launched_yet_list,
        a_job_is_launched_list,
    ]

    completed_job = V1Job(metadata=a_job_metadata,
                          status=V1JobStatus(failed=0, succeeded=1))
    mock_client.batch_api.read_namespaced_job_status.side_effect = [
        completed_job
    ]

    with pytest.raises(DagsterK8sAPIRetryLimitExceeded):
        mock_client.wait_for_job_success("a_job", "a_namespace")

    # 4 attempts with errors
    assert len(mock_client.batch_api.list_namespaced_job.mock_calls) == 4
Example #14
def test_wait_for_job_not_launched():
    mock_client = create_mocked_client()

    job_name = "a_job"
    namespace = "a_namespace"

    a_job_metadata = V1ObjectMeta(name=job_name)

    not_launched_yet_list = V1JobList(items=[])
    a_job_is_launched_list = V1JobList(items=[V1Job(metadata=a_job_metadata)])
    mock_client.batch_api.list_namespaced_job.side_effect = [
        not_launched_yet_list,
        a_job_is_launched_list,
    ]

    completed_job = V1Job(metadata=a_job_metadata,
                          status=V1JobStatus(failed=0, succeeded=1))
    mock_client.batch_api.read_namespaced_job_status.side_effect = [
        completed_job
    ]

    mock_client.wait_for_job_success(job_name, namespace)

    assert_logger_calls(mock_client.logger,
                        ['Job "a_job" not yet launched, waiting'])

    assert len(mock_client.sleeper.mock_calls) == 1
Example #15
def test_wait_for_job_with_api_errors():
    mock_client = create_mocked_client()

    job_name = "a_job"
    namespace = "a_namespace"

    a_job_metadata = V1ObjectMeta(name=job_name)

    not_launched_yet_list = V1JobList(items=[])
    a_job_is_launched_list = V1JobList(items=[V1Job(metadata=a_job_metadata)])
    mock_client.batch_api.list_namespaced_job.side_effect = [
        kubernetes.client.rest.ApiException(status=504,
                                            reason="Gateway Timeout"),
        kubernetes.client.rest.ApiException(status=504,
                                            reason="Gateway Timeout"),
        not_launched_yet_list,
        a_job_is_launched_list,
    ]

    completed_job = V1Job(metadata=a_job_metadata,
                          status=V1JobStatus(failed=0, succeeded=1))
    mock_client.batch_api.read_namespaced_job_status.side_effect = [
        completed_job
    ]

    mock_client.wait_for_job_success(job_name, namespace)

    # 2 attempts with errors + 1 not launched + 1 launched
    assert len(mock_client.batch_api.list_namespaced_job.mock_calls) == 4
Example #16
def test_wait_for_job_success_with_api_errors():
    mock_client = create_mocked_client()

    job_name = "a_job"
    namespace = "a_namespace"

    a_job_metadata = V1ObjectMeta(name=job_name)

    a_job_is_launched_list = V1JobList(items=[V1Job(metadata=a_job_metadata)])
    mock_client.batch_api.list_namespaced_job.side_effect = [
        a_job_is_launched_list
    ]

    completed_job = V1Job(metadata=a_job_metadata,
                          status=V1JobStatus(failed=0, succeeded=1))
    mock_client.batch_api.read_namespaced_job_status.side_effect = [
        kubernetes.client.rest.ApiException(status=503,
                                            reason="Service unavailable"),
        kubernetes.client.rest.ApiException(status=504,
                                            reason="Gateway Timeout"),
        completed_job,
    ]

    mock_client.wait_for_job_success(job_name, namespace)

    # logger should not have been called
    assert not mock_client.logger.mock_calls
    # sleeper should not have been called
    assert not mock_client.sleeper.mock_calls

    # 2 attempts with errors + 1 SUCCESS
    assert len(
        mock_client.batch_api.read_namespaced_job_status.mock_calls) == 3
Example #17
def test_wait_for_job_success_with_unrecoverable_api_errors():
    mock_client = create_mocked_client()

    job_name = "a_job"

    a_job_metadata = V1ObjectMeta(name=job_name)

    a_job_is_launched_list = V1JobList(items=[V1Job(metadata=a_job_metadata)])
    mock_client.batch_api.list_namespaced_job.side_effect = [
        a_job_is_launched_list
    ]

    mock_client.batch_api.read_namespaced_job_status.side_effect = [
        kubernetes.client.rest.ApiException(status=504,
                                            reason="Gateway Timeout"),
        kubernetes.client.rest.ApiException(status=429,
                                            reason="Too many requests"),
    ]

    with pytest.raises(DagsterK8sUnrecoverableAPIError) as exc_info:
        mock_client.wait_for_job_success("a_job", "a_namespace")

    assert "Unexpected error encountered in Kubernetes API Client." in str(
        exc_info.value)

    # logger should not have been called
    assert not mock_client.logger.mock_calls
    # sleeper should not have been called
    assert not mock_client.sleeper.mock_calls

    # 1 retryable error + 1 unrecoverable error
    assert len(
        mock_client.batch_api.read_namespaced_job_status.mock_calls) == 2
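
Taken together, Examples #11 through #17 pin down the retry behaviour of wait_for_job_success: 503/504 responses are retried, the retry budget is bounded, other statuses surface as unrecoverable errors, and the client sleeps between polls while the job is still running. The real Dagster client is not shown in this listing; a heavily simplified polling loop consistent with that behaviour might look like the following (names, exception types, and limits are assumptions).

# Hypothetical sketch only -- not the actual Dagster K8s client.
import time
import kubernetes

RETRYABLE_STATUSES = (503, 504)

def wait_for_job_success_sketch(batch_api, job_name, namespace,
                                max_retries=3, poll_interval=1.0):
    retries = 0
    while True:
        try:
            job = batch_api.read_namespaced_job_status(job_name, namespace)
        except kubernetes.client.rest.ApiException as exc:
            if exc.status in RETRYABLE_STATUSES and retries < max_retries:
                retries += 1
                continue
            raise RuntimeError(
                "Unexpected error encountered in Kubernetes API Client.") from exc
        if job.status.failed:
            raise RuntimeError("Job {} failed".format(job_name))
        if job.status.succeeded:
            return
        time.sleep(poll_interval)  # the tests above replace this with a mock 'sleeper'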
Example #18
def build_crd(kind='MyCustom',
              singular='mycustom',
              plural='mycustoms',
              group='example.com',
              scope='Namespaced',
              versions=['v1']):
    versions_list = []
    for idx, version in enumerate(versions):
        served = idx == 0
        storage = idx == 0
        version_def = V1beta1CustomResourceDefinitionVersion(name=version,
                                                             served=served,
                                                             storage=storage)
        versions_list.append(version_def)
    spec = V1beta1CustomResourceDefinitionSpec(
        group=group,
        names=V1beta1CustomResourceDefinitionNames(plural=plural,
                                                   singular=singular,
                                                   kind=kind),
        scope=scope,
        versions=versions_list)
    crd = V1beta1CustomResourceDefinition(
        # The top-level kind of the CRD manifest itself is always
        # 'CustomResourceDefinition'; the custom kind goes into spec.names above.
        api_version=DEFAULT_CRD_API_VERSION,
        kind='CustomResourceDefinition',
        metadata=V1ObjectMeta(name=f'{plural}.{group}'),
        spec=spec)
    return crd
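
A usage sketch for build_crd, assuming a kubernetes client release that still ships the apiextensions.k8s.io/v1beta1 models used above and that DEFAULT_CRD_API_VERSION is defined alongside build_crd to resolve to that API version.

# Usage sketch -- names and versions below are assumptions.
from kubernetes import client, config

config.load_kube_config()
crd = build_crd(kind='Widget', singular='widget', plural='widgets',
                group='example.com', versions=['v1alpha1', 'v1'])
client.ApiextensionsV1beta1Api().create_custom_resource_definition(body=crd)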
Example #19
def _create_remapped_secret(
    manifest_list: Optional[ManifestList],
    remapping: SecretRemapping,
    app_id: str,
) -> Optional[V1Secret]:

    if not remapping.key_mapping:
        return None

    assert manifest_list is not None
    clusterMeta: Optional[V1ObjectMeta] = manifest_list.clusterMeta()

    metadata = V1ObjectMeta(annotations={})
    if clusterMeta is not None:
        metadata.annotations = clusterMeta.annotations

    metadata.annotations[utils.namespace_path("marathon-appid")] = app_id
    metadata.name = utils.dnsify(remapping.dest_name)
    secret = V1Secret(metadata=metadata, data={})
    secret.api_version = 'v1'
    secret.kind = 'Secret'
    if remapping.dest_type is not None:
        secret.type = remapping.dest_type

    for source_key, destination_key in remapping.key_mapping.items():
        sourceSecret = manifest_list.manifest(pluginName='secret', manifestName=source_key)
        if not sourceSecret:
            raise NoMigratedSecretFound('No migrated secret "{}" found'.format(source_key))

        [value] = sourceSecret[0].data.values()
        secret.data[destination_key] = value

    return secret
Example #20
def make_secret(
    name,
    username,
    cert_paths,
    hub_ca,
    owner_references,
    labels=None,
    annotations=None,
):
    """
    Make a k8s secret specification using pre-existing ssl credentials for a given user.

    Parameters
    ----------
    name:
        Name of the secret. Must be unique within the namespace the object is
        going to be created in.
    username:
        The name of the user notebook.
    cert_paths:
        JupyterHub spawner's cert_paths dictionary containing certificate path references
    hub_ca:
        Path to the hub certificate authority
    labels:
        Labels to add to the secret.
    annotations:
        Annotations to add to the secret.
    """

    secret = V1Secret()
    secret.kind = "Secret"
    secret.api_version = "v1"
    secret.metadata = V1ObjectMeta()
    secret.metadata.name = name
    secret.metadata.annotations = (annotations or {}).copy()
    secret.metadata.labels = (labels or {}).copy()
    secret.metadata.owner_references = owner_references

    secret.data = {}

    with open(cert_paths['keyfile'], 'r') as file:
        encoded = base64.b64encode(file.read().encode("utf-8"))
        secret.data['ssl.key'] = encoded.decode("utf-8")

    with open(cert_paths['certfile'], 'r') as file:
        encoded = base64.b64encode(file.read().encode("utf-8"))
        secret.data['ssl.crt'] = encoded.decode("utf-8")

    with open(cert_paths['cafile'], 'r') as file:
        encoded = base64.b64encode(file.read().encode("utf-8"))
        secret.data["notebooks-ca_trust.crt"] = encoded.decode("utf-8")

    with open(hub_ca, 'r') as file:
        encoded = base64.b64encode(file.read().encode("utf-8"))
        secret.data["notebooks-ca_trust.crt"] = secret.data[
            "notebooks-ca_trust.crt"
        ] + encoded.decode("utf-8")

    return secret
Example #21
def make_ingress(name, routespec, target, data):
    """
    Returns endpoint, service, and ingress objects that together route this routespec to the target
    """
    meta = V1ObjectMeta(name=name,
                        annotations={
                            'hub.jupyter.org/proxy-data': json.dumps(data),
                            'hub.jupyter.org/proxy-routespec': routespec,
                            'hub.jupyter.org/proxy-target': target
                        },
                        labels={
                            'heritage': 'jupyterhub',
                            'component': 'singleuser-server',
                            'hub.jupyter.org/proxy-route': 'true'
                        })

    if routespec.startswith('/'):
        host = None
        path = routespec
    else:
        host, path = routespec.split('/', 1)

    target_parts = urlparse(target)

    target_ip = target_parts.hostname
    target_port = target_parts.port

    # Make endpoint object
    endpoint = V1Endpoints(kind='Endpoints',
                           metadata=meta,
                           subsets=[
                               V1EndpointSubset(
                                   addresses=[V1EndpointAddress(ip=target_ip)],
                                   ports=[V1EndpointPort(port=target_port)])
                           ])

    # Make service object
    service = V1Service(
        kind='Service',
        metadata=meta,
        spec=V1ServiceSpec(
            ports=[V1ServicePort(port=target_port, target_port=target_port)]))

    # Make Ingress object
    ingress = V1beta1Ingress(
        kind='Ingress',
        metadata=meta,
        spec=V1beta1IngressSpec(rules=[
            V1beta1IngressRule(host=host,
                               http=V1beta1HTTPIngressRuleValue(paths=[
                                   V1beta1HTTPIngressPath(
                                       path=path,
                                       backend=V1beta1IngressBackend(
                                           service_name=name,
                                           service_port=target_port))
                               ]))
        ]))

    return endpoint, service, ingress
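
A usage sketch for make_ingress; namespace and route values are placeholders. In client releases where V1beta1Ingress maps to the networking.k8s.io/v1beta1 group, the three objects can be submitted as follows.

# Usage sketch -- all argument values are assumptions.
from kubernetes import client, config

config.load_kube_config()
endpoint, service, ingress = make_ingress(
    name="jupyter-route",
    routespec="/user/example/",
    target="http://10.0.0.12:8888",
    data={"user": "example"},
)
core = client.CoreV1Api()
core.create_namespaced_endpoints(namespace="default", body=endpoint)
core.create_namespaced_service(namespace="default", body=service)
client.NetworkingV1beta1Api().create_namespaced_ingress(namespace="default", body=ingress)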
Example #22
def test_delete_deployment(client, api):
    depl1 = V1Deployment(metadata=V1ObjectMeta(name="depl1"))
    depl2 = V1Deployment(metadata=V1ObjectMeta(name="depl2"))
    v1 = MagicMock()
    client.AppsV1beta1Api.return_value = v1
    v1.list_namespaced_deployment.return_value = V1DeploymentList(
        items=(depl1, depl2))

    delete_deployment("fake_name", "fake_ns")

    v1.list_namespaced_deployment.assert_called_with("fake_ns",
                                                     label_selector=ANY)
    v1.delete_namespaced_deployment.assert_has_calls(calls=[
        call(depl1.metadata.name, "fake_ns", body=ANY),
        call(depl2.metadata.name, "fake_ns", body=ANY)
    ],
                                                     any_order=True)
Example #23
    async def test_get_index_func_values(self):
        index = store.new_indexer(store.meta_namespace_key_func,
                                  Indexers(testmodes=test_index_func))

        pod1 = V1Pod(metadata=V1ObjectMeta(name="one", labels={"foo": "bar"}))
        pod2 = V1Pod(metadata=V1ObjectMeta(name="two", labels={"foo": "bar"}))
        pod3 = V1Pod(metadata=V1ObjectMeta(name="tre", labels={"foo": "biz"}))

        await index.add(pod1)
        await index.add(pod2)
        await index.add(pod3)

        keys = await index.list_index_func_values("testmodes")
        self.assertEqual(len(keys), 2)

        for key in keys:
            self.assertIn(key, ("bar", "biz"))
Example #24
def helm_upgrade(name, namespace, chart, config_files,
                 config_overrides_implicit, config_overrides_string, version,
                 timeout, force, atomic, cleanup_on_fail):
    # Clear charts and do a helm dep up before installing
    # Clearing charts is important so we don't deploy charts that
    # have been removed from requirements.yaml
    # FIXME: verify if this is actually true
    if os.path.exists(chart):
        shutil.rmtree(os.path.join(chart, 'charts'), ignore_errors=True)
        subprocess.check_call([HELM_EXECUTABLE, 'dep', 'up'], cwd=chart)

    # Create namespace explicitly, since helm3 removes support for it
    # See https://github.com/helm/helm/issues/6794
    # helm2 only creates the namespace if it doesn't exist, so we should be fine
    kubeconfig = os.environ.get("KUBECONFIG", None)

    try:
        kubernetes.config.load_kube_config(config_file=kubeconfig)
    except Exception:
        kubernetes.config.load_incluster_config()

    api = CoreV1Api()
    try:
        api.read_namespace(namespace)
    except rest.ApiException as e:
        if e.status == 404:
            # Create namespace
            print(f"Namespace {namespace} does not exist, creating it...")
            api.create_namespace(
                V1Namespace(metadata=V1ObjectMeta(name=namespace)))
        else:
            raise

    cmd = [
        HELM_EXECUTABLE,
        'upgrade',
        '--wait',
        '--install',
        '--namespace',
        namespace,
        name,
        chart,
    ]
    if version:
        cmd += ['--version', version]
    if timeout:
        cmd += ['--timeout', timeout]
    if force:
        cmd += ['--force']
    if atomic:
        cmd += ['--atomic']
    if cleanup_on_fail:
        cmd += ['--cleanup-on-fail']
    cmd += itertools.chain(*[['-f', cf] for cf in config_files])
    cmd += itertools.chain(*[['--set', v] for v in config_overrides_implicit])
    cmd += itertools.chain(*[['--set-string', v]
                             for v in config_overrides_string])
    subprocess.check_call(cmd)
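
A call to helm_upgrade might look like the following; every value is a placeholder.

# Hypothetical invocation -- all argument values are placeholders.
helm_upgrade(
    name="jhub",
    namespace="jhub",
    chart="./jupyterhub",
    config_files=["config.yaml"],
    config_overrides_implicit=[],
    config_overrides_string=[],
    version=None,
    timeout=None,
    force=False,
    atomic=False,
    cleanup_on_fail=True,
)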
Example #25
def make_namespace(name, labels=None, annotations=None):
    """
    Make a k8s namespace specification for a user pod.
    """

    metadata = V1ObjectMeta(
        name=name, labels=(labels or {}).copy(), annotations=(annotations or {}).copy()
    )

    return V1Namespace(metadata=metadata)
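
A usage sketch for make_namespace; the name and labels are placeholders, and create_namespace is the same CoreV1Api call already used in the helm_upgrade example above.

# Usage sketch -- argument values are assumptions.
from kubernetes import client, config

config.load_kube_config()
ns = make_namespace("user-example", labels={"heritage": "jupyterhub"})
client.CoreV1Api().create_namespace(body=ns)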
Example #26
def test_create_deployment(client, api):
    v1 = MagicMock()
    client.ExtensionsV1beta1Api.return_value = v1

    v1.list_namespaced_replica_set.return_value = V1ReplicaSetList(items=(
        V1ReplicaSet(metadata=V1ObjectMeta(name="repl1")),
        V1ReplicaSet(metadata=V1ObjectMeta(name="repl2"))
    ))

    delete_replica_set("fake", "fake_ns")

    v1.list_namespaced_replica_set.assert_called_with("fake_ns", label_selector="name in (fake)")
    v1.delete_namespaced_replica_set.assert_has_calls(
        [
            call("repl1", "fake_ns", body=ANY),
            call("repl2", "fake_ns", body=ANY)
        ],
        any_order=True
    )
Example #27
    async def test_watch_handler(self):
        s = store.new_store(store.meta_namespace_key_func)
        g = Reflector(TestLW.__new__(TestLW), V1Pod, s, 0)
        fw = watch.new_fake()
        await s.add(V1Pod(metadata=V1ObjectMeta(name="foo")))
        await s.add(V1Pod(metadata=V1ObjectMeta(name="bar")))

        async def aw():
            await fw.add(V1Service(metadata=V1ObjectMeta(name="rejected")))
            await fw.delete(V1Pod(metadata=V1ObjectMeta(name="foo")))
            await fw.modify(
                V1Pod(metadata=V1ObjectMeta(name="bar", resource_version="55"))
            )
            await fw.add(
                V1Pod(metadata=V1ObjectMeta(name="baz", resource_version="32"))
            )
            await fw.stop()

        asyncio.ensure_future(aw())
        options = {}
        await g._watch_handler(fw, options, asyncio.Queue())

        def mk_pod(id_, rv):
            return V1Pod(metadata=V1ObjectMeta(name=id_, resource_version=rv))

        table = [
            {"pod": mk_pod("foo", ""), "exists": False},
            {"pod": mk_pod("rejected", ""), "exists": False},
            {"pod": mk_pod("bar", "55"), "exists": True},
            {"pod": mk_pod("baz", "32"), "exists": True},
        ]
        for item in table:
            obj = s.get(item["pod"])
            exists = obj is not None
            self.assertIs(exists, item["exists"])
            if not exists:
                continue
            self.assertEqual(
                obj.metadata.resource_version, item["pod"].metadata.resource_version
            )

        self.assertEqual(options["resource_version"], "32")
        self.assertEqual(g.last_sync_resource_version(), "32")
Example #28
    async def test_multi_index_keys(self):
        index = store.new_indexer(store.meta_namespace_key_func,
                                  Indexers(by_user=test_users_index_func))

        pod1 = V1Pod(metadata=V1ObjectMeta(
            name="one", annotations={"users": "ernie,bert"}))
        pod2 = V1Pod(metadata=V1ObjectMeta(
            name="two", annotations={"users": "bert,oscar"}))
        pod3 = V1Pod(metadata=V1ObjectMeta(
            name="tre", annotations={"users": "ernie,elmo"}))

        await index.add(pod1)
        await index.add(pod2)
        await index.add(pod3)

        expected = {
            "ernie": {"one", "tre"},
            "bert": {"one", "two"},
            "elmo": {"tre"},
            "oscar": {"two"},
        }
        for k, v in expected.items():
            index_results = await index.by_index("by_user", k)
            found = {item.metadata.name for item in index_results}
            self.assertEqual(found, v)

        await index.delete(pod3)
        ernie_pods = await index.by_index("by_user", "ernie")
        self.assertEqual(len(ernie_pods), 1)
        for ernie_pod in ernie_pods:
            self.assertEqual(ernie_pod.metadata.name, "one")

        elmo_pods = await index.by_index("by_user", "elmo")
        self.assertEqual(len(elmo_pods), 0)

        copy_of_pod2 = copy.deepcopy(pod2)
        copy_of_pod2.metadata.annotations["users"] = "oscar"
        await index.update(copy_of_pod2)
        bert_pods = await index.by_index("by_user", "bert")
        self.assertEqual(len(bert_pods), 1)
        for bert_pod in bert_pods:
            self.assertEqual(bert_pod.metadata.name, "one")
Example #29
def mocked_k8s_RbacAuthorizationV1Api(mocker):
    mocked_RbacAuthorizationV1Api_class = mocker.patch('kubernetes.client.RbacAuthorizationV1Api')
    mocker.patch('kubernetes.client.ApiClient')
    rbacAuthorizationV1_instance = mocked_RbacAuthorizationV1Api_class.return_value

    v1_metadata_role = V1ObjectMeta(name="metadata-role")
    v1_policy_rule = V1PolicyRule(verbs=["verb"])
    v1_role = V1ClusterRole(metadata=v1_metadata_role, kind="ClusterRole", rules=[v1_policy_rule])
    v1_cluster_role_list = V1ClusterRoleList(items=[v1_role])

    rbacAuthorizationV1_instance.list_cluster_role.return_value = v1_cluster_role_list

    return rbacAuthorizationV1_instance
Example #30
def test_delete_replica_set(client, api):
    v1 = MagicMock()
    client.AppsV1Api.return_value = v1

    v1.list_namespaced_replica_set.return_value = V1ReplicaSetList(items=(
        V1ReplicaSet(metadata=V1ObjectMeta(name="repl1")),
        V1ReplicaSet(metadata=V1ObjectMeta(name="repl2")),
    ))

    delete_replica_set("fake", "fake_ns")

    v1.list_namespaced_replica_set.assert_called_with(
        "fake_ns", field_selector="metadata.name=fake")
    v1.delete_namespaced_replica_set.assert_has_calls(
        [
            call("repl1", "fake_ns", body=ANY),
            call("repl2", "fake_ns", body=ANY)
        ],
        any_order=True,
    )