Example #1
 def test_creating_pod_with_resource_limited(self):
     init_test_env(NS)
     run(f'kubectl create -f limited-pod.yaml -n {NS}')
     ensure_pod_phase('limited-pod', 'Running', NS)
     cpu_period = int(run(f'kubectl exec limited-pod -n {NS} -- cat /sys/fs/cgroup/cpu/cpu.cfs_period_us'))
     cpu_quota = int(run(f'kubectl exec limited-pod -n {NS} -- cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us'))
     minikube_cpus = 2
     self.assertEqual(cpu_quota/cpu_period/minikube_cpus, 0.1)  # can use at most 10% of the node's CPU
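
All of these tests lean on a set of shared helpers (run, ensure_pod_phase, get_pod_phase, init_test_env, ensure_replicas, and so on) defined elsewhere in the repository and not part of this listing. Below is a minimal sketch of run and the pod-phase helpers, only to make the examples readable on their own; the real implementations may differ, and the meaning of run's second argument (treated here as "tolerate failure and stay quiet") is an assumption.

import subprocess
import time


def run(cmd, quiet=False):
    # Execute a shell command, echo its output, and return stdout as a string.
    # Assumption: the boolean second argument used throughout the tests means
    # "don't fail (and don't echo) if the command errors out".
    result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    if result.returncode != 0 and not quiet:
        raise RuntimeError(f'command failed: {cmd}\n{result.stderr}')
    if not quiet:
        print(result.stdout)
    return result.stdout.strip()


def get_pod_phase(name, ns='default'):
    # Return the pod's phase (Running, Succeeded, ...) or None if it does not exist.
    phase = run(f"kubectl get po {name} -n {ns} -o jsonpath='{{.status.phase}}'", True)
    return phase or None


def ensure_pod_phase(name, expected_phase='Running', ns='default', timeout=300):
    # Poll until the pod reaches the expected phase; 'Deleted' means the pod is gone.
    for _ in range(timeout):
        phase = get_pod_phase(name, ns)
        if phase == expected_phase or (expected_phase == 'Deleted' and phase is None):
            return
        time.sleep(1)
    raise TimeoutError(f'pod {name} did not reach phase {expected_phase}')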
Example #2
    def test_using_config_map_as_env(self):
        run('kubectl delete pod fortune-env-from-configmap', True)
        ensure_pod_phase('fortune-env-from-configmap', 'Deleted')

        run('kubectl create -f fortune-pod-env-configmap.yaml')
        ensure_pod_phase('fortune-env-from-configmap', 'Running')
        ret = run("kubectl exec fortune-env-from-configmap -c html-generator  -- bash -c 'echo $INTERVAL'")
        self.assertEqual(25, int(ret))
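
fortune-pod-env-configmap.yaml is not included in this listing. The INTERVAL variable checked above is presumably populated from the fortune-config ConfigMap through a configMapKeyRef entry; a sketch of that env entry follows, where the key name (sleep-interval) is a guess and only the resulting value (25) is actually asserted by the test.

# Assumed shape of the INTERVAL entry in fortune-pod-env-configmap.yaml.
interval_env = {
    'name': 'INTERVAL',
    'valueFrom': {
        'configMapKeyRef': {'name': 'fortune-config', 'key': 'sleep-interval'},
    },
}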
Example #3
    def test_accessing_api_server_with_ambassador_container(self):
        run('kubectl delete pod curl-with-ambassador', True)
        ensure_pod_phase('curl-with-ambassador', 'Deleted')

        run('kubectl create -f curl-with-ambassador.yaml')
        ensure_pod_phase('curl-with-ambassador', 'Running')
        run('kubectl apply -f fabric8-rbac.yaml', True)
        run('kubectl exec curl-with-ambassador -c main -- curl -s localhost:8001'
            )
        run('kubectl delete -f fabric8-rbac.yaml', True)
Example #4
    def test_exposing_services_to_external_clients_by_nodeport(self):
        run('kubectl delete svc kubia-nodeport', True)

        run('kubectl create -f kubia-svc-nodeport.yaml')
        run('kubectl get svc kubia-nodeport')
        node_port_service = run('minikube service kubia-nodeport --url', True)
        run(f'curl -s {node_port_service}')
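
kubia-svc-nodeport.yaml is not shown here. A plausible minimal manifest, expressed as a dict and dumped the way other tests in this listing already do; the port numbers, including the fixed nodePort, are illustrative assumptions.

import yaml

# Assumed shape of kubia-svc-nodeport.yaml; minikube service --url above
# resolves to <node IP>:<nodePort> for this service.
nodeport_svc = {
    'apiVersion': 'v1',
    'kind': 'Service',
    'metadata': {'name': 'kubia-nodeport'},
    'spec': {
        'type': 'NodePort',
        'selector': {'app': 'kubia'},
        'ports': [{'port': 80, 'targetPort': 8080, 'nodePort': 30123}],
    },
}
print(yaml.dump(nodeport_svc))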
Example #5
    def test_headless_svc(self):
        run('kubectl delete svc kubia-headless', True)
        if get_pod_phase('dnsutils') is None:
            run(
                'kubectl run dnsutils --image=tutum/dnsutils --generator=run-pod/v1 --command -- sleep infinity',
                True)
        ensure_pod_phase('dnsutils')

        run('kubectl create -f kubia-svc-headless.yaml')
        run('kubectl exec dnsutils -- nslookup kubia-headless')
        run('kubectl get ep kubia-headless')
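
kubia-svc-headless.yaml is not shown either. The defining property of a headless service is clusterIP set to None, which is why the nslookup above returns the IPs of the individual backing pods rather than a single service IP. A sketch under that assumption:

import yaml

# Assumed shape of kubia-svc-headless.yaml; 'None' is the literal string the
# API expects for a headless service.
headless_svc = {
    'apiVersion': 'v1',
    'kind': 'Service',
    'metadata': {'name': 'kubia-headless'},
    'spec': {
        'clusterIP': 'None',
        'selector': {'app': 'kubia'},
        'ports': [{'port': 80, 'targetPort': 8080}],
    },
}
print(yaml.dump(headless_svc))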
Example #6
 def test_scale(self):
     # scale up
     run('kubectl scale rc kubia --replicas=10')
     run('kubectl get rc kubia')
     # scale down
     run('kubectl scale rc kubia --replicas=2')
     run('kubectl get rc kubia')
Example #7
 def test_creating_external_name_service(self):
     run('kubectl delete svc external-service-external-name', True)
     run('kubectl create -f external-service-external-name.yaml')
     # there should be no clusterIp
     run('kubectl get svc external-service-external-name')
     pod_name = run(
         "kubectl get pods -l app=kubia -ojsonpath='{.items[0].metadata.name}'",
         True)
     ensure_pod_phase(pod_name)
     run(f'kubectl exec {pod_name} -- curl -s -H "Host: www.baidu.com" external-service-external-name'
         )
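
external-service-external-name.yaml is not shown. An ExternalName service is implemented purely as a DNS CNAME, which is why the comment above expects no cluster IP; the external host is inferred from the curl command. A sketch:

import yaml

# Assumed shape of external-service-external-name.yaml; the service name
# resolves (via CNAME) to the external host, so the Host header in the curl
# above makes the request acceptable to that server.
external_name_svc = {
    'apiVersion': 'v1',
    'kind': 'Service',
    'metadata': {'name': 'external-service-external-name'},
    'spec': {
        'type': 'ExternalName',
        'externalName': 'www.baidu.com',
        'ports': [{'port': 80}],
    },
}
print(yaml.dump(external_name_svc))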
Example #8
 def test_create(self):
     '''
     Run this test case first
     '''
     run('kubectl delete cm fortune-config', True)
     run('kubectl create -f fortune-config.yaml')
     run('kubectl get cm')
     run('kubectl describe cm fortune-config')
Example #9
 def test_create(self):
     '''
     Run this test case first
     '''
     run('kubectl delete rc kubia', True)
     run('kubectl create -f kubia-rc.yaml')
     run('kubectl get rc')
     run('kubectl get pods --show-labels')
Example #10
    def test_resource_quota(self):
        init_test_env(NS)

        run(f'kubectl create -f quota-cpu-memory.yaml -n {NS}')
        run(f'kubectl describe quota -n {NS}')

        # When creating a ResourceQuota, you will usually also want to create a LimitRange object alongside it.
        stdout = run(f'kubectl create -f kubia-manual.yaml -n {NS} 2>&1; true')
        self.assertIn('must specify limits.cpu,limits.memory,requests.cpu,requests.memory', stdout)
        run(f'kubectl create -f limits.yaml -n {NS}')
        run(f'kubectl create -f kubia-manual.yaml -n {NS}')
        # So having a LimitRange with defaults for those resources can make life a bit easier for people creating pods.
        ensure_pod_phase('kubia-manual', 'Running', NS)
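
quota-cpu-memory.yaml and limits.yaml are not part of this listing. For the test to pass, limits.yaml has to be a LimitRange whose Container-level defaults fill in the requests and limits the ResourceQuota demands; a sketch with assumed values:

import yaml

# Assumed shape of limits.yaml (Container-level part only); the defaultRequest
# and default values are illustrative. What matters is that they exist, so that
# kubia-manual is admitted even though it declares no resources itself.
limit_range = {
    'apiVersion': 'v1',
    'kind': 'LimitRange',
    'metadata': {'name': 'example'},
    'spec': {
        'limits': [{
            'type': 'Container',
            'defaultRequest': {'cpu': '100m', 'memory': '10Mi'},
            'default': {'cpu': '200m', 'memory': '100Mi'},
        }],
    },
}
print(yaml.dump(limit_range))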
Example #11
    def test_init_container(self):
        init_test_env(NS)

        run(f'kubectl create -f fortune-client.yaml -n {NS}')
        # The STATUS column shows that zero of one init containers have finished.
        run(f'kubectl get pod -n {NS}')
        run(f'kubectl create -f fortune-server.yaml -n {NS}')
        ensure_pod_phase('fortune-server', 'Running', NS)
        run(f'kubectl get pod fortune-client -n {NS}')
        ensure_pod_phase('fortune-client', 'Running', NS)
Example #12
 def test_owner_references(self):
     '''
     Although a pod isn’t tied to a ReplicationController, the pod does reference it in the metadata.ownerReferences field,
     which you can use to easily find which ReplicationController a pod belongs to.
     '''
     ret = json.loads(run('kubectl get pod -o json', True))
     for pod in ret['items']:
         metadata = pod['metadata']
         pod_name = metadata['name']
         refs = [(ref['kind'], ref['name'])
                 for ref in metadata['ownerReferences']]
         print(f"{pod_name} => {refs}")
Example #13
    def test_providing_information_on_process_terminated(self):
        '''
        Show the reason why a container terminated in the pod's status.
        You do this by having the process write a termination message to a specific file in the container's filesystem.
        The default file the process needs to write the message to is /dev/termination-log,
        but it can be changed by setting the `terminationMessagePath` field in the container definition in the pod spec.
        '''
        init_test_env(NS)

        with self.subTest("Terminating unsuccessfully"):
            run(f'kubectl create -f termination-message.yaml -n {NS}')
            ensure_pod_phase('pod-with-termination-message', 'Running', NS)
            stdout = run(
                f'kubectl describe po pod-with-termination-message -n {NS} | grep -C5 "Message:"'
            )
            self.assertIn("I've had enough", stdout)

        with self.subTest("Terminating successfully"):
            run(f'kubectl create -f termination-message-success.yaml -n {NS}')
            ensure_pod_phase('successful-pod-with-termination-message',
                             'Succeeded', NS)
            stdout = run(
                f'kubectl describe po successful-pod-with-termination-message -n {NS} | grep -C5 "Message:"'
            )
            self.assertIn("I've completed my task", stdout)
Example #14
    def test_auto_scaling_down_with_pod_disruption_budget(self):
        init_test_env(NS)

        run(f"kubectl create pdb kubia-pdb --selector=app=kubia --min-available=2 -n {NS}"
            )
        run(f"kubectl get pdb kubia-pdb -o yaml -n {NS}")

        # create deployment
        run(f'kubectl create -f deployment.yaml -n {NS}')
        ensure_deploy_ready('kubia', NS)
        ensure_replicas('kubia', 3, 'deploy', NS)

        # create hpa
        run(f'kubectl autoscale deployment kubia --cpu-percent=30 --min=1 --max=5 -n {NS}'
            )
        # a pdb only limits voluntary disruptions (e.g. draining a node); it does not stop the HPA from scaling down
        ensure_replicas('kubia', 1, 'deploy', NS)
Example #15
    def test_limit_range(self):
        init_test_env(NS)

        with self.subTest("Enforcing limits"):
            run(f'kubectl create -f limits.yaml -n {NS}')
            stdout = run(f'kubectl create -f limits-pod-too-big.yaml -n {NS} 2>&1; true')
            self.assertIn('must be less than or equal to cpu limit', stdout)

        with self.subTest("Applying default resource requests and limits"):
            run(f'kubectl create -f kubia-manual.yaml -n {NS}')
            ensure_pod_phase('kubia-manual', 'Running', NS)
            default_cpu_request = run(f"kubectl get po kubia-manual -n {NS} -o jsonpath='{{.spec.containers[0].resources.requests.cpu}}'")
            default_cpu_limit = run(f"kubectl get po kubia-manual -n {NS} -o jsonpath='{{.spec.containers[0].resources.limits.cpu}}'")
            default_mem_request = run(f"kubectl get po kubia-manual -n {NS} -o jsonpath='{{.spec.containers[0].resources.requests.memory}}'")
            default_mem_limit = run(f"kubectl get po kubia-manual -n {NS} -o jsonpath='{{.spec.containers[0].resources.limits.memory}}'")
            with open('limits.yaml', 'rb') as fp:
                definition = yaml.load(fp, Loader=yaml.Loader)
                container_limits = [limit for limit in definition['spec']['limits'] if limit['type'] == 'Container'][0]
            self.assertEqual(default_cpu_request, container_limits['defaultRequest']['cpu'])
            self.assertEqual(default_cpu_limit, container_limits['default']['cpu'])
            self.assertEqual(default_mem_request, container_limits['defaultRequest']['memory'])
            self.assertEqual(default_mem_limit, container_limits['default']['memory'])
Example #16
    def test_empty_dir_vol(self):
        run('kubectl delete pod fortune', True)
        ensure_pod_phase('fortune', 'Deleted')

        run('kubectl create -f fortune-pod.yaml')
        ensure_pod_phase('fortune', 'Running')

        p = subprocess.Popen('kubectl port-forward fortune 8888:80', shell=True)
        time.sleep(1)
        run('curl -s http://localhost:8888')
        p.terminate()
Example #17
    def test_assigning_sa_to_pod(self):
        with open(os.devnull, 'w') as f:
            with redirect_stdout(f), redirect_stderr(f):
                self.test_create()

        run('kubectl delete po curl-custom-sa', True)
        ensure_pod_phase('curl-custom-sa', 'Deleted')
        run('kubectl create -f curl-custom-sa.yaml')
        ensure_pod_phase('curl-custom-sa', 'Running')
        sec = run("kubectl get sa foo -o jsonpath='{.secrets[0].name}'", True)
        sec_token = run(
            f"kubectl get secret {sec} -o jsonpath='{{.data.token}}' | base64 -D"
        )
        pod_token = run(
            "kubectl exec curl-custom-sa -c main -- cat /var/run/secrets/kubernetes.io/serviceaccount/token"
        )
        self.assertEqual(sec_token, pod_token)
        # If the response is Success, this may be because your cluster doesn’t use the RBAC authorization plugin,
        # or you gave all ServiceAccounts full permissions, like:
        # `kubectl create clusterrolebinding permissive-binding --clusterrole=cluster-admin --group=system:serviceaccounts`
        run("kubectl exec curl-custom-sa -c main -- curl -s localhost:8001/api/v1/pods"
            )
Example #18
    def test_consistent_state(self):
        now1 = str(datetime.datetime.now())
        time.sleep(1)
        now2 = str(datetime.datetime.now())
        p = subprocess.Popen('kubectl proxy --port=48001', shell=True)
        time.sleep(1)

        run(f'curl -s -X POST -d "{now1}" localhost:48001/api/v1/namespaces/default/pods/kubia-0/proxy/'
            )
        run('curl -s localhost:48001/api/v1/namespaces/default/pods/kubia-0/proxy/'
            )
        run('kubectl delete po kubia-0')
        ensure_pod_phase('kubia-0', 'Running')
        output = run(
            'curl -s localhost:48001/api/v1/namespaces/default/pods/kubia-0/proxy/'
        )
        body = output.split('\n')[1]
        index = body.index(':')
        self.assertEqual(now1, body[index + 2:])

        p.terminate()
        p.wait()
Example #19
    def test_rolling_back(self):
        '''
        v2 => v3
        In version 3, you'll introduce a bug that makes your app handle only the first four requests properly.
        All requests from the fifth request onward will return an internal server error (HTTP status code 500).
        '''
        with open(os.devnull, 'w') as f:
            with redirect_stdout(f), redirect_stderr(f):
                self.test_update()  # ensure current version is v2

        url = run('minikube service kubia-nodeport --url', True)
        p = subprocess.Popen(
            f'rm -rf /tmp/rollback.log && while true; do echo "`date +%T` - `curl -s {url}`" >> /tmp/rollback.log; sleep 1 ; done',
            shell=True)
        run('kubectl set image deployment kubia nodejs=luksa/kubia:v3 --record'
            )
        run('kubectl rollout status deployment kubia')
        run('echo "=== rollout to v3 successfully ===" >> /tmp/rollback.log',
            True)
        time.sleep(5)  # wait for error log of the app
        run('kubectl rollout history deployment kubia')  # revision history
        run('kubectl rollout undo deployment kubia')  # roll back
        run('kubectl rollout status deployment kubia')
        run('echo "=== roll back to v2 successfully ===" >> /tmp/rollback.log',
            True)
        time.sleep(3)
        p.terminate()
        p.wait()
        run("cat /tmp/rollback.log")
        run('kubectl rollout history deployment kubia')  # revision history
Example #20
    def test_update(self):
        '''v1 => v2'''
        with open(os.devnull, 'w') as f:
            with redirect_stdout(f), redirect_stderr(f):
                self.test_create()
        # slow down the update process a little,
        # so you can see that the update is indeed performed in a rolling fashion.
        run(
            """kubectl patch deployment kubia -p '{"spec": {"minReadySeconds": 10}}'""",
            True)
        url = run('minikube service kubia-nodeport --url', True)
        p = subprocess.Popen(
            f'rm -rf /tmp/update.log && while true; do echo "`date +%T` - `curl -s {url}`" >> /tmp/update.log; sleep 1 ; done',
            shell=True)

        run('kubectl set image deployment kubia nodejs=luksa/kubia:v2 --record'
            )
        run('kubectl rollout status deployment kubia')
        run('echo "=== rollout successfully ===" >> /tmp/update.log', True)
        time.sleep(5)  # check all requests should hit v2
        p.terminate()
        p.wait()
        run("cat /tmp/update.log")
        run('kubectl get rs')  # old ReplicaSet is still there
Example #21
    def test_create(self):
        '''v1'''
        run('kubectl delete deploy kubia', True)
        run('kubectl delete svc kubia-nodeport', True)

        run('kubectl create -f kubia-deployment-v1.yaml --record')
        run('kubectl get deploy')
        run('kubectl rollout status deployment kubia'
            )  # used specifically for checking a Deployment's status
        run('kubectl get rs')
        run('kubectl get po')
        run('kubectl create -f kubia-svc-nodeport.yaml')
Example #22
    def test_pausing_rolling_out(self):
        '''
        v1 => v4 => v2
        '''
        with open(os.devnull, 'w') as f:
            with redirect_stdout(f), redirect_stderr(f):
                self.test_create()  # v1

        # set version to v4
        run('kubectl set image deployment kubia nodejs=luksa/kubia:v4 --record'
            )
        run("kubectl get deploy/kubia -o jsonpath='{.spec.template.spec.containers[0].image}'"
            )
        run('kubectl rollout pause deployment kubia')
        # modify version to v2
        run('kubectl set image deployment kubia nodejs=luksa/kubia:v2 --record'
            )
        run('kubectl rollout resume deployment kubia')
        run('kubectl rollout status deployment kubia')
        run('kubectl rollout history deployment kubia')  # revision history
        run("kubectl get deploy/kubia -o jsonpath='{.spec.template.spec.containers[0].image}'"
            )
Example #23
    def test_configuring_service_endpoints(self):
        run('kubectl delete svc external-service',
            True)  # also delete Endpoints associated with the Service

        # Manually configuring service endpoints
        run('kubectl create -f external-service.yaml')
        run('kubectl get svc external-service')
        run('kubectl create -f external-service-endpoints.yaml')
        run('kubectl get ep external-service')

        # If you later decide to migrate the external service to pods running inside Kubernetes,
        # you can add a selector to the service, thereby making its Endpoints managed automatically.
        with self.subTest(
                'Migrating the external service to pods running inside Kubernetes by adding a selector to the service'
        ):
            json_def = json.loads(
                run('kubectl get svc external-service -o json', True))
            json_def['spec']['ports'][0]['targetPort'] = 8080
            json_def['spec']['selector'] = {'app': 'kubia'}
            with open('/tmp/external-service-with-selector.yaml', 'wb') as fp:
                fp.write(yaml.dump(json_def).encode('utf-8'))
            run("cat /tmp/external-service-with-selector.yaml")
            run(f'kubectl replace -f /tmp/external-service-with-selector.yaml')
            run('kubectl get svc external-service')
            run('kubectl get ep external-service')

        # When you remove the selector from a Service, Kubernetes stops updating its Endpoints.
        with self.subTest("Removing the selector from a Service"):
            json_def = json.loads(
                run('kubectl get svc external-service -o json', True))
            del json_def['spec']['selector']
            with open('/tmp/external-service-remove-selector.yaml',
                      'wb') as fp:
                fp.write(yaml.dump(json_def).encode('utf-8'))
            run("cat /tmp/external-service-remove-selector.yaml")
            run(f'kubectl replace -f /tmp/external-service-remove-selector.yaml'
                )
            run('kubectl get svc external-service')
            run('kubectl get ep external-service')  # no change to Endpoints
Example #24
    def test_discovering_service(self):
        run('kubectl delete po -l app=kubia',
            True)  # in order to regenerate pod

        pod_name = run(
            "kubectl get pods -l app=kubia -ojsonpath='{.items[0].metadata.name}'",
            True)
        ensure_pod_phase(pod_name)
        with self.subTest("Discovering through env"):
            run(f'kubectl exec {pod_name} -- env')
            run(f"kubectl exec {pod_name} -- bash -c 'curl -s http://$KUBIA_SERVICE_HOST:$KUBIA_SERVICE_PORT'"
                )
        with self.subTest("Discovering through DNS"):
            run(f'kubectl exec {pod_name} -- curl -s http://kubia.default.svc.cluster.local'
                )
            run(f'kubectl exec {pod_name} -- curl -s http://kubia.default.svc.cluster.local'
                )
            run(f'kubectl exec {pod_name} -- curl -s http://kubia.default')
            run(f'kubectl exec {pod_name} -- curl -s http://kubia')
Example #25
    def test_session_affinity(self):
        run('kubectl delete svc kubia', True)
        run('kubectl delete svc kubia-session-affinity', True)

        run('kubectl create -f kubia-session-affinity-svc.yaml')
        run('kubectl get svc kubia-session-affinity')
        run('kubectl describe svc kubia-session-affinity')
        pod_name = run(
            "kubectl get pods -l app=kubia -ojsonpath='{.items[0].metadata.name}'",
            True)
        ensure_pod_phase(pod_name)
        clusterIp = run(
            "kubectl get svc kubia-session-affinity -o=jsonpath='{.spec.clusterIP}'",
            True)
        pods = set()
        for i in range(10):
            pods.add(
                run(f"kubectl exec {pod_name} -- curl -s http://{clusterIp}",
                    True))
        self.assertEqual(len(pods), 1)
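
kubia-session-affinity-svc.yaml is not shown. Setting sessionAffinity to ClientIP is what makes all ten requests above (issued from the same pod, hence the same client IP) land on a single backend, so the set ends up with exactly one entry. A sketch:

import yaml

# Assumed shape of kubia-session-affinity-svc.yaml.
session_affinity_svc = {
    'apiVersion': 'v1',
    'kind': 'Service',
    'metadata': {'name': 'kubia-session-affinity'},
    'spec': {
        'sessionAffinity': 'ClientIP',
        'selector': {'app': 'kubia'},
        'ports': [{'port': 80, 'targetPort': 8080}],
    },
}
print(yaml.dump(session_affinity_svc))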
Example #26
    def test_create(self):
        if not is_rc_ready('kubia'):
            run('kubectl create -f kubia-rc.yaml')
            ensure_rc_ready('kubia')
        run('kubectl delete svc kubia', True)

        run('kubectl create -f kubia-svc.yaml')
        run('kubectl get svc kubia')
        pod_name = run(
            "kubectl get pods -l app=kubia -ojsonpath='{.items[0].metadata.name}'",
            True)
        ensure_pod_phase(pod_name)
        clusterIp = run("kubectl get svc kubia -ojsonpath='{.spec.clusterIP}'",
                        True)
        run(f"kubectl exec {pod_name} -- curl -s http://{clusterIp}")
        pods = set()
        for i in range(10):
            pods.add(
                run(f"kubectl exec {pod_name} -- curl -s http://{clusterIp}",
                    True))
        self.assertGreater(len(pods), 1)
Example #27
    def test_exposing_services_to_external_clients_by_ingress(self):
        run('kubectl delete svc kubia-nodeport', True)
        run('kubectl create -f kubia-svc-nodeport.yaml', True)
        run('kubectl delete ing kubia', True)

        run('kubectl create -f kubia-ingress.yaml')
        run('kubectl get ing kubia')
        # When running on cloud providers, the address may take time to appear,
        # because the Ingress controller provisions a load balancer behind the scenes.
        ing_ip = get_ingress_address('kubia')
        pod_name = run(
            "kubectl get pods -l app=kubia -ojsonpath='{.items[0].metadata.name}'",
            True)
        ensure_pod_phase(pod_name)
        run(f'kubectl exec {pod_name} -- curl -s {ing_ip}')
        run(f'kubectl exec {pod_name} -- curl -s -H "Host: kubia.example.com" {ing_ip}'
            )

        with self.subTest("Configuring Ingress to handle TLS traffic"):
            run('kubectl delete secret tls tls-secret', True)
            # prepare cert first
            run('openssl genrsa -out tls.key 2048', True)
            run(
                'openssl req -new -x509 -key tls.key -out tls.cert -days 360 -subj /CN=kubia.example.com',
                True)
            run('kubectl create secret tls tls-secret --cert=tls.cert --key=tls.key'
                )

            run('kubectl apply -f kubia-ingress-tls.yaml')
            run(f'kubectl exec {pod_name} -- curl -k -s -H "Host: kubia.example.com" https://{ing_ip}'
                )
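
kubia-ingress-tls.yaml is not shown. Compared with the plain kubia-ingress.yaml, the relevant addition is the tls section referencing the tls-secret created just above; the API group/version and backend field names below follow the older Ingress schema this code base appears to target and are an assumption.

import yaml

# Assumed shape of kubia-ingress-tls.yaml.
ingress_tls = {
    'apiVersion': 'extensions/v1beta1',
    'kind': 'Ingress',
    'metadata': {'name': 'kubia'},
    'spec': {
        'tls': [{'hosts': ['kubia.example.com'], 'secretName': 'tls-secret'}],
        'rules': [{
            'host': 'kubia.example.com',
            'http': {'paths': [{'path': '/',
                                'backend': {'serviceName': 'kubia-nodeport',
                                            'servicePort': 80}}]},
        }],
    },
}
print(yaml.dump(ingress_tls))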
Example #28
 def test_exposing_services_to_external_clients_by_loadbalancer(self):
     run('kubectl delete svc kubia-loadbalancer', True)
     run('kubectl create -f kubia-svc-loadbalancer.yaml')
     run('kubectl get svc kubia-loadbalancer')
Example #29
 def test_create_service_account(self):
     run('kubectl delete serviceaccount foo', True)
     run('kubectl create serviceaccount foo')
     run('kubectl describe sa foo')
     sec = run("kubectl get sa foo -o jsonpath='{.secrets[0].name}'", True)
     run(f'kubectl describe secret {sec}')
Example #30
    def test_rbac_with_role_and_rolebinding(self):
        '''
        minikube start --extra-config=apiserver.authorization-mode=RBAC
        '''
        run('kubectl delete ns foo', True)
        run('kubectl delete ns bar', True)
        ensure_namespace_phase('foo', 'Deleted')
        ensure_namespace_phase('bar', 'Deleted')

        run('kubectl create ns foo')
        # luksa/kubectl-proxy will run the proxy which will take care of authentication and HTTPS,
        # so you can focus on the authorization aspect of API server security.
        run('kubectl run test --image=luksa/kubectl-proxy -n foo')

        run('kubectl create ns bar')
        run('kubectl run test --image=luksa/kubectl-proxy -n bar')

        pod_name = run(
            "kubectl get pod -n foo -o jsonpath='{.items[0].metadata.name}'",
            True)
        ensure_pod_phase(pod_name, expected_phase='Running', ns='foo')
        stdout = run(
            f"kubectl exec {pod_name} -n foo -- curl -s localhost:8001/api/v1/namespaces/foo/services"
        )
        # The default permissions for a ServiceAccount don't allow it to list or modify any resources.
        self.assertEqual(json.loads(stdout)['code'], 403)

        # These two Roles will allow you to list Services in the foo and bar namespaces from within your two pods
        run('kubectl create -f service-reader.yaml -n foo'
            )  # create role in namespace foo
        # Instead of creating the Role from a YAML file, you could also create it with the special kubectl create role command
        run('kubectl create role service-reader --verb=get --verb=list --resource=services -n bar'
            )

        # Binding Role to ServiceAccount
        run('kubectl create rolebinding test --role=service-reader --serviceaccount=foo:default -n foo'
            )

        # now it is ok
        stdout = run(
            f"kubectl exec {pod_name} -n foo -- curl -s localhost:8001/api/v1/namespaces/foo/services"
        )
        self.assertEqual(json.loads(stdout)['kind'], "ServiceList")

        with self.subTest(
                "Including serviceaccounts from other namespaces in a rolebinding"
        ):
            run('kubectl get rolebindings test -o yaml -n foo'
                )  # before modification
            json_def = json.loads(
                run('kubectl get rolebindings test -o json -n foo', True))
            json_def['subjects'][0]['namespace'] = 'bar'
            with open('/tmp/bind_service_account_from_other_namespace.yaml',
                      'wb') as fp:
                fp.write(yaml.dump(json_def).encode('utf-8'))
            run('kubectl replace -f /tmp/bind_service_account_from_other_namespace.yaml -n foo'
                )
            run('kubectl get rolebindings test -o yaml -n foo'
                )  # after modification
            # verify
            bar_pod_name = run(
                "kubectl get pod -n bar -o jsonpath='{.items[0].metadata.name}'",
                True)
            ensure_pod_phase(bar_pod_name, expected_phase='Running', ns='bar')
            # can see service in foo namespace
            stdout = run(
                f"kubectl exec {bar_pod_name} -n bar -- curl -s localhost:8001/api/v1/namespaces/foo/services"
            )
            self.assertEqual(json.loads(stdout)['kind'], "ServiceList")
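
service-reader.yaml is not shown; it should be the declarative counterpart of the kubectl create role command used for the bar namespace above. A sketch:

import yaml

# Assumed shape of service-reader.yaml: allows get/list on Services, which is
# exactly what the in-pod curl against .../namespaces/foo/services needs.
service_reader_role = {
    'apiVersion': 'rbac.authorization.k8s.io/v1',
    'kind': 'Role',
    'metadata': {'name': 'service-reader'},
    'rules': [{
        'apiGroups': [''],
        'verbs': ['get', 'list'],
        'resources': ['services'],
    }],
}
print(yaml.dump(service_reader_role))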