Example #1
    def test_create_with_empty_sc(self):
        '''
        storageClassName is set to "" in the PVC template, which means a pre-provisioned PV is used.
        '''
        _clear()

        run('kubectl create -f persistent-volumes-hostpath.yaml')
        run('kubectl create -f kubia-statefulset-sc-empty.yaml')
        ensure_pod_phase('kubia-0', 'Running')
        run('kubectl get sts kubia')
        run('kubectl get po')
        run('kubectl get pvc')
        run('kubectl get pv')
        storage_class = run(
            """kubectl get pvc -o jsonpath='{.items[?(@.metadata.name=="data-kubia-0")].spec.storageClassName}'""",
            True)
        self.assertTrue(not storage_class)
        volume_name = run(
            """kubectl get pvc -o jsonpath='{.items[?(@.metadata.name=="data-kubia-0")].spec.volumeName}'""",
            True)
        self.assertIn(volume_name, ['pv-a', 'pv-b', 'pv-c'])
        phase = run(
            """kubectl get pv -o jsonpath='{.items[?(@.spec.claimRef.name=="data-kubia-0")].status.phase}'""",
            True)
        self.assertEqual(phase, 'Bound')
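The jsonpath filters above are compact but hard to read; the same checks can be done by parsing the full JSON output in Python. A minimal sketch, assuming kubectl is on PATH and the claim data-kubia-0 exists (the subprocess/json usage below is illustrative and not part of the test helpers):

import json
import subprocess

# Fetch all PVCs as JSON and pick out the claim by name, mirroring the
# jsonpath filter .items[?(@.metadata.name=="data-kubia-0")].
pvcs = json.loads(subprocess.check_output(['kubectl', 'get', 'pvc', '-o', 'json']))
claim = next(i for i in pvcs['items'] if i['metadata']['name'] == 'data-kubia-0')
print(claim['spec'].get('storageClassName'))  # expected to be empty ("") here
print(claim['spec']['volumeName'])            # expected to be one of pv-a/pv-b/pv-c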
Example #2
    def test_labels(self):
        run('kubectl get po --show-labels')
        # only interested in certain labels
        run('kubectl get po -L creation_method,env')

        with self.subTest("Modifying labels"):
            # modify labels of existing pods
            # need to use the --overwrite option when changing existing labels.
            run('kubectl label po kubia-manual creation_method=manual --overwrite'
                )
            run('kubectl label po kubia-manual-v2 env=debug --overwrite')
            run('kubectl get po -L creation_method,env')

        with self.subTest('Listing pods using a label selector'):
            run('kubectl get po -l creation_method=manual --show-labels')
            # list all pods that include the 'env' label
            run('kubectl get po -l env --show-labels')
            # list those that don’t have the 'env' label
            run("kubectl get po -l '!env' --show-labels")
            # select pods with the 'creation_method' label with any value other than 'manual'
            run("kubectl get po -l 'creation_method!=manual' --show-labels")
            # select pods with the 'env' label set to either 'debug' or 'devel'
            run('kubectl get po -l "env in (debug,devel)" --show-labels')
            # select pods with the 'env' label not set to either 'prod' or 'devel'
            run('kubectl get po -l "env notin (prod,devel)" --show-labels')

        with self.subTest('Scheduling pods to specific nodes'):
            run('kubectl delete pod kubia-gpu', True)  # cleanup first
            ensure_pod_phase('kubia-gpu', 'Deleted')

            time.sleep(10)
            run('kubectl label node minikube gpu=true --overwrite')
            run('kubectl get node -L gpu')
            run('kubectl create -f kubia-gpu.yaml')
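kubia-gpu.yaml is not shown here; presumably it pins the pod to GPU nodes with a nodeSelector matching the gpu=true label set above. A minimal sketch of such a manifest, built as a Python dict and piped to kubectl apply (the pod name and image are assumptions, not taken from the file):

import json
import subprocess

# Hypothetical pod spec with a nodeSelector; it can only be scheduled onto
# nodes labeled gpu=true.
pod = {
    'apiVersion': 'v1',
    'kind': 'Pod',
    'metadata': {'name': 'kubia-gpu-sketch'},
    'spec': {
        'nodeSelector': {'gpu': 'true'},   # restrict scheduling to labeled nodes
        'containers': [{'name': 'kubia', 'image': 'luksa/kubia'}],  # assumed image
    },
}
# kubectl accepts JSON as well as YAML on stdin.
subprocess.run(['kubectl', 'apply', '-f', '-'],
               input=json.dumps(pod).encode(), check=True)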
Example #3
    def test_exposing_services_to_external_clients_by_ingress(self):
        run('kubectl delete svc kubia-nodeport', True)
        run('kubectl create -f kubia-svc-nodeport.yaml', True)
        run('kubectl delete ing kubia', True)

        run('kubectl create -f kubia-ingress.yaml')
        run('kubectl get ing kubia')
        # When running on cloud providers, the address may take time to appear,
        # because the Ingress controller provisions a load balancer behind the scenes.
        ing_ip = get_ingress_address('kubia')
        pod_name = run(
            "kubectl get pods -l app=kubia -ojsonpath='{.items[0].metadata.name}'",
            True)
        ensure_pod_phase(pod_name)
        run(f'kubectl exec {pod_name} -- curl -s {ing_ip}')
        run(f'kubectl exec {pod_name} -- curl -s -H "Host: kubia.example.com" {ing_ip}'
            )

        with self.subTest("Configuring Ingress to handle TLS traffic"):
            run('kubectl delete secret tls tls-secret', True)
            # prepare cert first
            run('openssl genrsa -out tls.key 2048', True)
            run(
                'openssl req -new -x509 -key tls.key -out tls.cert -days 360 -subj /CN=kubia.example.com',
                True)
            run('kubectl create secret tls tls-secret --cert=tls.cert --key=tls.key'
                )

            run('kubectl apply -f kubia-ingress-tls.yaml')
            run(f'kubectl exec {pod_name} -- curl -k -s -H "Host: kubia.example.com" https://{ing_ip}'
                )
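To confirm the Secret really carries the self-signed certificate, its tls.crt entry (the key name kubectl create secret tls uses) can be decoded and inspected. A sketch, assuming openssl is available and the cluster is reachable:

import base64
import subprocess

# Pull the certificate out of the Secret and let openssl print its subject;
# it should show CN=kubia.example.com, matching the -subj used above.
b64_crt = subprocess.check_output(
    ['kubectl', 'get', 'secret', 'tls-secret', '-o', 'jsonpath={.data.tls\\.crt}'])
cert_pem = base64.b64decode(b64_crt)
print(subprocess.run(['openssl', 'x509', '-noout', '-subject'],
                     input=cert_pem, capture_output=True).stdout.decode())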
Example #4
    def test_providing_information_on_process_terminated(self):
        '''
        Show the reason why a container terminated in the pod's status.
        You do this by having the process write a termination message to a specific file in the container's filesystem.
        The default file the process needs to write the message to is /dev/termination-log,
        but it can be changed by setting the `terminationMessagePath` field in the container definition in the pod spec.
        '''
        init_test_env(NS)

        with self.subTest("Terminating unsuccessfully"):
            run(f'kubectl create -f termination-message.yaml -n {NS}')
            ensure_pod_phase('pod-with-termination-message', 'Running', NS)
            stdout = run(
                f'kubectl describe po pod-with-termination-message -n {NS} | grep -C5 "Message:"'
            )
            self.assertIn("I've had enough", stdout)

        with self.subTest("Terminating successfully"):
            run(f'kubectl create -f termination-message-success.yaml -n {NS}')
            ensure_pod_phase('successful-pod-with-termination-message',
                             'Succeeded', NS)
            stdout = run(
                f'kubectl describe po successful-pod-with-termination-message -n {NS} | grep -C5 "Message:"'
            )
            self.assertIn("I've completed my task", stdout)
Example #5
    def test_creating_pod_with_resource_limited(self):
        init_test_env(NS)
        run(f'kubectl create -f limited-pod.yaml -n {NS}')
        ensure_pod_phase('limited-pod', 'Running', NS)
        cpu_period = int(run(f'kubectl exec limited-pod -n {NS} -- cat /sys/fs/cgroup/cpu/cpu.cfs_period_us'))
        cpu_quota = int(run(f'kubectl exec limited-pod -n {NS} -- cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us'))
        minikube_cpus = 2
        self.assertEqual(cpu_quota/cpu_period/minikube_cpus, 0.1)  # can use at most 10% of the CPU
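The 10% figure follows from how a CFS quota maps to a CPU limit. A worked sketch of the arithmetic, assuming limited-pod.yaml sets a CPU limit of 200 millicores (the actual value in that manifest is an assumption):

# With the default CFS period of 100000 us, a 200m CPU limit becomes a
# 20000 us quota: the container may run 20 ms out of every 100 ms.
cpu_period_us = 100000           # kernel default for cpu.cfs_period_us
cpu_limit_millicores = 200       # assumed "limits.cpu: 200m" in limited-pod.yaml
cpu_quota_us = cpu_period_us * cpu_limit_millicores // 1000   # -> 20000
minikube_cpus = 2
share_of_node = cpu_quota_us / cpu_period_us / minikube_cpus  # -> 0.1, i.e. 10%
print(cpu_quota_us, share_of_node)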
Example #6
    def test_using_config_map_as_env(self):
        run('kubectl delete pod fortune-env-from-configmap', True)
        ensure_pod_phase('fortune-env-from-configmap', 'Deleted')

        run('kubectl create -f fortune-pod-env-configmap.yaml')
        ensure_pod_phase('fortune-env-from-configmap', 'Running')
        ret = run("kubectl exec fortune-env-from-configmap -c html-generator  -- bash -c 'echo $INTERVAL'")
        self.assertEqual(25, int(ret))
Example #7
    def test_init_container(self):
        init_test_env(NS)

        run(f'kubectl create -f fortune-client.yaml -n {NS}')
        # The STATUS column shows that zero of one init containers have finished.
        run(f'kubectl get pod -n {NS}')
        run(f'kubectl create -f fortune-server.yaml -n {NS}')
        ensure_pod_phase('fortune-server', 'Running', NS)
        run(f'kubectl get pod fortune-client -n {NS}')
        ensure_pod_phase('fortune-client', 'Running', NS)
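The "Init:0/1" status seen in kubectl get pod can also be checked field by field. A sketch reading the init container's readiness from the pod status, assuming fortune-client.yaml defines a single init container that waits for the server (NS is the test namespace constant):

import subprocess

# While fortune-server does not exist yet, the init container keeps waiting
# and 'ready' stays false; once the server is up it flips to true.
ready = subprocess.check_output([
    'kubectl', 'get', 'po', 'fortune-client', '-n', NS,
    '-o', 'jsonpath={.status.initContainerStatuses[0].ready}',
]).decode()
print(ready)  # "false" while waiting, "true" after fortune-server is running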
Example #8
    def test_accessing_api_server_with_ambassador_container(self):
        run('kubectl delete pod curl-with-ambassador', True)
        ensure_pod_phase('curl-with-ambassador', 'Deleted')

        run('kubectl create -f curl-with-ambassador.yaml')
        ensure_pod_phase('curl-with-ambassador', 'Running')
        run('kubectl apply -f fabric8-rbac.yaml', True)
        run('kubectl exec curl-with-ambassador -c main -- curl -s localhost:8001'
            )
        run('kubectl delete -f fabric8-rbac.yaml', True)
Example #9
    def test_empty_dir_vol(self):
        run('kubectl delete pod fortune', True)
        ensure_pod_phase('fortune', 'Deleted')

        run('kubectl create -f fortune-pod.yaml')
        ensure_pod_phase('fortune', 'Running')

        p = subprocess.Popen('kubectl port-forward fortune 8888:80', shell=True)
        time.sleep(1)
        run('curl -s http://localhost:8888')
        p.terminate()
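The fixed time.sleep(1) after starting kubectl port-forward is racy. A small sketch of waiting until the forwarded port actually accepts connections (a generic helper, not part of these tests):

import socket
import subprocess
import time

def wait_for_port(port, host='localhost', timeout=10.0):
    """Poll until something is listening on host:port or the timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with socket.create_connection((host, port), timeout=1):
                return True
        except OSError:
            time.sleep(0.2)
    return False

p = subprocess.Popen('kubectl port-forward fortune 8888:80', shell=True)
if wait_for_port(8888):
    subprocess.run('curl -s http://localhost:8888', shell=True)
p.terminate()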
Example #10
    def test_creating_external_name_service(self):
        run('kubectl delete svc external-service-external-name', True)
        run('kubectl create -f external-service-external-name.yaml')
        # there should be no ClusterIP
        run('kubectl get svc external-service-external-name')
        pod_name = run(
            "kubectl get pods -l app=kubia -ojsonpath='{.items[0].metadata.name}'",
            True)
        ensure_pod_phase(pod_name)
        run(f'kubectl exec {pod_name} -- curl -s -H "Host: www.baidu.com" external-service-external-name'
            )
Example #11
    def test_headless_svc(self):
        run('kubectl delete svc kubia-headless', True)
        if get_pod_phase('dnsutils') is None:
            run(
                'kubectl run dnsutils --image=tutum/dnsutils --generator=run-pod/v1 --command -- sleep infinity',
                True)
        ensure_pod_phase('dnsutils')

        run('kubectl create -f kubia-svc-headless.yaml')
        run('kubectl exec dnsutils -- nslookup kubia-headless')
        run('kubectl get ep kubia-headless')
Example #12
    def test_resource_quota(self):
        init_test_env(NS)

        run(f'kubectl create -f quota-cpu-memory.yaml -n {NS}')
        run(f'kubectl describe quota -n {NS}')

        # One caveat when creating a ResourceQuota is that you will also want to create a LimitRange object alongside it.
        stdout = run(f'kubectl create -f kubia-manual.yaml -n {NS} 2>&1; true')
        self.assertIn('must specify limits.cpu,limits.memory,requests.cpu,requests.memory', stdout)
        run(f'kubectl create -f limits.yaml -n {NS}')
        run(f'kubectl create -f kubia-manual.yaml -n {NS}')
        # So having a LimitRange with defaults for those resources can make life a bit easier for people creating pods.
        ensure_pod_phase('kubia-manual', 'Running', NS)
Example #13
    def test_using_config_map_as_volume(self):
        run('kubectl delete cm fortune-config', True)
        run('kubectl delete pod fortune-configmap-volume', True)
        ensure_pod_phase('fortune-configmap-volume', 'Deleted')

        # 1, Create cm
        run('kubectl create configmap fortune-config --from-file=configmap-files')
        run('kubectl get configmap fortune-config -o yaml')
        # 2, Create pod
        run('kubectl create -f fortune-pod-configmap-volume.yaml')
        ensure_pod_phase('fortune-configmap-volume', 'Running')

        # 3, Verify
        p = subprocess.Popen('kubectl port-forward fortune-configmap-volume 8888:80', shell=True)
        time.sleep(1)
        run('curl -s -H "Accept-Encoding: gzip" -I localhost:8888')
        p.terminate()
        run('kubectl exec fortune-configmap-volume -c web-server -- ls /etc/nginx/conf.d')
        run("kubectl exec fortune-configmap-volume -c html-generator -- bash -c 'echo $INTERVAL'")

        with self.subTest("Exposing Certain ConfigMap Entries in the Volume"):
            run('kubectl delete pod fortune-configmap-volume-with-items', True)
            ensure_pod_phase('fortune-configmap-volume-with-items', 'Deleted')

            run('kubectl create -f fortune-pod-configmap-volume-with-items.yaml')
            ensure_pod_phase('fortune-configmap-volume-with-items', 'Running')
            run('kubectl exec fortune-configmap-volume-with-items -c web-server -- ls /etc/nginx/conf.d')  # only one file
Example #14
    def test_dp_pv_vol(self):
        '''
        Dynamic provisioning
        '''
        run('kubectl delete pod mongodb', True)
        ensure_pod_phase('mongodb', 'Deleted')
        run('kubectl delete pvc mongodb-pvc', True)
        run('kubectl delete sc fast', True)

        # 1, Define StorageClass
        run('kubectl create -f storageclass-fast-hostpath.yaml')
        run('kubectl get sc')

        # 2, Requesting the storage class in a PersistentVolumeClaim
        run('kubectl create -f mongodb-pvc-dp.yaml')
        run('kubectl get pvc mongodb-pvc')
        run('kubectl get pv')  # Its reclaim policy is Delete, which means the PersistentVolume will be deleted when the PVC is deleted.

        # 3, Create pod using pvc
        run('kubectl create -f mongodb-pod-pvc.yaml')
        ensure_pod_phase('mongodb', 'Running')

        # 4, Verify
        now = str(datetime.datetime.now())
        run(f"""kubectl exec mongodb -- mongo --quiet localhost/mystore --eval 'db.foo.insert({{time: "{now}"}})'""")
        run('kubectl delete pod mongodb')
        ensure_pod_phase('mongodb', 'Deleted')
        run('kubectl create -f mongodb-pod-pvc.yaml')
        ensure_pod_phase('mongodb', 'Running')
        ret = run("""kubectl exec mongodb -- mongo localhost/mystore --quiet --eval 'db.foo.find({}, {_id: 0}).sort({time: -1}).limit(1)'""")
        self.assertEqual(now, json.loads(ret)['time'])
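The Delete reclaim policy mentioned in the comment above can be asserted rather than eyeballed. A sketch reading the bound PV's reclaim policy and storage class via jsonpath (the field paths are standard; the expected values follow from the 'fast' StorageClass created above):

import subprocess

# The PV bound to mongodb-pvc should have been provisioned from the 'fast'
# StorageClass and carry the Delete reclaim policy.
policy = subprocess.check_output([
    'kubectl', 'get', 'pv', '-o',
    'jsonpath={.items[?(@.spec.claimRef.name=="mongodb-pvc")].spec.persistentVolumeReclaimPolicy}',
]).decode()
sc = subprocess.check_output([
    'kubectl', 'get', 'pv', '-o',
    'jsonpath={.items[?(@.spec.claimRef.name=="mongodb-pvc")].spec.storageClassName}',
]).decode()
print(policy, sc)  # expected: Delete fast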
Example #15
    def test_pre_stop_hook(self):
        init_test_env(NS)
        with self.subTest("Using an command hook handler"):
            run(f'kubectl create -f pre-stop-hook-command.yaml -n {NS}')
            ensure_pod_phase('pod-with-prestop-hook', 'Running', NS)
            time.sleep(60)
            run(f'kubectl describe pod pod-with-prestop-hook -n {NS}')
            run(f'kubectl get pod pod-with-prestop-hook -n {NS}')

        with self.subTest("Using an HTTP GET hook handler"):
            run(f'kubectl create -f pre-stop-hook-httpget.yaml -n {NS}')
            ensure_pod_phase('pod-with-prestop-http-hook', 'Running', NS)
            time.sleep(60)
            run(f'kubectl describe pod pod-with-prestop-http-hook -n {NS}')
            run(f'kubectl get pod pod-with-prestop-http-hook -n {NS}')
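The describe output above already includes the pod's events, but they can also be fetched on their own, which makes any pre-stop hook failure easier to spot. A sketch using a field selector on the pod name (NS is the test namespace constant):

import subprocess

# Only the events for this pod; a failing pre-stop hook would surface here as
# a Warning event without having to read through the full describe output.
subprocess.run([
    'kubectl', 'get', 'events', '-n', NS,
    '--field-selector', 'involvedObject.name=pod-with-prestop-hook',
], check=False)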
Example #16
    def test_create_without_pv_predefined(self):
        _clear()

        run('kubectl create -f kubia-statefulset.yaml')
        ensure_pod_phase('kubia-0', 'Running')
        run('kubectl get sts kubia')
        run('kubectl get po')
        run('kubectl get pvc')
        run('kubectl get pv')
        storage_class = run(
            """kubectl get pvc -o jsonpath='{.items[?(@.metadata.name=="data-kubia-0")].spec.storageClassName}'""",
            True)
        self.assertEqual(storage_class, 'standard')
        phase = run(
            """kubectl get pv -o jsonpath='{.items[?(@.spec.claimRef.name=="data-kubia-0")].status.phase}'""",
            True)
        self.assertEqual(phase, 'Bound')
Example #17
    def test_node_affinity(self):
        '''
        Run this testcase under EKS with at least 2 worker nodes
        '''
        init_test_env(NS)
        nodes = run(
            f"kubectl get node -o jsonpath='{{.items[*].metadata.name}}'",
            True).split()
        for node in nodes:
            run(f'kubectl label node {node} gpu-', True)  # delete label first
            run(f'kubectl label node {node} availability-zone-', True)
            run(f'kubectl label node {node} share-type-', True)

        with self.subTest("Specifying hard node affinity rules"):
            node = random.choice(nodes)
            run(f'kubectl label node {node} gpu=true')
            run(f'kubectl create -f kubia-gpu-nodeaffinity.yaml -n {NS}')
            ensure_pod_phase('kubia-gpu', 'Running', NS)
            stdout = run(f'kubectl get pod kubia-gpu -o wide -n {NS}')
            self.assertIn(node, stdout)

        with self.subTest("Prioritizing nodes when scheduling a pod"):
            node1 = nodes[0]
            node2 = nodes[1]
            run(f'kubectl label node {node1} availability-zone=zone1', True)
            run(f'kubectl label node {node1} share-type=dedicated', True)
            run(f'kubectl label node {node2} availability-zone=zone2', True)
            run(f'kubectl label node {node2} share-type=shared', True)
            run(f'kubectl get node -L availability-zone -L share-type')
            run(f'kubectl create -f preferred-deployment.yaml -n {NS}')
            ensure_deploy_ready('pref', NS)
            # Nodes whose 'availability-zone' and 'share-type' labels match the pod's node affinity are ranked the highest.
            # Next come the 'shared' nodes in 'zone1', then come the 'dedicated' nodes in the other zones,
            # and at the lowest priority are all the other nodes.
            run(f'kubectl get po -l app=pref -o wide -n {NS}')
            node1_num = int(
                run(
                    f'kubectl get po -l app=pref -o wide -n {NS} | grep {node1} | wc -l',
                    True))
            node2_num = int(
                run(
                    f'kubectl get po -l app=pref -o wide -n {NS} | grep {node2} | wc -l',
                    True))
            self.assertGreater(node1_num, node2_num)
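The grep | wc -l pipelines above can be replaced by one query plus a Counter. A sketch, assuming the same app=pref label and NS namespace constant:

import subprocess
from collections import Counter

# One node name per pod, then tally them; node1 should dominate because it
# carries the preferred zone1/dedicated labels.
out = subprocess.check_output([
    'kubectl', 'get', 'po', '-l', 'app=pref', '-n', NS,
    '-o', 'jsonpath={.items[*].spec.nodeName}',
]).decode()
per_node = Counter(out.split())
print(per_node)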
Example #18
    def test_discovering_service(self):
        run('kubectl delete po -l app=kubia',
            True)  # in order to regenerate pod

        pod_name = run(
            "kubectl get pods -l app=kubia -ojsonpath='{.items[0].metadata.name}'",
            True)
        ensure_pod_phase(pod_name)
        with self.subTest("Discovering through env"):
            run(f'kubectl exec {pod_name} -- env')
            run(f"kubectl exec {pod_name} -- bash -c 'curl -s http://$KUBIA_SERVICE_HOST:$KUBIA_SERVICE_PORT'"
                )
        with self.subTest("Discovering through DNS"):
            run(f'kubectl exec {pod_name} -- curl -s http://kubia.default.svc.cluster.local'
                )
            run(f'kubectl exec {pod_name} -- curl -s http://kubia.default.svc.cluster.local'
                )
            run(f'kubectl exec {pod_name} -- curl -s http://kubia.default')
            run(f'kubectl exec {pod_name} -- curl -s http://kubia')
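The KUBIA_SERVICE_HOST / KUBIA_SERVICE_PORT variables follow the fixed naming rule Kubernetes uses for service environment variables: the service name upper-cased, dashes turned into underscores, suffixed with _SERVICE_HOST and _SERVICE_PORT. A tiny sketch of that rule:

def service_env_vars(service_name):
    """Return the env var names Kubernetes injects for a given Service."""
    prefix = service_name.upper().replace('-', '_')
    return f'{prefix}_SERVICE_HOST', f'{prefix}_SERVICE_PORT'

print(service_env_vars('kubia'))           # ('KUBIA_SERVICE_HOST', 'KUBIA_SERVICE_PORT')
print(service_env_vars('kubia-nodeport'))  # ('KUBIA_NODEPORT_SERVICE_HOST', ...)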
Example #19
    def test_session_affinity(self):
        run('kubectl delete svc kubia', True)
        run('kubectl delete svc kubia-session-affinity', True)

        run('kubectl create -f kubia-session-affinity-svc.yaml')
        run('kubectl get svc kubia-session-affinity')
        run('kubectl describe svc kubia-session-affinity')
        pod_name = run(
            "kubectl get pods -l app=kubia -ojsonpath='{.items[0].metadata.name}'",
            True)
        ensure_pod_phase(pod_name)
        clusterIp = run(
            "kubectl get svc kubia -o=jsonpath='{.spec.clusterIP}'", True)
        pods = set()
        for i in range(10):
            pods.add(
                run(f"kubectl exec {pod_name} -- curl -s http://{clusterIp}",
                    True))
        self.assertEqual(len(pods), 1)
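The single-entry set hinges on the Service presumably being created with sessionAffinity: ClientIP; that field can be asserted directly instead of being read out of describe. A sketch:

import subprocess

# With ClientIP affinity the proxy keeps sending one client's requests to the
# same backing pod, which is why the set above collapses to a single entry.
affinity = subprocess.check_output([
    'kubectl', 'get', 'svc', 'kubia-session-affinity',
    '-o', 'jsonpath={.spec.sessionAffinity}',
]).decode()
print(affinity)  # expected: ClientIP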
Example #20
    def test_pv_vol(self):
        # run test_host_path_vol first to populate MongoDB with data
        run('kubectl delete pvc mongodb-pvc', True)
        run('kubectl delete pv mongodb-pv', True)
        run('kubectl delete pod mongodb', True)
        ensure_pod_phase('mongodb', 'Deleted')

        # create pv first
        run('kubectl create -f mongodb-pv-hostpath.yaml')
        run('kubectl get pv')

        # then create pvc
        run('kubectl create -f mongodb-pvc.yaml')
        run('kubectl get pvc')

        # then create pod using pvc
        run('kubectl create -f mongodb-pod-pvc.yaml')
        ensure_pod_phase('mongodb', 'Running')

        run("""kubectl exec mongodb -- mongo localhost/mystore --quiet --eval 'db.foo.find({})'""")
Example #21
    def test_creating_pod_with_resource_requested(self):
        init_test_env(NS)
        run(f'kubectl create -f requests-pod.yaml -n {NS}')
        ensure_pod_phase('requests-pod', 'Running', NS)
        # The Minikube VM, which is where this example is running, has two CPU cores allotted to it.
        # That's why the process is shown consuming 50% of the whole CPU.
        run(f'kubectl exec requests-pod -n {NS} -- top -bn1')


        with self.subTest("Creating a pod that doesn't fit on any node"):
            run(f"kubectl run requests-pod-2 --image=busybox --restart Never --requests='cpu=800m,memory=20Mi' -n {NS} -- dd if=/dev/zero of=/dev/null")
            ensure_pod_phase('requests-pod-2', 'Running', NS)
            time.sleep(5)
            run(f"kubectl run requests-pod-3 --image=busybox --restart Never --requests='cpu=1,memory=20Mi' -n {NS} -- dd if=/dev/zero of=/dev/null")
            ensure_pod_phase('requests-pod-3', 'Pending', NS)
            stdout = run(f"kubectl get po requests-pod-3 -n {NS} -o jsonpath='{{.status.conditions[0].message}}'")
            self.assertIn("Insufficient cpu", stdout)
            run(f'kubectl delete po requests-pod-2 -n {NS}')
            ensure_pod_phase('requests-pod-2', 'Deleted', NS)
            ensure_pod_phase('requests-pod-3', 'Running', NS)
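Why requests-pod-3 stays Pending: the scheduler compares the sum of CPU requests against the node's allocatable CPU, not against actual usage, and on a 2-CPU minikube node the 1000m request no longer fits next to 800m plus the system pods. A sketch that surfaces those numbers (assuming the single node is named minikube):

import subprocess

# Allocatable CPU on the node versus what is already requested; a Pending pod
# with "Insufficient cpu" means requested > what is left of allocatable.
alloc = subprocess.check_output([
    'kubectl', 'get', 'node', 'minikube',
    '-o', 'jsonpath={.status.allocatable.cpu}',
]).decode()
print('allocatable cpu:', alloc)
subprocess.run('kubectl describe node minikube | grep -A5 "Allocated resources"',
               shell=True)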
Example #22
    def test_create(self):
        if not is_rc_ready('kubia'):
            run('kubectl create -f kubia-rc.yaml')
            ensure_rc_ready('kubia')
        run('kubectl delete svc kubia', True)

        run('kubectl create -f kubia-svc.yaml')
        run('kubectl get svc kubia')
        pod_name = run(
            "kubectl get pods -l app=kubia -ojsonpath='{.items[0].metadata.name}'",
            True)
        ensure_pod_phase(pod_name)
        clusterIp = run("kubectl get svc kubia -ojsonpath='{.spec.clusterIP}'",
                        True)
        run(f"kubectl exec {pod_name} -- curl -s http://{clusterIp}")
        pods = set()
        for i in range(10):
            pods.add(
                run(f"kubectl exec {pod_name} -- curl -s http://{clusterIp}",
                    True))
        self.assertGreater(len(pods), 1)
Example #23
    def test_using_secret_by_vol(self):
        run("kubectl delete pod fortune-https", True)
        ensure_pod_phase('fortune-https', 'Deleted')
        run('kubectl delete cm fortune-config', True)

        # 1, create cm needed
        run('kubectl create configmap fortune-config --from-file=configmap-https-files'
            )

        # 2, create pod
        run('kubectl create -f fortune-pod-https.yaml')
        ensure_pod_phase('fortune-https', 'Running')

        # verify
        p = subprocess.Popen('kubectl port-forward fortune-https 8443:443',
                             shell=True)
        time.sleep(1)
        run('curl -v -s -k https://localhost:8443')
        p.terminate()
        # The secret volume uses an in-memory filesystem (tmpfs) for the Secret files.
        run("kubectl exec fortune-https -c web-server -- mount | grep certs")
Example #24
    def test_create(self):
        '''
        storageClassName is not set in the PVC template, which means a dynamically provisioned PV is used.
        '''
        _clear()

        run('kubectl create -f persistent-volumes-hostpath.yaml')
        run('kubectl create -f kubia-statefulset.yaml')
        ensure_pod_phase('kubia-0', 'Running')
        run('kubectl get sts kubia')
        run('kubectl get po')
        run('kubectl get pvc')
        run('kubectl get pv')
        storage_class = run(
            """kubectl get pvc -o jsonpath='{.items[?(@.metadata.name=="data-kubia-0")].spec.storageClassName}'""",
            True)
        self.assertEqual(storage_class, 'standard')
        phase = run(
            """kubectl get pv -o jsonpath='{.items[?(@.spec.claimRef.name=="data-kubia-0")].status.phase}'""",
            True)
        self.assertEqual(phase, 'Bound')
Example #25
    def test_using_host_node_namespaces(self):
        init_test_env(NAME_SPACE)

        with self.subTest("Using node network namespace"):
            run(f'kubectl create -f pod-with-host-network.yaml -n {NAME_SPACE}'
                )
            ensure_pod_phase('pod-with-host-network', 'Running', NAME_SPACE)
            stdout = run(
                f'kubectl exec pod-with-host-network -n {NAME_SPACE} -- ifconfig'
            )
            self.assertIn('docker0', stdout)

        with self.subTest(
                "Binding host port without using host network namespace"):
            run(f'kubectl create -f kubia-hostport.yaml -n {NAME_SPACE}')
            ensure_pod_phase('kubia-hostport', "Running", NAME_SPACE)
            minikube_ip = run('minikube ip', True)
            run(f'curl -s http://{minikube_ip}:9000/')

        with self.subTest("Using node PID and IPC namespaces"):
            run(f'kubectl create -f pod-with-host-pid-and-ipc.yaml -n {NAME_SPACE}'
                )
            ensure_pod_phase('pod-with-host-pid-and-ipc', 'Running',
                             NAME_SPACE)
            run(f'kubectl exec pod-with-host-pid-and-ipc -n {NAME_SPACE} -- ps aux'
                )
Example #26
    def test_limit_range(self):
        init_test_env(NS)

        with self.subTest("Enforcing limits"):
            run(f'kubectl create -f limits.yaml -n {NS}')
            stdout = run(f'kubectl create -f limits-pod-too-big.yaml -n {NS} 2>&1; true')
            self.assertIn('must be less than or equal to cpu limit', stdout)

        with self.subTest("Applying default resource requests and limits"):
            run(f'kubectl create -f kubia-manual.yaml -n {NS}')
            ensure_pod_phase('kubia-manual', 'Running', NS)
            default_cpu_request = run(f"kubectl get po kubia-manual -n {NS} -o jsonpath='{{.spec.containers[0].resources.requests.cpu}}'")
            default_cpu_limit = run(f"kubectl get po kubia-manual -n {NS} -o jsonpath='{{.spec.containers[0].resources.limits.cpu}}'")
            default_mem_request = run(f"kubectl get po kubia-manual -n {NS} -o jsonpath='{{.spec.containers[0].resources.requests.memory}}'")
            default_mem_limit = run(f"kubectl get po kubia-manual -n {NS} -o jsonpath='{{.spec.containers[0].resources.limits.memory}}'")
            with open('limits.yaml', 'rb') as fp:
                definition = yaml.load(fp, Loader=yaml.Loader)
                container_limits = [limit for limit in definition['spec']['limits'] if limit['type'] == 'Container'][0]
            self.assertEqual(default_cpu_request, container_limits['defaultRequest']['cpu'])
            self.assertEqual(default_cpu_limit, container_limits['default']['cpu'])
            self.assertEqual(default_mem_request, container_limits['defaultRequest']['memory'])
            self.assertEqual(default_mem_limit, container_limits['default']['memory'])
Example #27
    def test_consistent_state(self):
        now1 = str(datetime.datetime.now())
        time.sleep(1)
        now2 = str(datetime.datetime.now())
        p = subprocess.Popen('kubectl proxy --port=48001', shell=True)
        time.sleep(1)

        run(f'curl -s -X POST -d "{now1}" localhost:48001/api/v1/namespaces/default/pods/kubia-0/proxy/'
            )
        run('curl -s localhost:48001/api/v1/namespaces/default/pods/kubia-0/proxy/'
            )
        run('kubectl delete po kubia-0')
        ensure_pod_phase('kubia-0', 'Running')
        output = run(
            'curl -s localhost:48001/api/v1/namespaces/default/pods/kubia-0/proxy/'
        )
        body = output.split('\n')[1]
        index = body.index(':')
        self.assertEqual(now1, body[index + 2:])

        p.terminate()
        p.wait()
Example #28
    def test_assigning_sa_to_pod(self):
        with open(os.devnull, 'w') as f:
            with redirect_stdout(f), redirect_stderr(f):
                self.test_create()

        run('kubectl delete po curl-custom-sa', True)
        ensure_pod_phase('curl-custom-sa', 'Deleted')
        run('kubectl create -f curl-custom-sa.yaml')
        ensure_pod_phase('curl-custom-sa', 'Running')
        sec = run("kubectl get sa foo -o jsonpath='{.secrets[0].name}'", True)
        sec_token = run(
            f"kubectl get secret {sec} -o jsonpath='{{.data.token}}' | base64 -D"
        )
        pod_token = run(
            "kubectl exec curl-custom-sa -c main -- cat /var/run/secrets/kubernetes.io/serviceaccount/token"
        )
        self.assertEqual(sec_token, pod_token)
        # If the response is Success, this may be because your cluster doesn’t use the RBAC authorization plugin,
        # or you gave all ServiceAccounts full permissions, like:
        # `kubectl create clusterrolebinding permissive-binding --clusterrole=cluster-admin --group=system:serviceaccounts`
        run("kubectl exec curl-custom-sa -c main -- curl -s localhost:8001/api/v1/pods"
            )
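The ServiceAccount token is a JWT, so its payload can be decoded (without verifying the signature) to see which account the pod is actually running as. A sketch reusing the pod_token string obtained above:

import base64
import json

def jwt_payload(token):
    """Decode the middle (payload) segment of a JWT; no signature check."""
    payload_b64 = token.split('.')[1]
    payload_b64 += '=' * (-len(payload_b64) % 4)   # restore stripped padding
    return json.loads(base64.urlsafe_b64decode(payload_b64))

claims = jwt_payload(pod_token)
print(claims.get('sub'))   # e.g. system:serviceaccount:default:foo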
Example #29
    def test_auto_scaling_based_on_cpu(self):
        init_test_env(NS)

        # create deployment first
        run(f'kubectl create -f deployment.yaml -n {NS}')
        ensure_deploy_ready('kubia', NS)
        ensure_replicas('kubia', 3, 'deploy', NS)

        # create HPA (have to enable Heapster and metrics-server:)
        # minikube addons enable heapster
        # minikube addons enable metrics-server
        run(f'kubectl autoscale deployment kubia --cpu-percent=30 --min=1 --max=5 -n {NS}'
            )

        # show HPA
        run(f'kubectl get hpa kubia -o yaml -n {NS}')

        # Because you're running three pods that are currently receiving no requests,
        # which means their CPU usage should be close to zero,
        # you should expect the Autoscaler to scale them down to a single pod.
        ensure_replicas('kubia', 1, 'deploy', NS)

        # expose pod to service
        run(f'kubectl expose deployment kubia --port=80 --target-port=8080 -n {NS}'
            )

        # create loadgenerator pod
        run(f'kubectl run --restart=Never loadgenerator --image=busybox -n {NS} -- sh -c "while true; do wget -O - -q http://kubia.{NS}; done"'
            )
        ensure_pod_phase('loadgenerator', 'Running', NS)

        # autoscaler increase the number of replicas.
        ensure_replicas('kubia', 4, 'deploy', NS)
        run(f'kubectl top pod -n {NS}')
        run(f'kubectl get hpa -n {NS}')
        cpu_util_percent = int(
            run(f"kubectl get hpa kubia -n {NS} -o jsonpath='{{.status.currentCPUUtilizationPercentage}}'"
                ))
        self.assertLessEqual(cpu_util_percent / 4, 30)
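The scale-up to 4 replicas follows the horizontal pod autoscaler's documented rule: desiredReplicas = ceil(currentReplicas * currentMetricValue / desiredMetricValue). A worked sketch with illustrative numbers (the utilization actually measured by the test will vary):

import math

def hpa_desired_replicas(current_replicas, current_utilization, target_utilization):
    """Core HPA scaling rule: ceiling of the utilization ratio."""
    return math.ceil(current_replicas * current_utilization / target_utilization)

# e.g. one replica pegged at 108% CPU with a 30% target asks for 4 replicas.
print(hpa_desired_replicas(1, 108, 30))   # -> 4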
Example #30
    def test_host_path_vol(self):
        run('kubectl delete pod mongodb', True)
        ensure_pod_phase('mongodb', 'Deleted')

        run('kubectl create -f mongodb-pod-hostpath.yaml')
        ensure_pod_phase('mongodb', 'Running')

        now = str(datetime.datetime.now())
        run(f"""kubectl exec mongodb -- mongo --quiet localhost/mystore --eval 'db.foo.insert({{time: "{now}"}})'""")

        run('kubectl delete pod mongodb', True)
        ensure_pod_phase('mongodb', 'Deleted')

        run('kubectl create -f mongodb-pod-hostpath.yaml')
        ensure_pod_phase('mongodb', 'Running')

        ret = run("""kubectl exec mongodb -- mongo localhost/mystore --quiet --eval 'db.foo.find({}, {_id: 0}).sort({time: -1}).limit(1)'""")
        self.assertEqual(now, json.loads(ret)['time'])