Example 1
    def test_resource_quota(self):
        init_test_env(NS)

        run(f'kubectl create -f quota-cpu-memory.yaml -n {NS}')
        run(f'kubectl describe quota -n {NS}')

        # One caveat when creating a ResourceQuota is that you will also want to create a LimitRange object alongside it:
        stdout = run(f'kubectl create -f kubia-manual.yaml -n {NS} 2>&1; true')
        self.assertIn('must specify limits.cpu,limits.memory,requests.cpu,requests.memory', stdout)
        run(f'kubectl create -f limits.yaml -n {NS}')
        run(f'kubectl create -f kubia-manual.yaml -n {NS}')
        # So having a LimitRange with defaults for those resources can make life a bit easier for people creating pods.
        ensure_pod_phase('kubia-manual', 'Running', NS)
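
A note on the helpers: these tests lean on suite utilities such as run and ensure_pod_phase that are defined elsewhere and not shown here. As a rough, hypothetical sketch of what ensure_pod_phase presumably does (the real helper's signature, timeouts and handling of the 'Deleted' pseudo-phase may differ):

import subprocess
import time

def ensure_pod_phase_sketch(pod, phase, ns, timeout=300, interval=5):
    """Poll kubectl until the pod reaches the given phase (or is gone, for 'Deleted')."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        proc = subprocess.run(
            ['kubectl', 'get', 'pod', pod, '-n', ns, '-o', 'jsonpath={.status.phase}'],
            capture_output=True, text=True)
        if phase == 'Deleted' and proc.returncode != 0:
            return  # the pod no longer exists
        if proc.stdout.strip() == phase:
            return
        time.sleep(interval)
    raise AssertionError(f"pod {pod} did not reach phase {phase} within {timeout}s")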
Example 2
    def test_pre_stop_hook(self):
        init_test_env(NS)
        with self.subTest("Using an command hook handler"):
            run(f'kubectl create -f pre-stop-hook-command.yaml -n {NS}')
            ensure_pod_phase('pod-with-prestop-hook', 'Running', NS)
            time.sleep(60)
            run(f'kubectl describe pod pod-with-prestop-hook -n {NS}')
            run(f'kubectl get pod pod-with-prestop-hook -n {NS}')

        with self.subTest("Using an HTTP GET hook handler"):
            run(f'kubectl create -f pre-stop-hook-httpget.yaml -n {NS}')
            ensure_pod_phase('pod-with-prestop-http-hook', 'Running', NS)
            time.sleep(60)
            run(f'kubectl describe pod pod-with-prestop-http-hook -n {NS}')
            run(f'kubectl get pod pod-with-prestop-http-hook -n {NS}')
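
The two manifests themselves are not shown, but the lifecycle API they exercise is small: a preStop hook is either an exec command or an HTTP GET that the kubelet runs just before terminating the container. Below is a sketch of the relevant container sections, written as the Python dicts PyYAML would produce; the command, port and path are assumptions, not the repository's actual pre-stop-hook-*.yaml contents.

# Hypothetical preStop sections; the real pre-stop-hook-*.yaml files may use other values.
prestop_command_container = {
    'name': 'main',
    'image': 'luksa/kubia',
    'lifecycle': {
        'preStop': {
            'exec': {  # run a command inside the container before termination
                'command': ['sh', '-c', 'echo "pre-stop hook running" > /prestop.log'],
            },
        },
    },
}

prestop_httpget_container = {
    'name': 'main',
    'image': 'luksa/kubia',
    'lifecycle': {
        'preStop': {
            'httpGet': {  # the kubelet performs an HTTP GET against the container
                'port': 8080,
                'path': 'shutdown',
            },
        },
    },
}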
Example 3
    def test_auto_scaling_down_with_pod_disruption_budget(self):
        init_test_env(NS)

        run(f"kubectl create pdb kubia-pdb --selector=app=kubia --min-available=2 -n {NS}"
            )
        run(f"kubectl get pdb kubia-pdb -o yaml -n {NS}")

        # create deployment
        run(f'kubectl create -f deployment.yaml -n {NS}')
        ensure_deploy_ready('kubia', NS)
        ensure_replicas('kubia', 3, 'deploy', NS)

        # create hpa
        run(f'kubectl autoscale deployment kubia --cpu-percent=30 --min=1 --max=5 -n {NS}'
            )
        # The PDB has nothing to do with the HPA: it only constrains voluntary evictions
        # (e.g. kubectl drain), so the HPA is still free to scale the deployment down to 1 replica.
        ensure_replicas('kubia', 1, 'deploy', NS)
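
ensure_replicas and ensure_deploy_ready are likewise suite helpers that are not shown. A minimal sketch, assuming ensure_replicas simply polls the object's ready-replica count (the actual helper may check different status fields or timeouts):

import subprocess
import time

def ensure_replicas_sketch(name, expected, kind, ns, timeout=600, interval=10):
    """Poll kubectl until the workload reports the expected number of ready replicas."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        out = subprocess.run(
            ['kubectl', 'get', kind, name, '-n', ns, '-o', 'jsonpath={.status.readyReplicas}'],
            capture_output=True, text=True).stdout.strip()
        if out and int(out) == expected:
            return
        time.sleep(interval)
    raise AssertionError(f"{kind}/{name} never reached {expected} ready replicas")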
Example 4
    def test_node_affinity(self):
        '''
        Run this testcase under EKS with at least 2 worker nodes
        '''
        init_test_env(NS)
        nodes = run(
            f"kubectl get node -o jsonpath='{{.items[*].metadata.name}}'",
            True).split()
        for node in nodes:
            run(f'kubectl label node {node} gpu-', True)  # delete label first
            run(f'kubectl label node {node} availability-zone-', True)
            run(f'kubectl label node {node} share-type-', True)

        with self.subTest("Specifying hard node affinity rules"):
            node = random.choice(nodes)
            run(f'kubectl label node {node} gpu=true')
            run(f'kubectl create -f kubia-gpu-nodeaffinity.yaml -n {NS}')
            ensure_pod_phase('kubia-gpu', 'Running', NS)
            stdout = run(f'kubectl get pod kubia-gpu -o wide -n {NS}')
            self.assertIn(node, stdout)

        with self.subTest("Prioritizing nodes when scheduling a pod"):
            node1 = nodes[0]
            node2 = nodes[1]
            run(f'kubectl label node {node1} availability-zone=zone1', True)
            run(f'kubectl label node {node1} share-type=dedicated', True)
            run(f'kubectl label node {node2} availability-zone=zone2', True)
            run(f'kubectl label node {node2} share-type=shared', True)
            run(f'kubectl get node -L availability-zone -L share-type')
            run(f'kubectl create -f preferred-deployment.yaml -n {NS}')
            ensure_deploy_ready('pref', NS)
            # Nodes whose 'availability-zone' and 'share-type' labels match the pod's node affinity are ranked the highest.
            # Next come the 'shared' nodes in 'zone1', then come the 'dedicated' nodes in the other zones,
            # and at the lowest priority are all the other nodes.
            run(f'kubectl get po -l app=pref -o wide -n {NS}')
            node1_num = int(
                run(
                    f'kubectl get po -l app=pref -o wide -n {NS} | grep {node1} | wc -l',
                    True))
            node2_num = int(
                run(
                    f'kubectl get po -l app=pref -o wide -n {NS} | grep {node2} | wc -l',
                    True))
            self.assertGreater(node1_num, node2_num)
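
That final assertion only holds because the deployment's preferred node affinity weighs the availability-zone preference more heavily than the share-type one, so node1 (zone1 + dedicated) outranks node2 (zone2 + shared). Here is a sketch of such an affinity section as a parsed Python structure; the exact weights in the repository's preferred-deployment.yaml are assumptions.

# Hypothetical preferredDuringScheduling section; real weights and labels may differ.
preferred_node_affinity = {
    'nodeAffinity': {
        'preferredDuringSchedulingIgnoredDuringExecution': [
            {   # strongest preference: nodes labelled availability-zone=zone1
                'weight': 80,
                'preference': {'matchExpressions': [
                    {'key': 'availability-zone', 'operator': 'In', 'values': ['zone1']}]},
            },
            {   # weaker preference: nodes labelled share-type=dedicated
                'weight': 20,
                'preference': {'matchExpressions': [
                    {'key': 'share-type', 'operator': 'In', 'values': ['dedicated']}]},
            },
        ],
    },
}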
Example 5
    def test_creating_pod_with_resource_requested(self):
        init_test_env(NS)
        run(f'kubectl create -f requests-pod.yaml -n {NS}')
        ensure_pod_phase('requests-pod', 'Running', NS)
        # The Minikube VM, which is where this example is running, has two CPU cores allotted to it.
        # That's why the process is shown consuming 50% of the whole CPU.
        run(f'kubectl exec requests-pod -n {NS} -- top -bn1')


        with self.subTest("Creating a pod that doesn't fit on any node"):
            run(f"kubectl run requests-pod-2 --image=busybox --restart Never --requests='cpu=800m,memory=20Mi' -n {NS} -- dd if=/dev/zero of=/dev/null")
            ensure_pod_phase('requests-pod-2', 'Running', NS)
            time.sleep(5)
            run(f"kubectl run requests-pod-3 --image=busybox --restart Never --requests='cpu=1,memory=20Mi' -n {NS} -- dd if=/dev/zero of=/dev/null")
            ensure_pod_phase('requests-pod-3', 'Pending', NS)
            stdout = run(f"kubectl get po requests-pod-3 -n {NS} -o jsonpath='{{.status.conditions[0].message}}'")
            self.assertIn("Insufficient cpu", stdout)
            run(f'kubectl delete po requests-pod-2 -n {NS}')
            ensure_pod_phase('requests-pod-2', 'Deleted', NS)
            ensure_pod_phase('requests-pod-3', 'Running', NS)
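
The 'Insufficient cpu' outcome is plain arithmetic over CPU requests: the scheduler subtracts the requests of every pod already on the node (system pods included) from the node's allocatable CPU, and requests-pod-3's 1-CPU request no longer fits until requests-pod-2 releases its 800m. A rough sketch of that bookkeeping, with the node capacity, system-pod overhead and requests-pod's own request treated as illustrative assumptions:

# Illustrative numbers only: a 2-core node, some CPU already reserved by system pods,
# and requests-pod assumed to request 200m of CPU.
allocatable_mcpu = 2000
system_pods_mcpu = 300       # varies per cluster
requests_pod_mcpu = 200
requests_pod_2_mcpu = 800
requests_pod_3_mcpu = 1000

free = allocatable_mcpu - system_pods_mcpu - requests_pod_mcpu - requests_pod_2_mcpu
assert free < requests_pod_3_mcpu                           # requests-pod-3 stays Pending
assert free + requests_pod_2_mcpu >= requests_pod_3_mcpu    # after deleting requests-pod-2 it fits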
Example 6
    def test_auto_scaling_based_on_cpu(self):
        init_test_env(NS)

        # create deployment first
        run(f'kubectl create -f deployment.yaml -n {NS}')
        ensure_deploy_ready('kubia', NS)
        ensure_replicas('kubia', 3, 'deploy', NS)

        # create HPA (the Heapster and metrics-server add-ons have to be enabled first):
        # minikube addons enable heapster
        # minikube addons enable metrics-server
        run(f'kubectl autoscale deployment kubia --cpu-percent=30 --min=1 --max=5 -n {NS}'
            )

        # show HPA
        run(f'kubectl get hpa kubia -o yaml -n {NS}')

        # Because you're running three pods that are currently receiving no requests,
        # which means their CPU usage should be close to zero,
        # you should expect the Autoscaler to scale them down to a single pod.
        ensure_replicas('kubia', 1, 'deploy', NS)

        # expose the deployment through a Service
        run(f'kubectl expose deployment kubia --port=80 --target-port=8080 -n {NS}'
            )

        # create loadgenerator pod
        run(f'kubectl run --restart=Never loadgenerator --image=busybox -n {NS} -- sh -c "while true; do wget -O - -q http://kubia.{NS}; done"'
            )
        ensure_pod_phase('loadgenerator', 'Running', NS)

        # the autoscaler increases the number of replicas.
        ensure_replicas('kubia', 4, 'deploy', NS)
        run(f'kubectl top pod -n {NS}')
        run(f'kubectl get hpa -n {NS}')
        cpu_util_percent = int(
            run(f"kubectl get hpa kubia -n {NS} -o jsonpath='{{.status.currentCPUUtilizationPercentage}}'"
                ))
        self.assertLessEqual(cpu_util_percent / 4, 30)
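
The earlier scale-out to 4 replicas follows the horizontal pod autoscaler's documented rule: desiredReplicas = ceil(currentReplicas * currentMetricValue / targetMetricValue). A quick check with illustrative numbers (a single pod driven to roughly 108% utilization, as in the Kubernetes in Action walkthrough this test appears to follow):

from math import ceil

def hpa_desired_replicas(current_replicas, current_cpu_percent, target_cpu_percent):
    # The autoscaler's core rule; the result is then clamped to the configured --min/--max.
    return ceil(current_replicas * current_cpu_percent / target_cpu_percent)

# One pod at ~108% CPU against a 30% target scales out to 4,
# which is what ensure_replicas('kubia', 4, 'deploy', NS) above waits for.
assert hpa_desired_replicas(1, 108, 30) == 4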
Example 7
    def test_limit_range(self):
        init_test_env(NS)

        with self.subTest("Enforcing limits"):
            run(f'kubectl create -f limits.yaml -n {NS}')
            stdout = run(f'kubectl create -f limits-pod-too-big.yaml -n {NS} 2>&1; true')
            self.assertIn('must be less than or equal to cpu limit', stdout)

        with self.subTest("Applying default resource requests and limits"):
            run(f'kubectl create -f kubia-manual.yaml -n {NS}')
            ensure_pod_phase('kubia-manual', 'Running', NS)
            default_cpu_request = run(f"kubectl get po kubia-manual -n {NS} -o jsonpath='{{.spec.containers[0].resources.requests.cpu}}'")
            default_cpu_limit = run(f"kubectl get po kubia-manual -n {NS} -o jsonpath='{{.spec.containers[0].resources.limits.cpu}}'")
            default_mem_request = run(f"kubectl get po kubia-manual -n {NS} -o jsonpath='{{.spec.containers[0].resources.requests.memory}}'")
            default_mem_limit = run(f"kubectl get po kubia-manual -n {NS} -o jsonpath='{{.spec.containers[0].resources.limits.memory}}'")
            with open('limits.yaml', 'rb') as fp:
                definition = yaml.load(fp, Loader=yaml.Loader)
                container_limits = [limit for limit in definition['spec']['limits'] if limit['type'] == 'Container'][0]
            self.assertEqual(default_cpu_request, container_limits['defaultRequest']['cpu'])
            self.assertEqual(default_cpu_limit, container_limits['default']['cpu'])
            self.assertEqual(default_mem_request, container_limits['defaultRequest']['memory'])
            self.assertEqual(default_mem_limit, container_limits['default']['memory'])
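
For those assertions to pass, limits.yaml must contain a Container-type entry with both defaultRequest and default sections, which the LimitRange admission plugin applies to pods that omit their own requests and limits. A sketch of the parsed structure the test indexes into; the concrete values in the repository's limits.yaml are assumptions:

# Hypothetical parsed form of the Container entry in limits.yaml.
container_limits_example = {
    'type': 'Container',
    'defaultRequest': {'cpu': '100m', 'memory': '10Mi'},   # applied as requests when a pod omits them
    'default':        {'cpu': '200m', 'memory': '100Mi'},  # applied as limits when a pod omits them
    'min':            {'cpu': '50m',  'memory': '5Mi'},
    'max':            {'cpu': '1',    'memory': '1Gi'},
}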
Example 8
    def test_pod_affinity(self):
        '''
        Run this testcase under EKS with at least 2 worker nodes
        '''
        init_test_env(NS)
        # First, deploy the backend pod
        run(f'kubectl run backend -l app=backend --image busybox -n {NS} -- sleep 999999'
            )
        ensure_deploy_ready('backend', NS)

        with self.subTest(
                "Using inter-pod affinity to deploy pods on the same node"):
            '''
            Deploy a backend pod and five frontend pod replicas with pod affinity configured
            so that they're all deployed on the same node as the backend pod.
            '''
            run(f'kubectl create -f frontend-podaffinity-host.yaml -n {NS}')
            ensure_deploy_ready('frontend', NS)
            run(f'kubectl get po -o wide -n {NS}')

            backend_node = run(
                f"kubectl get po -l app=backend -n {NS} -o jsonpath='{{.items[*].spec.nodeName}}'",
                True)
            frontend_nodes = run(
                f"kubectl get po -l app=frontend -n {NS} -o jsonpath='{{.items[*].spec.nodeName}}'",
                True).split()
            self.assertEqual(len(set(frontend_nodes)), 1)
            self.assertEqual(frontend_nodes[0], backend_node)

            # If you now delete the backend pod, the Scheduler will schedule the replacement pod
            # onto the same node, even though the backend pod itself defines no affinity rules.
            # This confirms the Scheduler takes other pods' pod affinity rules into account.
            old_backend_pod = run(
                f"kubectl get po -l app=backend -n {NS} -o jsonpath='{{.items[*].metadata.name}}'",
                True)
            run(f"kubectl delete pod -l app=backend -n {NS}")
            ensure_pod_phase(old_backend_pod, 'Deleted', NS)
            new_backend_node = run(
                f"kubectl get po -l app=backend -n {NS} -o jsonpath='{{.items[*].spec.nodeName}}'",
                True)
            self.assertEqual(backend_node, new_backend_node)

        with self.subTest("Expressing pod affinity preferences"):
            run(f'kubectl create -f frontend-podaffinity-preferred-host.yaml -n {NS}'
                )
            ensure_deploy_ready('frontend-pref', NS)
            run(f"kubectl get pod -l app=frontend-pref -o wide -n {NS}")
            frontend_pref_nodes = run(
                f"kubectl get po -l app=frontend-pref -n {NS} -o jsonpath='{{.items[*].spec.nodeName}}'",
                True).split()
            backend_node_num = frontend_pref_nodes.count(backend_node)
            other_node_num = len(frontend_pref_nodes) - backend_node_num
            # The Scheduler prefers backend_node for the frontend-pref pods,
            # but may schedule some of them onto other nodes as well.
            self.assertGreater(backend_node_num, other_node_num)

        with self.subTest(
                "Using anti-affinity to spread apart pods of the same deployment"
        ):
            init_test_env(NS)  # remove test history first
            run(f'kubectl create -f frontend-podantiaffinity-host.yaml -n {NS}'
                )
            ensure_replicas('frontend-anti', 3, 'deploy', NS)

            running_pods = run(
                f"""kubectl get pod -n {NS} -o jsonpath='{{.items[?(@.status.phase=="Running")].metadata.name}}'""",
                True).split()
            pending_pods = run(
                f"""kubectl get pod -n {NS} -o jsonpath='{{.items[?(@.status.phase=="Pending")].metadata.name}}'""",
                True).split()
            run(f'kubectl get pod -n {NS} -o wide')
            node_num = len(
                run(
                    f"kubectl get node -o jsonpath='{{.items[*].metadata.name}}'",
                    True).split())
            # Every node has only one pod, remaining pods are all Pending,
            # because the Scheduler isn't allowed to schedule them to the same nodes.
            self.assertEqual(len(running_pods), node_num)
            self.assertEqual(len(pending_pods), 5 - node_num)
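
The last two assertions encode the anti-affinity arithmetic: a hard (requiredDuringScheduling) anti-affinity rule keyed on the node hostname allows at most one frontend-anti pod per node, so on an N-node cluster N pods run and the rest stay Pending (the 5 implies the deployment asks for five replicas). In sketch form:

def anti_affinity_split(replicas, node_count):
    # A hard pod anti-affinity rule on kubernetes.io/hostname allows at most one pod per node.
    running = min(replicas, node_count)
    return running, replicas - running

# Matches the assertions above on, for example, a 2-node cluster.
assert anti_affinity_split(5, 2) == (2, 3)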
Example 9
    def test_configuring_container_security_context(self):
        init_test_env(NAME_SPACE)
        run(f'kubectl run pod-with-defaults --image alpine --restart Never --namespace {NAME_SPACE} -- /bin/sleep 999999'
            )
        ensure_pod_phase("pod-with-defaults", 'Running', NAME_SPACE)

        with self.subTest("Running a container as a specific user"):
            run(f'kubectl create -f pod-as-user-guest.yaml -n {NAME_SPACE}')
            ensure_pod_phase('pod-as-user-guest', 'Running', NAME_SPACE)
            stdout = run(
                f'kubectl exec pod-as-user-guest -n {NAME_SPACE} -- id -u')
            self.assertEqual(stdout, '405')

        with self.subTest("Running container in privileged mode"):
            run(f'kubectl create -f pod-privileged.yaml -n {NAME_SPACE}')
            ensure_pod_phase('pod-privileged', 'Running', NAME_SPACE)
            stdout1 = run(
                f'kubectl exec pod-with-defaults -n {NAME_SPACE} -- ls /dev')
            stdout2 = run(
                f'kubectl exec pod-privileged -n {NAME_SPACE} -- ls /dev')
            # the privileged container sees all the host node's devices. This means it can use any device freely.
            self.assertGreater(len(stdout2.split()), len(stdout1.split()))

        with self.subTest(
                "Adding individual kernel capabilities to a container"):
            stdout = run(
                f'kubectl exec pod-with-defaults -n {NAME_SPACE} -- date -s "12:00:00" 2>&1'
            )
            self.assertIn("can't", stdout)
            run(f'kubectl create -f pod-add-settime-capability.yaml -n {NAME_SPACE}'
                )
            ensure_pod_phase('pod-add-settime-capability', 'Running',
                             NAME_SPACE)
            run(f'kubectl exec pod-add-settime-capability -n {NAME_SPACE} -- date +%T -s "12:00:00"'
                )
            stdout = run(
                f'kubectl exec pod-add-settime-capability -n {NAME_SPACE} -- date +%T'
            )
            self.assertIn('12:00:0', stdout)
            run('minikube ssh date')  # the node's date is changed as well, but NTP quickly corrects it

        with self.subTest('Dropping capabilities from a container'):
            run(f'kubectl create -f pod-drop-chown-capability.yaml -n {NAME_SPACE}'
                )
            ensure_pod_phase('pod-drop-chown-capability', 'Running',
                             NAME_SPACE)
            stdout = run(
                f'kubectl exec pod-drop-chown-capability -n {NAME_SPACE} -- chown guest /tmp 2>&1',
                True)
            self.assertIn('Operation not permitted', stdout)

        with self.subTest(
                "Preventing processes from writing to the container's filesystem"
        ):
            run(f'kubectl create -f pod-with-readonly-filesystem.yaml -n {NAME_SPACE}'
                )
            ensure_pod_phase('pod-with-readonly-filesystem', 'Running',
                             NAME_SPACE)
            stdout = run(
                f'kubectl exec pod-with-readonly-filesystem -n {NAME_SPACE} -- touch /newfile 2>&1',
                True)
            self.assertIn('Read-only', stdout)
            run(f'kubectl exec pod-with-readonly-filesystem -n {NAME_SPACE} -- touch /volume/newfile'
                )
            stdout = run(
                f'kubectl exec pod-with-readonly-filesystem -n {NAME_SPACE} -- ls -l /volume'
            )
            self.assertIn('newfile', stdout)

        with self.subTest(
                "Sharing volumes when containers run as different users"):
            run(f'kubectl create -f pod-with-shared-volume-fsgroup.yaml -n {NAME_SPACE}'
                )
            ensure_pod_phase('pod-with-shared-volume-fsgroup', 'Running',
                             NAME_SPACE)
            run(f'kubectl exec pod-with-shared-volume-fsgroup -n {NAME_SPACE} -c first -- id'
                )
            stdout = run(
                f'kubectl exec pod-with-shared-volume-fsgroup -n {NAME_SPACE} -c first -- id -G',
                True)
            self.assertEqual(stdout.split(), ['0', '555', '666', '777'])
            stdout = run(
                f'kubectl exec pod-with-shared-volume-fsgroup -n {NAME_SPACE} -c first -- ls -l / | grep volume'
            )
            self.assertIn('555', stdout)
            # The fsGroup security context property only applies when the process creates files in a volume;
            # files created elsewhere (like /tmp below) get the user's effective group instead.
            run(f'kubectl exec pod-with-shared-volume-fsgroup -n {NAME_SPACE} -c first -- touch /tmp/foo'
                )
            stdout = run(
                f'kubectl exec pod-with-shared-volume-fsgroup -n {NAME_SPACE} -c first -- ls -l /tmp/'
            )
            self.assertNotIn('555', stdout)
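
Three of the four group IDs asserted above (555, 666 and 777) come from the pod-level security context of pod-with-shared-volume-fsgroup.yaml: fsGroup becomes the owning group of files the process creates inside volumes, while supplementalGroups are simply added to the user's group list (gid 0 is just the user's default primary group). A sketch of what that section presumably contains, with the user ID as an illustrative assumption:

# Hypothetical securityContext sections matching the groups the test asserts.
pod_level_security_context = {
    'fsGroup': 555,                    # group of files created inside volumes (hence '555' on /volume)
    'supplementalGroups': [666, 777],  # extra groups attached to the container user
}
first_container_security_context = {
    'runAsUser': 1111,                 # illustrative; the two containers run as different users
}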