def mock_cluster_connector():
    # Build a KubernetesClusterConnector for the 'kubernetes-test' cluster and
    # 'bar' pool with the kubernetes client and staticconf patched out, then
    # hand-populate its per-IP node and pod caches.
    with mock.patch('clusterman.kubernetes.kubernetes_cluster_connector.kubernetes'), \
            mock.patch('clusterman.kubernetes.kubernetes_cluster_connector.staticconf'):
        mock_cluster_connector = KubernetesClusterConnector(
            'kubernetes-test', 'bar')
        mock_cluster_connector._nodes_by_ip = {
            '10.10.10.1':
            KubernetesNode(metadata=V1ObjectMeta(name='node1'),
                           status=V1NodeStatus(allocatable={
                               'cpu': '4',
                               'gpu': 2
                           },
                                               capacity={
                                                   'cpu': '4',
                                                   'gpu': '2'
                                               })),
            '10.10.10.2':
            KubernetesNode(metadata=V1ObjectMeta(name='node2'),
                           status=V1NodeStatus(allocatable={'cpu': '6.5'},
                                               capacity={'cpu': '8'}))
        }
        mock_cluster_connector._pods_by_ip = {
            '10.10.10.1': [],
            '10.10.10.2': [
                V1Pod(metadata=V1ObjectMeta(name='pod1'),
                      status=V1PodStatus(phase='Running'),
                      spec=V1PodSpec(containers=[
                          V1Container(name='container1',
                                      resources=V1ResourceRequirements(
                                          requests={'cpu': '1.5'}))
                      ])),
            ]
        }
        return mock_cluster_connector
Example #2
def test_calculate_resource_utilization_for_kube_nodes():
    fake_nodes = [
        V1Node(
            status=V1NodeStatus(
                allocatable={
                    "cpu": "480",
                    "ephemeral-storage": "180Mi",
                    "memory": "730Mi",
                    "nvidia.com/gpu": "2",
                },
                capacity={
                    "cpu": "500",
                    "ephemeral-storage": "200Mi",
                    "memory": "750Mi",
                    "nvidia.com/gpu": "5",
                },
            )
        )
    ]
    free = metastatus_lib.calculate_resource_utilization_for_kube_nodes(
        nodes=fake_nodes
    )["free"]

    assert free.cpus == 480
    assert free.mem == 730
    assert free.disk == 180
    assert free.gpus == 2
Example #3
def test_get_kube_cpu_status():
    fake_nodes = [
        V1Node(status=V1NodeStatus(allocatable={"cpu": "1"}, capacity={"cpu": "3"}))
    ]
    total, used, available = metastatus_lib.get_kube_cpu_status(fake_nodes)
    assert total == 3
    assert used == 2
    assert available == 1
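
The assertions imply a simple convention: total is read from the node's capacity, available from its allocatable, and used is their difference. Below is a minimal sketch of that relationship, assuming this reading (the helper name is hypothetical, not paasta's implementation):

def cpu_status_sketch(nodes):
    # Sum capacity and allocatable across nodes; used is the gap between them.
    # For the node above: total == 3, available == 1, used == 2.
    total = sum(float(node.status.capacity["cpu"]) for node in nodes)
    available = sum(float(node.status.allocatable["cpu"]) for node in nodes)
    return total, total - available, available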
Example #4
    def __init__(self, data: Dict[str, str]) -> None:
        """Create a mock kubernetes cluster.

        data is the mock registry data to ensure images exist
        and to choose tags."""
        self.data = data
        self.daemonsets: Dict[str, Any] = {}

        # Create four nodes in our pretend cluster.  One we will pull to, one
        # we won't, and two will be configured to pull to but will be tainted
        # or unschedulable.  This uses the python kubernetes objects to look
        # like what the client returns.
        n1 = V1Node(
            metadata=V1ObjectMeta(name="n1", labels={"k1": "v1"}),
            spec=V1NodeSpec(),
            status=V1NodeStatus(
                images=[V1ContainerImage(names=["<none>@<none>"])]
            ),
        )
        n2 = V1Node(
            metadata=V1ObjectMeta(name="n2", labels={"k2": "v2"}),
            spec=V1NodeSpec(),
            status=V1NodeStatus(
                images=[V1ContainerImage(names=["<none>@<none>"])]
            ),
        )
        n3 = V1Node(
            metadata=V1ObjectMeta(name="n3", labels={"k1": "v1"}),
            spec=V1NodeSpec(unschedulable=True),
            status=V1NodeStatus(
                images=[V1ContainerImage(names=["<none>@<none>"])]
            ),
        )
        n4 = V1Node(
            metadata=V1ObjectMeta(name="n4", labels={"k1": "v1"}),
            spec=V1NodeSpec(
                taints=[
                    V1Taint(effect="NoSchedule", key="tainted", value="value")
                ]
            ),
            status=V1NodeStatus(
                images=[V1ContainerImage(names=["<none>@<none>"])]
            ),
        )

        self.nodes = [n1, n2, n3, n4]
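
Given the comment above, code exercising this mock cluster would pick pull targets by matching node labels and skipping nodes that are unschedulable or carry a NoSchedule taint. A minimal sketch of such a filter, under that assumption (the helper is hypothetical and not part of the mocked class):

def schedulable_nodes(nodes, label_key, label_value):
    # Keep nodes whose labels match and that are neither marked unschedulable
    # nor tainted with NoSchedule; for the four nodes above and k1=v1 this
    # selects only n1.
    eligible = []
    for node in nodes:
        if (node.metadata.labels or {}).get(label_key) != label_value:
            continue
        if node.spec.unschedulable:
            continue
        if any(t.effect == "NoSchedule" for t in (node.spec.taints or [])):
            continue
        eligible.append(node)
    return eligible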
Example #5
def test_get_kube_memory_status():
    fake_nodes = [
        V1Node(status=V1NodeStatus(allocatable={"memory": "1Gi"},
                                   capacity={"memory": "4Gi"}))
    ]
    total, used, available = metastatus_lib.get_kube_memory_status(fake_nodes)
    assert total == 4 * 1024
    assert used == 3 * 1024
    assert available == 1 * 1024
Example #6
def test_get_kube_disk_status():
    fake_nodes = [
        V1Node(status=V1NodeStatus(
            allocatable={"ephemeral-storage": "1Ti"},
            capacity={"ephemeral-storage": "4Ti"},
        ))
    ]
    total, used, available = metastatus_lib.get_kube_disk_status(fake_nodes)
    assert total == 4 * 1024**2
    assert used == 3 * 1024**2
    assert available == 1 * 1024**2
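
Both tests assume the status helpers report memory and disk in MiB, so 1Gi counts as 1024 and 1Ti as 1024 ** 2. A small sketch of that conversion, limited to the suffixes used here (hypothetical helper, not the real Kubernetes quantity parser):

def quantity_to_mib(quantity):
    # Covers only the binary suffixes that appear in these fixtures:
    # "180Mi" -> 180, "4Gi" -> 4 * 1024, "4Ti" -> 4 * 1024 ** 2.
    factors = {"Mi": 1, "Gi": 1024, "Ti": 1024 ** 2}
    return float(quantity[:-2]) * factors[quantity[-2:]]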
Example #7
def test_get_kube_cpu_status():
    fake_nodes = [
        V1Node(status=V1NodeStatus(
            allocatable={
                'cpu': '1',
            },
            capacity={
                'cpu': '3',
            },
        ), ),
    ]
    total, used, available = metastatus_lib.get_kube_cpu_status(fake_nodes)
    assert total == 3
    assert used == 2
    assert available == 1
Example #8
def test_calculate_resource_utilization_for_kube_nodes():
    fake_nodes = [
        V1Node(
            metadata=V1ObjectMeta(name="fake_node1"),
            status=V1NodeStatus(
                allocatable={
                    "cpu": "500",
                    "ephemeral-storage": "200Mi",
                    "memory": "750Mi",
                },
            ),
        )
    ]
    fake_pods_by_node = {
        "fake_node1": [
            V1Pod(
                metadata=V1ObjectMeta(name="pod1"),
                status=V1PodStatus(phase="Running"),
                spec=V1PodSpec(
                    containers=[
                        V1Container(
                            name="container1",
                            resources=V1ResourceRequirements(
                                requests={
                                    "cpu": "20",
                                    "ephemeral-storage": "20Mi",
                                    "memory": "20Mi",
                                }
                            ),
                        )
                    ]
                ),
            )
        ]
    }
    free = metastatus_lib.calculate_resource_utilization_for_kube_nodes(
        nodes=fake_nodes, pods_by_node=fake_pods_by_node
    )["free"]

    assert free.cpus == 480
    assert free.mem == 730
    assert free.disk == 180
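
The expected values follow from subtracting the summed container requests of the pods scheduled on each node from that node's allocatable resources: 500 - 20 CPUs, 750Mi - 20Mi of memory, 200Mi - 20Mi of ephemeral storage. A minimal sketch of that arithmetic for the cpu resource (hypothetical helper, not the paasta implementation; memory and storage work the same way once the Mi suffix is converted):

def free_cpus(node, pods):
    # allocatable (500) minus the summed container cpu requests (20) -> 480.
    requested = sum(
        float(container.resources.requests.get("cpu", "0"))
        for pod in pods
        for container in pod.spec.containers
    )
    return float(node.status.allocatable["cpu"]) - requested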
Example #9
def test_calculate_resource_utilization_for_kube_nodes():
    fake_nodes = [
        V1Node(status=V1NodeStatus(
            allocatable={
                'cpu': '480',
                'ephemeral-storage': '180Mi',
                'memory': '730Mi',
                'nvidia.com/gpu': '2',
            },
            capacity={
                'cpu': '500',
                'ephemeral-storage': '200Mi',
                'memory': '750Mi',
                'nvidia.com/gpu': '5',
            },
        ), ),
    ]
    free = metastatus_lib.calculate_resource_utilization_for_kube_nodes(
        nodes=fake_nodes, )['free']

    assert free.cpus == 480
    assert free.mem == 730
    assert free.disk == 180
    assert free.gpus == 2
Example #10
def mock_cluster_connector(
    running_pod_1,
    running_pod_2,
    running_pod_on_nonexistent_node,
    unevictable_pod,
    unschedulable_pod,
    pending_pod,
    daemonset_pod,
):
    # Patch the kubernetes module, the CachedCoreV1Api and the clusterman
    # cluster configuration, return three fake nodes and the pod fixtures from
    # the mocked API, then yield a freshly reloaded KubernetesClusterConnector
    # for the 'bar' pool.
    with mock.patch(
            'clusterman.kubernetes.kubernetes_cluster_connector.kubernetes',
    ), mock.patch(
            'clusterman.kubernetes.kubernetes_cluster_connector.CachedCoreV1Api',
    ) as mock_core_api, PatchConfiguration(
        {
            'clusters': {
                'kubernetes-test': {
                    'kubeconfig_path': '/var/lib/clusterman.conf'
                }
            }
        }, ):
        mock_core_api.return_value.list_node.return_value.items = [
            KubernetesNode(metadata=V1ObjectMeta(
                name='node1', labels={'clusterman.com/pool': 'bar'}),
                           status=V1NodeStatus(
                               allocatable={
                                   'cpu': '4',
                                   'gpu': 2
                               },
                               capacity={
                                   'cpu': '4',
                                   'gpu': '2'
                               },
                               addresses=[
                                   V1NodeAddress(type='InternalIP',
                                                 address='10.10.10.1')
                               ],
                           )),
            KubernetesNode(metadata=V1ObjectMeta(
                name='node2', labels={'clusterman.com/pool': 'bar'}),
                           status=V1NodeStatus(
                               allocatable={'cpu': '6.5'},
                               capacity={'cpu': '8'},
                               addresses=[
                                   V1NodeAddress(type='InternalIP',
                                                 address='10.10.10.2')
                               ],
                           )),
            KubernetesNode(metadata=V1ObjectMeta(
                name='node2', labels={'clusterman.com/pool': 'bar'}),
                           status=V1NodeStatus(
                               allocatable={'cpu': '1'},
                               capacity={'cpu': '8'},
                               addresses=[
                                   V1NodeAddress(type='InternalIP',
                                                 address='10.10.10.3')
                               ],
                           )),
        ]
        mock_core_api.return_value.list_pod_for_all_namespaces.return_value.items = [
            running_pod_1,
            running_pod_2,
            running_pod_on_nonexistent_node,
            unevictable_pod,
            unschedulable_pod,
            pending_pod,
            daemonset_pod,
        ]
        mock_cluster_connector = KubernetesClusterConnector(
            'kubernetes-test', 'bar')
        mock_cluster_connector.reload_state()
        yield mock_cluster_connector