Example #1
def test_cannot_add_pod_if_no_free_ips_available(cluster):
    expected_pod_count = 3
    # 2 IP addresses in a network
    cluster.ip_pools.add('192.168.0.0/31', 'node1')
    # 1 IP address in a network
    cluster.ip_pools.add('192.168.1.0/32', 'node2')

    pods = create_new_pods(cluster,
                           expected_pod_count,
                           open_all_ports=True,
                           wait_for_status='running')

    with assert_raises(NonZeroRetCodeException, NO_FREE_IPS_ERR_MSG):
        create_new_pods(cluster, num=1, open_all_ports=True)

    # Make sure there are still expected_pod_count pods
    assert_eq(len(cluster.pods.filter_by_owner()), expected_pod_count)

    # Remove a pod to free an IP and try to add a new one - should succeed
    pods[0].delete()
    create_new_pods(cluster,
                    num=1,
                    open_all_ports=True,
                    wait_for_status='running')

    # Once again, it's not possible to create a pod with a public IP
    with assert_raises(NonZeroRetCodeException, NO_FREE_IPS_ERR_MSG):
        create_new_pods(cluster, num=1, open_all_ports=True)

    # But it's possible to create a pod without a public IP
    create_new_pods(cluster, open_all_ports=False, wait_for_status='running')

    # Make sure there is now one more pod
    pod_count = len(cluster.pods.filter_by_owner())
    assert_eq(pod_count, expected_pod_count + 1)
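
The `create_new_pods` helper used above is not part of this listing. A minimal sketch of what it might look like, assuming the `cluster.pods.create` API and the `utils.get_rnd_low_string` name generator seen in the other examples (the name prefix and defaults are hypothetical):

def create_new_pods(cluster, num=1, open_all_ports=False,
                    wait_for_status=None):
    # Hypothetical helper: create `num` nginx pods and return them as a list.
    new_pods = []
    for _ in range(num):
        pod_name = utils.get_rnd_low_string(prefix='test_nginx_pod_')
        kwargs = dict(open_all_ports=open_all_ports, start=True)
        if wait_for_status is not None:
            kwargs['wait_for_status'] = wait_for_status
        new_pods.append(cluster.pods.create('nginx', pod_name, **kwargs))
    return new_pods
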
def check_custom_port(cluster, port, proto, is_open=False):
    msg = "Check that port: '{proto}:{port}' on node '{node}' is '{state}'"
    for node in cluster.node_names:
        utils.log_debug(
            msg.format(proto=proto,
                       port=port,
                       node=node,
                       state='open' if is_open else 'closed'), LOG)
        node_ip = cluster.nodes.get_node_data(node).get('ip')
        if proto == 'tcp' and is_open:
            with paramiko_expect_http_server(cluster, node, port):
                sleep(SERVER_START_WAIT_TIMEOUT)
                res = unregistered_host_port_check(node_ip, port)
                utils.assert_in('Directory listing for /', res)
        elif proto == 'tcp' and not is_open:
            with paramiko_expect_http_server(cluster, node, port), \
                utils.assert_raises(NonZeroRetCodeException,
                                    expected_ret_codes=CURL_CONNECTION_ERRORS):
                sleep(SERVER_START_WAIT_TIMEOUT)
                unregistered_host_port_check(node_ip, port)
        elif proto == 'udp' and is_open:
            with paramiko_expect_udp_server(cluster, node, port):
                sleep(SERVER_START_WAIT_TIMEOUT)
                out = unregistered_host_udp_check(node_ip, port)
                utils.assert_eq('PONG', out)
        else:
            with paramiko_expect_udp_server(cluster, node, port), \
                utils.assert_raises(
                    NonZeroRetCodeException, 'socket.timeout: timed out'):
                sleep(SERVER_START_WAIT_TIMEOUT)
                unregistered_host_udp_check(node_ip, port)
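
The `unregistered_host_port_check` and `unregistered_host_udp_check` helpers are only referenced here. A rough sketch of how they could be implemented from a host outside the cluster, based only on the assertions above; in the real framework a failed check is surfaced as NonZeroRetCodeException rather than a local curl/socket error:

import socket
import subprocess


def unregistered_host_port_check(host_ip, port):
    # Hypothetical: fetch the HTTP root of the target from an unregistered
    # host and return the response body.
    output = subprocess.check_output(
        ['curl', '-sS', '--connect-timeout', '5',
         'http://{}:{}/'.format(host_ip, port)])
    return output.decode() if isinstance(output, bytes) else output


def unregistered_host_udp_check(host_ip, port=2000):
    # Hypothetical: send 'PING' over UDP and return whatever the test UDP
    # server answers (the checks above expect 'PONG'). The default of 2000
    # mirrors the UDP port used in the isolation tests (assumption).
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.settimeout(5)
    try:
        sock.sendto(b'PING', (host_ip, int(port)))
        data, _ = sock.recvfrom(1024)
        return data.decode().strip()
    finally:
        sock.close()
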
def test_overuse_pv_quota(cluster):
    """
    Scenario as follows:
    1. Create pod with PV(size 1GB) on it
    2. Write 640MB of data on the attached PV. Operation should complete with
        no errors
    3. Try to write another 512MB of data on the same PV. This should fail,
        due to insufficient disk space
    """
    utils.log_debug('===== Overuse Disk quota =====', LOG)
    pv_name = utils.get_rnd_low_string(prefix='integr_test_disk_')
    mount_path = '/nginxpv'
    pv = cluster.pvs.add('dummy', pv_name, mount_path)
    pod = cluster.pods.create('nginx',
                              'test_nginx_pod_1',
                              pvs=[pv],
                              start=True,
                              wait_for_status='running')

    container_id = pod.get_container_id(container_image='nginx')
    # write 640MB to PV
    cmd1 = 'dd if=/dev/zero of={}/tempfile1 bs=64M ' \
           'count=10'.format(mount_path)
    utils.log_debug('Before wipe out: write 640MBs to disk', LOG)
    pod.docker_exec(container_id, cmd1)

    # should fail, due to insufficient disk space
    with utils.assert_raises(NonZeroRetCodeException,
                             DISK_QUOTA_EXCEEDED_MSGS):
        utils.log_debug('Before wipe out: write 512MBs to disk', LOG)
        cmd2 = 'dd if=/dev/zero of={}/tempfile2 bs=64M count=8'.format(
            mount_path)
        pod.docker_exec(container_id, cmd2)

    utils.log_debug('Restart pod with wipe out', LOG)
    pod.redeploy(wipeOut=True)
    utils.wait_for(
        lambda: container_id != pod.get_container_id(container_image='nginx'))
    pod.wait_for_status('running')
    container_id = pod.get_container_id(container_image='nginx')

    utils.log_debug('After wipe out: write 640MBs to disk', LOG)
    pod.docker_exec(container_id, cmd1)

    with utils.assert_raises(NonZeroRetCodeException,
                             DISK_QUOTA_EXCEEDED_MSGS):
        utils.log_debug('After wipe out: write 512MBs to disk', LOG)
        cmd2 = 'dd if=/dev/zero of={}/tempfile2 bs=64M count=8'.format(
            mount_path)
        pod.docker_exec(container_id, cmd2)

    pod.delete()
    pv.delete()
def test_error_start_with_shutdown_local_storage(cluster):
    # type: (KDIntegrationTestAPI) -> None
    """
    Tests that a pod which has a persistent volume on a host with status
    'troubles' produces an error on start and does not actually start.

    More specifically:
    1. Create the pod with persistent volume 'pv'.
    2. Shut down the pod and the host on which it resides.
    3. Wait till Kubernetes sees the host as not working.
    4. Create a new pod with the same persistent volume 'pv'.
    5. Starting the new pod should result in an immediate error.
    """
    pv = cluster.pvs.add('dummy', 'fakepv', '/nginxpv')
    pod = cluster.pods.create('nginx',
                              'test_nginx_pod',
                              pvs=[pv],
                              start=True,
                              wait_for_status='running')
    host = pod.info['host']
    pod.stop()
    pod.wait_for_status('stopped')
    with cluster.temporary_stop_host(host):
        wait_for(lambda: cluster.get_host_status(host) == 'troubles')
        new_pod = cluster.pods.create('nginx',
                                      'test_nginx_pod_new',
                                      pvs=[pv],
                                      start=False)
        with assert_raises(
                NonZeroRetCodeException,
                "There are no suitable nodes for the pod. Please try"
                " again later or contact KuberDock administrator"):
            new_pod.start()
        assert new_pod.status == 'stopped'
Example #5
def test_a_pv_created_together_with_pod(cluster):
    # type: (KDIntegrationTestAPI) -> None
    # We have an issue related to using non-unique disk names within the
    # same CEPH pool (AC-3831). That is why the name is randomized.
    pv_name = utils.gen_rnd_ceph_pv_name()

    mount_path = '/usr/share/nginx/html'

    # It is possible to create an nginx pod together with a new PV
    pv = cluster.pvs.add("dummy", pv_name, mount_path)
    pod = cluster.pods.create("nginx",
                              "test_nginx_pod_1",
                              pvs=[pv],
                              start=True,
                              wait_for_status='running',
                              wait_ports=True,
                              ports_to_open=(80, ))
    utils.assert_eq(pv.exists(), True)

    c_id = pod.get_container_id(container_image='nginx')
    pod.docker_exec(c_id,
                    'echo -n TEST > {path}/test.txt'.format(path=mount_path))
    ret = pod.do_GET(path='/test.txt')
    utils.assert_eq('TEST', ret)
    pod.delete()

    # It is possible to create an nginx pod using an existing PV
    pod = cluster.pods.create("nginx",
                              "test_nginx_pod_2",
                              pvs=[pv],
                              start=True,
                              wait_for_status='running',
                              wait_ports=True,
                              ports_to_open=(80, ))
    ret = pod.do_GET(path='/test.txt')
    utils.assert_eq('TEST', ret)
    pod.delete()

    # It's possible to remove a PV created together with a pod
    pv.delete()
    utils.assert_eq(pv.exists(), False)

    # Create another PV with the same name
    pv = cluster.pvs.add('dummy', pv_name, mount_path)
    pod = cluster.pods.create('nginx',
                              'test_nginx_pod_3',
                              pvs=[pv],
                              start=True,
                              wait_for_status='running',
                              wait_ports=True,
                              ports_to_open=(80, ))
    utils.assert_eq(pv.exists(), True)

    # '/test.txt' is not on the newly created PV, so we expect HTTP Error 404
    with utils.assert_raises(HTTPError, 'HTTP Error 404: Not Found'):
        pod.do_GET(path='/test.txt')

    pod.delete()
    pv.delete()
    utils.assert_eq(pv.exists(), False)
def _run_test_for_port_and_proto(cluster, port, proto):
    check_custom_port(cluster, port, proto, is_open=False)

    utils.log_debug(
        "Open port: '{proto}:{port}'".format(port=port, proto=proto), LOG)
    allowed_ports_open(cluster, port, proto)

    utils.log_debug(
        "Check that port: '{proto}:{port}' is listed as open".format(
            proto=proto, port=port), LOG)
    assert_open_ports(cluster, port, proto)

    check_custom_port(cluster, port, proto, is_open=True)

    utils.log_debug(
        "Close port: '{proto}:{port}'".format(port=port, proto=proto), LOG)
    allowed_ports_close(cluster, port, proto)

    utils.log_debug(
        "Check that port: '{proto}:{port}' is NOT listed as open".format(
            proto=proto, port=port), LOG)
    with utils.assert_raises(AssertionError):
        assert_open_ports(cluster, port, proto)

    check_custom_port(cluster, port, proto, is_open=False)
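
A hypothetical caller for the helper above, written in the same style as the other tests in this listing (the port value and the test name are made up for illustration):

CUSTOM_PORT = 3000  # hypothetical value, any free custom port would do


def test_open_and_close_custom_port(cluster):
    # Exercise the full open/verify/close/re-verify cycle for both protocols.
    _run_test_for_port_and_proto(cluster, CUSTOM_PORT, 'tcp')
    _run_test_for_port_and_proto(cluster, CUSTOM_PORT, 'udp')
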
Example #7
def test_create_delete_ippool(cluster):
    nginx1 = cluster.pods.create("nginx",
                                 "test_nginx_pod_1",
                                 open_all_ports=True,
                                 start=True,
                                 healthcheck=True,
                                 wait_ports=True,
                                 wait_for_status='running')

    with assert_raises(NonZeroRetCodeException,
                       text='.*You cannot delete this network.*',
                       expected_ret_codes=(1, )):
        cluster.ip_pools.clear()

    nginx1.healthcheck()
    nginx1.delete()
    cluster.ip_pools.clear()
def test_pod_not_start_with_pv_on_shut_down_host(cluster):
    # type: (KDIntegrationTestAPI) -> None
    """
    Tests that a pod will not be able to start if it has a persistent volume
    on a node that is in the 'troubles' state.
    This is a test for https://cloudlinux.atlassian.net/browse/AC-4087

    Specifically, the behaviour is as follows:
    1. Create the pod with persistent volume 'pv'.
    2. Delete pod and persistent volume 'pv'.
    3. Create a pod with persistent volume 'pv', both exactly the same as in
       the first step.
    4. Shut down the pod and the host on which it resides.
    5. Wait till Kubernetes sees the host as not working.
    6. Create a new pod with the same persistent volume 'pv'.
    7. Starting the new pod should result in an immediate error.
    """
    pv = cluster.pvs.add('dummy', 'pv', '/nginxpv')
    pod = cluster.pods.create('nginx',
                              'test_nginx_pod',
                              pvs=[pv],
                              start=True,
                              wait_for_status='running')
    pod.delete()
    pv.delete()
    pv = cluster.pvs.add('dummy', 'pv', '/nginxpv')
    pod = cluster.pods.create('nginx',
                              'test_nginx_pod',
                              pvs=[pv],
                              start=True,
                              wait_for_status='running')
    host = pod.info['host']
    pod.stop()
    pod.wait_for_status('stopped')
    with cluster.temporary_stop_host(host):
        wait_for(lambda: cluster.get_host_status(host) == 'troubles')
        new_pod = cluster.pods.create('nginx',
                                      'test_nginx_pod_new',
                                      pvs=[pv],
                                      start=False)
        with assert_raises(
                NonZeroRetCodeException,
                "There are no suitable nodes for the pod. Please try"
                " again later or contact KuberDock administrator"):
            new_pod.start()
        assert new_pod.status == 'stopped'
Example #9
def test_validate_yaml(cluster):
    # Check that --validate flag prevents creating invalid PA template
    with assert_raises(NonZeroRetCodeException, "Unable to parse template"):
        _pa_create(cluster, "incorrect pa",
                   data="incorrect: template\nexpression",
                   validate=True, check_output=False)

    # Check that --validate flag allows creating valid template
    name = 'correct pa'
    _pa_create(cluster, name, f=PREDEFINED_APPLICATION_FILES["dokuwiki"],
               validate=True, check_output=True)

    # Check that PA template list contains only "correct pa"
    _, out, _ = cluster.kdctl("predefined-apps list")
    templates = json.loads(out)["data"]
    assert_eq(len(templates), 1)
    assert_eq(templates[0]["name"], name)
Example #10
def test_pod_ip_resource(cluster):
    # type: (KDIntegrationTestAPI) -> None
    # It's not possible to create a pod with a public IP when there are no
    # IP pools
    cluster.ip_pools.clear()
    with assert_raises(NonZeroRetCodeException, NO_FREE_IPS_ERR_MSG):
        cluster.pods.create("nginx",
                            "test_nginx_pod_2",
                            open_all_ports=True,
                            start=True)

    assert_eq(cluster.pods.filter_by_owner(), [])

    # It's still possible to create a pod without a public IP
    cluster.pods.create("nginx",
                        "test_nginx_pod_3",
                        start=True,
                        open_all_ports=False,
                        wait_for_status='running')
Example #11
def test_add_get_delete_predefined_application_template_by_name(cluster):
    # Check that PA template can be added from the file
    name = "my pa1"
    _pa_create(cluster, name, f=PREDEFINED_APPLICATION_FILES["dokuwiki"],
               check_output=True)

    # Check that the PA template can be fetched by its name (kdctl)
    template = _pa_get(cluster, command="kdctl", name=name)
    _check_pa_template(template, name=name, origin="unknown")

    # Check that the PA template can be fetched by its name (kcli2)
    template = _pa_get(cluster, command="kcli2", name=name)
    _check_pa_template(template, name=name, origin="unknown")

    # Check that the PA can be deleted by its name
    _pa_delete(cluster, name=name)
    with assert_raises(NonZeroRetCodeException, "Error: Unknown name my pa1"):
        _pa_get(cluster, command="kcli2", name=name)
Example #12
def test_pod_with_pv_restore(cluster):
    """Test that pod with PVs can be restored.

    :type cluster: KDIntegrationTestAPI
    """
    file_name = BACKUP_FILES[NGINX_WITH_PV]
    backup_url = "http://node1/backups"
    path_template = '{owner_id}/{volume_name}.tar.gz'
    # Test that pod with persistent volume can be restored
    pod = cluster.pods.restore(USER, file_path=file_name,
                               pv_backups_location=backup_url,
                               pv_backups_path_template=path_template,
                               wait_for_status="running")
    pod.wait_for_ports()
    assert_in("This page has been restored from tar.gz",
              pod.do_GET(path='/restored_location/'))
    old_id = pod.pod_id

    # Test that the pod isn't removed if a pod with the same name is restored
    # with the --force-not-delete flag
    with assert_raises(NonZeroRetCodeException,
                       'Pod with name .* already exists'):
        cluster.pods.restore(USER, file_path=file_name,
                             pv_backups_location=backup_url,
                             pv_backups_path_template=path_template,
                             flags="--force-not-delete")
    # If the pod hasn't been restored, its id should not change
    assert_eq(old_id, pod.pod_id)

    # Test that the pod is removed together with its disks if a pod with the
    # same name and the same disk names is restored with --force-delete
    path_template = '{owner_name}/{volume_name}.zip'
    pod2 = cluster.pods.restore(USER, file_path=file_name,
                                pv_backups_location=backup_url,
                                pv_backups_path_template=path_template,
                                flags="--force-delete",
                                return_as_json=True)
    # If the pod was restored, its id should differ from the id of the pod
    # with the same name that has just been removed
    assert_not_eq(old_id, pod2.pod_id)
    pod2.wait_for_ports()
    assert_in("This page has been restored from zip",
              pod.do_GET(path='/restored_location/'))
Example #13
def test_add_get_delete_predefined_application_template_by_id(cluster):
    # Check that PA template can be added from the cmd line
    _, template, _ = cluster.ssh_exec("master", "cat {}".format(
                                PREDEFINED_APPLICATION_FILES["drupal"]))
    name = "my pa2"
    template = _pa_create(cluster, name, data=template, check_output=True)

    # Check that the PA template can be fetched by its id (kdctl)
    id_ = template["id"]
    template = _pa_get(cluster, command="kdctl", id=id_)
    _check_pa_template(template, name=name, origin="unknown", id=id_)

    # Check that the PA template can be fetched by its id (kcli2)
    template = _pa_get(cluster, command="kcli2", id=id_)
    _check_pa_template(template, name=name, origin="unknown", id=id_)

    # Check that the PA can be deleted by its id
    _pa_delete(cluster, id=id_)
    with assert_raises(NonZeroRetCodeException, "No such predefined app"):
        _pa_get(cluster, command="kdctl", id=id_)
def test_pv_states_and_deletion_via_kcli2(cluster):
    """
    TestRail Case: Different states of persistent volumes (kcli2)
    https://cloudlinux.testrail.net/index.php?/cases/view/145

    TestRail Case: Removing persistent volume (kcli2)
    https://cloudlinux.testrail.net/index.php?/cases/view/143

    TestRail Case: Try to remove persistent volume which is in use (kcli2)
    https://cloudlinux.testrail.net/index.php?/cases/view/146
    """
    pv1_name = 'disk1'
    pv2_name = 'disk2'
    pv1_mpath = '/nginxpv1'
    pv2_mpath = '/nginxpv2'
    pv1 = cluster.pvs.add('dummy', pv1_name, pv1_mpath)
    pv2 = cluster.pvs.add('dummy', pv2_name, pv2_mpath)
    pod_name = 'test_nginx_pv_states_via_kcli'
    pod = cluster.pods.create(
        'nginx', pod_name, pvs=[pv1, pv2], start=True,
        wait_for_status='running')

    log_debug('Get PV info via kcli2 using PV name', LOG)
    pv1_info = pv1.info()
    pv2_info = pv2.info()
    # Get 'disk1' and 'disk2' ids
    pv1_id = pv1_info['id']
    pv2_id = pv2_info['id']

    # Disk1 states: 'forbidDeletion': True, 'in_use': True
    # 'test_nginx_pv_states_via_kcli' in 'disk1' linkedPods
    assert_pv_states(pv1_info,
                     expected_states=dict(forbidDeletion=True, in_use=True),
                     pod_names=[pod_name])

    # Disk2 states: 'forbidDeletion': True, 'in_use': True
    # 'test_nginx_pv_states_via_kcli' in 'disk2' linkedPods
    assert_pv_states(pv2_info,
                     expected_states=dict(forbidDeletion=True, in_use=True),
                     pod_names=[pod_name])

    log_debug('Get PV info via kcli2 using PV id', LOG)
    pv1_info = pv1.info(id=pv1_id)
    pv2_info = pv2.info(id=pv2_id)

    # Disk1 states: 'forbidDeletion': True, 'in_use': True
    # 'test_nginx_pv_states_via_kcli' in 'disk1' linkedPods
    assert_pv_states(pv1_info,
                     expected_states=dict(forbidDeletion=True, in_use=True),
                     pod_names=[pod_name])

    # Disk2 states: 'forbidDeletion': True, 'in_use': True
    # 'test_nginx_pv_states_via_kcli' in 'disk2' linkedPods
    assert_pv_states(pv2_info,
                     expected_states=dict(forbidDeletion=True, in_use=True),
                     pod_names=[pod_name])

    log_debug("Try to delete PVs 'disk1' and 'disk2' with pod 'running'", LOG)
    with assert_raises(NonZeroRetCodeException, 'Persistent disk is used.*'):
        pv1.delete(name=pv1_name)

    with assert_raises(NonZeroRetCodeException, 'Persistent disk is used.*'):
        pv2.delete(id=pv2_id)

    log_debug('List PVs using kcli2', LOG)
    pv_list = cluster.pvs.filter()

    log_debug("Make sure 'disk1' and 'disk2' are in the list", LOG)
    assert_eq(pv1_name in [pv['name'] for pv in pv_list], True)
    assert_eq(pv2_name in [pv['name'] for pv in pv_list], True)

    # Stop the pod
    pod.stop()
    pod.wait_for_status('stopped')

    log_debug("Try to delete PVs 'disk1' and 'disk2' with pod 'stopped'", LOG)
    with assert_raises(NonZeroRetCodeException, 'Volume can not be deleted.'
                       ' Reason: Persistent Disk is linked by pods:.*'):
        pv1.delete(name=pv1_name)

    with assert_raises(NonZeroRetCodeException, 'Volume can not be deleted.'
                       ' Reason: Persistent Disk is linked by pods:.*'):
        pv2.delete(id=pv2_id)

    # Get disk info once again
    log_debug("Pod is stopped and 'in_use' should become 'False'", LOG)
    pv1_info = pv1.info()
    pv2_info = pv2.info()

    # Disk1 states: 'forbidDeletion': True, 'in_use': False
    # 'test_nginx_pv_states_via_kcli' should still be in 'disk1' linkedPods
    assert_pv_states(pv1_info,
                     expected_states=dict(forbidDeletion=True, in_use=False),
                     pod_names=[pod_name])
    # Disk2 states: 'forbidDeletion': True, 'in_use': False
    # 'test_nginx_pv_states_via_kcli' should still be in 'disk2' linkedPods
    assert_pv_states(pv2_info,
                     expected_states=dict(forbidDeletion=True, in_use=False),
                     pod_names=[pod_name])

    pod.delete()

    log_debug("Pod is deleted and both 'forbidDeletion' and 'in_use' should "
              "become 'False'", LOG)
    pv1_info = pv1.info()
    pv2_info = pv2.info()

    # Disk1 states: 'forbidDeletion': False, 'in_use': False
    assert_pv_states(pv1_info,
                     expected_states=dict(forbidDeletion=False, in_use=False),
                     pod_names=[])
    # Disk2 states: 'forbidDeletion': False, 'in_use': False
    assert_pv_states(pv2_info,
                     expected_states=dict(forbidDeletion=False, in_use=False),
                     pod_names=[])

    log_debug("Delete 'disk1' using '--name'", LOG)
    res = pv1.delete(name=pv1_name)
    assert_eq(res['status'], 'OK')

    log_debug("Delete 'disk2' using '--id'", LOG)
    res = pv2.delete(id=pv2_id)
    assert_eq(res['status'], 'OK')

    log_debug("Check that 'disk1' is deleted", LOG)
    with assert_raises(NonZeroRetCodeException, 'Error: Unknown name'):
        pv1.info()

    log_debug("Check that 'disk2' is deleted", LOG)
    with assert_raises(NonZeroRetCodeException, 'Error: Unknown name'):
        pv2.info()
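
The `assert_pv_states` helper is referenced throughout this test but not included in the listing. A minimal sketch under the assumption that `linkedPods` holds either pod names or dicts with a 'name' key:

def assert_pv_states(pv_info, expected_states, pod_names=None):
    # Hypothetical helper: check the boolean state flags reported by kcli2
    # and, optionally, that the given pods appear among 'linkedPods'.
    for state, expected in expected_states.items():
        assert_eq(pv_info[state], expected)
    if pod_names is not None:
        linked = pv_info.get('linkedPods', [])
        linked_names = [p['name'] if isinstance(p, dict) else p
                        for p in linked]
        for pod_name in pod_names:
            assert_eq(pod_name in linked_names, True)
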
def test_delete_node_with_pv(cluster):
    """
    Scenario as follows:
    1. Create 2 pods(pod1, pod2) with PVs(pv1, pv2).
    2. Try to delete node. This should fail.
    3. Delete pod2 and pv2.
    4. Try to delete node. This should fail again.
    5. Delete pod1.
    6. Try to delete node. This should fail again.
    7. Delete pv1.
    8. Delete node. Node should be deleted.
    """
    utils.log_debug('===== Delete Node with PV =====', LOG)

    pv_name1 = utils.get_rnd_low_string(prefix='integr_test_disk_')
    mount_path1 = '/nginxpv1'
    pv_name2 = utils.get_rnd_low_string(prefix='integr_test_disk_')
    mount_path2 = '/nginxpv2'

    pv1 = cluster.pvs.add('new', pv_name1, mount_path1)
    pv2 = cluster.pvs.add('new', pv_name2, mount_path2)

    pod1 = cluster.pods.create('nginx',
                               'test_nginx_pod_1',
                               pvs=[pv1],
                               start=False)
    pod2 = cluster.pods.create('nginx',
                               'test_nginx_pod_2',
                               pvs=[pv1, pv2],
                               start=True,
                               wait_for_status='running')

    hosting_node = cluster.nodes.get_node(pod2.node)

    pod2.stop()
    pod2.wait_for_status('stopped')

    # Try to delete node with pv1 and pv2 on it. Should fail.
    with utils.assert_raises(
            NonZeroRetCodeException,
            "Node 'node1' can't be deleted. Reason: users Persistent volumes "
            "located on the node.*"):
        hosting_node.delete()

    pod2.delete()
    pv2.delete()
    # Try to delete node with pv1 on it. Should fail.
    with utils.assert_raises(
            NonZeroRetCodeException,
            "Node 'node1' can't be deleted. Reason: users Persistent volumes "
            "located on the node.*"):
        hosting_node.delete()

    pod1.delete()
    # pod1 is deleted, but pv1 is still linked to the node, so node
    # deletion will fail.
    with utils.assert_raises(
            NonZeroRetCodeException,
            "Node 'node1' can't be deleted. Reason: users Persistent volumes "
            "located on the node.*"):
        hosting_node.delete()

    pv1.delete()
    # No PVs are left on the node, so it can be deleted without problems.
    hosting_node.delete()
def test_zfs_volumes_mount_properly(cluster):
    """
    Automate TestRail case: Deploy with ZFS parameter

    https://cloudlinux.testrail.net/index.php?/cases/view/81
    """
    image = 'nginx'
    pv_name = utils.get_rnd_low_string(prefix='zfs_pv_')
    pv_mpath = '/usr/share/nginx/html'
    pv = cluster.pvs.add('dummy', pv_name, pv_mpath)
    pod = cluster.pods.create(image,
                              'nginx_zfs_volume_mounts',
                              pvs=[pv],
                              ports_to_open=(80, ),
                              start=True,
                              wait_for_status='running',
                              wait_ports=True)

    pod_owner = cluster.users.get(name=pod.owner)
    pv_mountpoint = os.path.join(ZFS_POOL_MOUNTPOINT, str(pod_owner.get('id')),
                                 pv_name)
    check_volume_mounts(cluster, pod, log_msg_prefix='BEFORE NODE REBOOT: ')

    utils.log_debug("Write a file 'test.txt' to PV and get it via HTTP", LOG)
    c_id = pod.get_container_id(container_image=image)
    pod.docker_exec(c_id, 'echo -n TEST > {}/test.txt'.format(pv_mpath))
    ret = pod.do_GET(path='/test.txt')
    utils.assert_eq('TEST', ret)

    # Reboot Node
    cluster.nodes.get_node(pod.node).reboot()

    utils.wait_for(lambda: c_id != pod.get_container_id(container_image=image))
    pod.wait_for_ports()

    check_volume_mounts(cluster, pod, log_msg_prefix='AFTER NODE REBOOT: ')

    utils.log_debug(
        "Make sure that we can get 'test.txt' via HTTP after node reboot", LOG)
    ret = pod.do_GET(path='/test.txt')
    utils.assert_eq('TEST', ret)

    c_id = pod.get_container_id(container_image=image)

    utils.log_debug(
        'Restart Pod and check that volumes are mounted correctly', LOG)
    pod.redeploy()

    utils.wait_for(lambda: c_id != pod.get_container_id(container_image=image))
    pod.wait_for_status('running')

    check_volume_mounts(cluster, pod, log_msg_prefix='AFTER POD RESTART: ')

    node = pod.node
    pod.delete()
    pv.delete()

    utils.log_debug(
        "Make sure that '{}' is not mounted after PV deletion".format(pv_name),
        LOG)
    with utils.assert_raises(NonZeroRetCodeException,
                             expected_ret_codes=GREP_EXIT_CODES):
        utils.retry(assert_volume_mounts,
                    cluster=cluster,
                    mountpoint=pv_mountpoint,
                    node=node,
                    assertion=utils.assert_not_in,
                    tries=3,
                    interval=60)

    utils.log_debug(
        "Make sure that '{}' is not in mountpoints".format(pv_name), LOG)
    pool_path = os.path.join(ZFS_POOL, str(pod_owner.get('id')), pv_name)

    with utils.assert_raises(NonZeroRetCodeException,
                             'dataset does not exist'):
        utils.retry(assert_zfs_mount_points,
                    cluster=cluster,
                    pool_path=pool_path,
                    volume_mp=pv_mountpoint,
                    node=node,
                    assertion=utils.assert_not_in,
                    tries=3,
                    interval=60)
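
The `assert_volume_mounts` helper retried above is not part of this listing. A plausible minimal sketch, assuming it greps the node's mount table via `cluster.ssh_exec` (so that a missing mountpoint yields a grep exit code and hence NonZeroRetCodeException):

def assert_volume_mounts(cluster, mountpoint, node,
                         assertion=utils.assert_in):
    # Hypothetical helper: list mounts on the node, filter by the mountpoint
    # and apply the passed assertion (assert_in / assert_not_in).
    _, out, _ = cluster.ssh_exec(node, 'mount | grep {}'.format(mountpoint))
    assertion(mountpoint, out)
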
Example #17
def test_cannot_create_pod_with_public_ip_with_no_pools(cluster):
    with assert_raises(NonZeroRetCodeException, NO_FREE_IPS_ERR_MSG):
        create_new_pods(cluster, num=1, open_all_ports=True)

    assert_eq(cluster.pods.filter_by_owner(), [])
def test_network_isolation_for_user_pods(cluster):
    # type: (KDIntegrationTestAPI) -> None
    user1_pods = ['iso1', 'iso3', 'iso4']
    # user2_pods = ['iso2']
    container_ids, container_ips, pods, specs = setup_pods(cluster)

    # ------ General tests -------
    # Docker container has access to the world
    for pod in pods.values():
        ping(pod, container_ids[pod.name], '8.8.8.8')

    # Docker container has a working DNS server
    for pod in pods.values():
        ping(pod, container_ids[pod.name], 'cloudlinux.com')

    for name, pod in pods.items():
        # Check that 10.254.0.10 DNS POD is reachable from container
        pod.docker_exec(container_ids[name],
                        'dig +short cloudlinux.com @10.254.0.10')
        pod.docker_exec(container_ids[name],
                        'dig +short +tcp cloudlinux.com @10.254.0.10')
        # Check that external DNS also works
        pod.docker_exec(container_ids[name],
                        'dig +short cloudlinux.com @8.8.8.8')
        pod.docker_exec(container_ids[name],
                        'dig +short +tcp cloudlinux.com @8.8.8.8')

    # Container can access itself by container IP
    for pod in pods.keys():
        # ICMP check
        ping(pods[pod], container_ids[pod], container_ips[pod])
        # TCP check
        http_check(pods[pod], container_ids[pod], container_ips[pod])
        # UDP check
        container_udp_server(pods[pod], container_ids[pod])
        udp_check(pods[pod], container_ids[pod], container_ips[pod])

    # Container can reach its public IP
    for pod in (p for p in pods.values() if p.public_ip):
        # TCP check
        http_check(pod, container_ids[pod.name], pod.public_ip)
        # UDP check
        if UDP_PORT in pod.ports:
            container_udp_server(pods[pod.name], container_ids[pod.name])
            udp_check(pod, container_ids[pod.name], pod.public_ip)
        else:
            container_udp_server(pod, container_ids[pod.name])
            with utils.assert_raises(NonZeroRetCodeException):
                udp_check(pod, container_ids[pod.name], pod.public_ip)

    # Docker container should have access to kubernetes over flannel
    for name, pod in pods.items():
        https_check(pod, container_ids[name], '10.254.0.1')

    # ----- User -> User isolation tests -----
    # Containers of the same user can reach each other via pod IP
    for src, dst in itertools.product(user1_pods, user1_pods):
        # TCP check
        http_check(pods[src], container_ids[src], container_ips[dst])
        # UDP check
        container_udp_server(pods[dst], container_ids[dst])
        udp_check(pods[src], container_ids[src], container_ips[dst])

    # Containers of the same user see each other via service IP AC-1530
    # NB! Within KuberDock it's called podIP for historical reasons
    for src, dst in itertools.product(user1_pods, user1_pods):
        # TCP check
        http_check(pods[src], container_ids[src], specs[dst]['podIP'])
        # UDP check
        container_udp_server(pods[dst], container_ids[dst])
        udp_check(pods[src], container_ids[src], specs[dst]['podIP'])

    # Containers of the same user can reach each other via public IP
    for src, dst in itertools.product(user1_pods, user1_pods):
        if 'public_ip' not in specs[dst]:
            continue
        # TCP check
        http_check(pods[src], container_ids[src], specs[dst]['public_ip'])
        # UDP check
        container_udp_server(pods[dst], container_ids[dst])
        udp_check(pods[src], container_ids[src], specs[dst]['public_ip'])

    # alt_test_user: iso2 -> test_user: iso1
    # Containers of different users see each other via public/pod/service IP
    # through public ports
    src, dst = 'iso2', 'iso1'
    data = [(specs[dst]['public_ip'], True), (container_ips[dst], True),
            (specs[dst]['podIP'], False)]
    # Here we check that user2's pod has access to all public ports of
    # user1's pod
    for host, do_ping in data:
        # ICMP check
        if do_ping:
            ping(pods[src], container_ids[src], host)
        else:
            with utils.assert_raises(NonZeroRetCodeException):
                ping(pods[src], container_ids[src], host)
        # TCP check. port 80 is public
        http_check(pods[src], container_ids[src], host)
        # UDP check. port 2000 is public
        container_udp_server(pods[dst], container_ids[dst])
        udp_check(pods[src], container_ids[src], host)

    # test_user: iso1 -> alt_test_user: iso2
    # Containers of different users don't see each other through closed ports,
    # only through public ports (public/service/pod IP)
    src, dst = 'iso1', 'iso2'
    data = [(specs[dst]['public_ip'], True), (container_ips[dst], True),
            (specs[dst]['podIP'], False)]
    # Here we check that user1 pod has access to public ports (TCP:80)
    # of user2's pod and doesn't have access to non-public ports (UDP:2000)
    for host, do_ping in data:
        # ICMP check
        if do_ping:
            ping(pods[src], container_ids[src], host)
        else:
            with utils.assert_raises(NonZeroRetCodeException):
                ping(pods[src], container_ids[src], host)
        # TCP check. port 80 is public
        http_check(pods[src], container_ids[src], host)
        # UDP check. port 2000 is not public
        container_udp_server(pods[dst], container_ids[dst])
        with utils.assert_raises(NonZeroRetCodeException, 'No PONG received'):
            udp_check(pods[src], container_ids[src], host)

    # alt_test_user: iso2 -> test_user: iso3
    # Different users' pods can't access each other via service/pod IP
    src, dst = 'iso2', 'iso3'
    for host in (container_ips[dst], specs[dst]['podIP']):
        # ICMP check
        # with assert_raises(NonZeroRetCodeException):
        #     ping(pods[src], container_ids[src], host)
        # TCP check. port 80 is closed
        # Here we expect EXIT_CODE to be:
        # 7 (Failed to connect) or 28 (Connection timed out)
        with utils.assert_raises(NonZeroRetCodeException,
                                 expected_ret_codes=CURL_CONNECTION_ERRORS):
            http_check(pods[src], container_ids[src], host)
        # UDP check. port 2000 is closed
        container_udp_server(pods[dst], container_ids[dst])
        with utils.assert_raises(NonZeroRetCodeException, 'No PONG received'):
            udp_check(pods[src], container_ids[src], host)
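
The per-container connectivity helpers (`ping`, `http_check`, `https_check`) used in this test are not included in the listing. A plausible minimal sketch built on the `docker_exec` calls seen above (timeouts and curl flags are assumptions):

def ping(pod, container_id, host):
    # Hypothetical: ICMP reachability check from inside the container;
    # a failed ping exits non-zero, surfaced as NonZeroRetCodeException.
    pod.docker_exec(container_id, 'ping -c 2 {}'.format(host))


def http_check(pod, container_id, host):
    # Hypothetical: TCP check against the public nginx port of the target.
    pod.docker_exec(container_id, 'curl -m 5 -k http://{}:80/'.format(host))


def https_check(pod, container_id, host):
    # Hypothetical: like http_check, but over TLS (used for the kubernetes
    # API service at 10.254.0.1).
    pod.docker_exec(container_id, 'curl -m 5 -k https://{}/'.format(host))
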
def test_network_isolation_nodes_from_pods(cluster):
    container_ids, container_ips, pods, specs = setup_pods(cluster)
    # ----- Node isolation -----
    LOG_MSG_HEAD = "Pod: '{}' public IP: '{}' host node: '{}'"
    LOG_MSG_TAIL = "accessing node: '{}' port: '{}' proto: '{}'"
    for name, pod in pods.items():
        # Container can't access the IP of the node it was created on
        host_ip = specs[name]['hostIP']
        # ICMP check
        # with assert_raises(NonZeroRetCodeException, '100% packet loss'):
        #     ping(pods[name], container_ids[name], host_ip)

        msg_head = LOG_MSG_HEAD.format(name, pod.public_ip,
                                       specs[name]['host'])
        # TCP check
        # Here we expect EXIT_CODE to be:
        # 7 (Failed to connect) or 28 (Connection timed out)
        msg_tail = LOG_MSG_TAIL.format(specs[name]['host'], CADVISOR_PORT,
                                       'TCP')
        LOG.debug('{}{} {}{}'.format(Fore.CYAN, msg_head, msg_tail,
                                     Style.RESET_ALL))
        with utils.assert_raises(NonZeroRetCodeException,
                                 expected_ret_codes=CURL_CONNECTION_ERRORS):
            # cadvisor port
            pod_check_node_tcp_port(pods[name],
                                    container_ids[name],
                                    host_ip,
                                    port=CADVISOR_PORT)

        # UDP check
        msg_tail = LOG_MSG_TAIL.format(specs[name]['host'], UDP_PORT, 'UDP')
        LOG.debug('{}{} {}{}'.format(Fore.CYAN, msg_head, msg_tail,
                                     Style.RESET_ALL))
        host_udp_server(cluster, pods[name].info['host'])
        with utils.assert_raises(NonZeroRetCodeException, 'No PONG received'):
            udp_check(pods[name], container_ids[name], specs[name]['hostIP'])

        # Container can't access the IP of a node it was not created on.
        # We do not know which node the pod will land on, so we can't tell in
        # advance what the "other nodes" are; we find this out at runtime.
        nodes, pod_node = cluster.node_names, pods[name].info['host']
        nodes.remove(pod_node)
        another_node = nodes[0]
        non_host_ip = cluster.get_host_ip(another_node)

        msg_tail = LOG_MSG_TAIL.format(another_node, CADVISOR_PORT, 'TCP')
        # TCP check
        # Here we expect EXIT_CODE to be:
        # 7 (Failed to connect) or 28 (Connection timed out)
        LOG.debug('{}{} {}{}'.format(Fore.CYAN, msg_head, msg_tail,
                                     Style.RESET_ALL))
        with utils.assert_raises(NonZeroRetCodeException,
                                 expected_ret_codes=CURL_CONNECTION_ERRORS):
            # cadvisor port
            pod_check_node_tcp_port(pods[name],
                                    container_ids[name],
                                    non_host_ip,
                                    port=CADVISOR_PORT)

        # UDP check
        msg_tail = LOG_MSG_TAIL.format(another_node, UDP_PORT, 'UDP')
        host_udp_server(cluster, another_node)
        LOG.debug('{}{} {}{}'.format(Fore.CYAN, msg_head, msg_tail,
                                     Style.RESET_ALL))
        with utils.assert_raises(NonZeroRetCodeException, 'No PONG received'):
            udp_check(pods[name], container_ids[name], non_host_ip)
def test_network_isolation_pods_from_cluster(cluster):
    container_ids, container_ips, pods, specs = setup_pods(cluster)
    # ------ Registered hosts tests ------
    # Pod IPs
    pod_ip_list = [(pod, container_ips[name], True)
                   for name, pod in pods.items()]
    # Service IPs. Don't respond to pings
    pod_ip_list.extend([(pod, specs[name]['podIP'], False)
                        for name, pod in pods.items()])

    # Registered hosts have access through all ports via pod/service IP
    for pod, target_host, do_ping in pod_ip_list:
        # ICMP check
        if do_ping:
            host_icmp_check_pod(cluster, 'rhost1', target_host)
        else:
            with utils.assert_raises(NonZeroRetCodeException):
                host_icmp_check_pod(cluster, 'rhost1', target_host)
        # TCP check
        host_http_check_pod(cluster, 'rhost1', target_host)
        # UDP check
        container_udp_server(pod, container_ids[pod.name])
        host_udp_check_pod(cluster, 'rhost1', target_host)

    # ---------- Master tests ---------
    for pod, target_host, do_ping in pod_ip_list:
        # ICMP check
        if do_ping:
            host_icmp_check_pod(cluster, 'master', target_host)
        else:
            with utils.assert_raises(NonZeroRetCodeException):
                host_icmp_check_pod(cluster, 'master', target_host)
        # TCP check
        host_http_check_pod(cluster, 'master', target_host)
        # UDP check
        container_udp_server(pod, container_ids[pod.name])
        host_udp_check_pod(cluster, 'master', target_host)

    # ----------- Nodes tests ----------
    # Node has access to (service/pod IP: any port) of the pods it's hosting
    # Another node has access to (service/pod IP: public port) only.
    # iso2: public ports: TCP:80 closed port: UDP:2000
    target_pod = 'iso2'
    host_node = specs[target_pod]['host']
    another_node = [n for n in cluster.node_names if n != host_node][0]
    iso2_ip_list = [
        (container_ips[target_pod], True),
        (specs[target_pod]['podIP'], False),
    ]
    for target_ip, do_ping in iso2_ip_list:
        # Here we check that node hosting a pod has access to it via
        # pod/service IP using all ports, i.e. public and non-public
        # Host node ICMP check
        if do_ping:
            host_icmp_check_pod(cluster, host_node, target_ip)
        else:
            with utils.assert_raises(NonZeroRetCodeException):
                host_icmp_check_pod(cluster, host_node, target_ip)
        # Host node TCP check
        host_http_check_pod(cluster, host_node, target_ip)
        # Host node UDP check
        container_udp_server(pods[target_pod], container_ids[target_pod])
        host_udp_check_pod(cluster, host_node, target_ip)

        # Here we check that node not hosting a pod has access to it
        # via pod/service IP using only public ports: TCP:80.
        # NOTE: UDP:2000 non-public
        # Another node ICMP check
        if do_ping:
            host_icmp_check_pod(cluster, another_node, target_ip)
        else:
            with utils.assert_raises(NonZeroRetCodeException):
                host_icmp_check_pod(cluster, another_node, target_ip)
        # Another node TCP check
        host_http_check_pod(cluster, another_node, target_ip)
        # Another node UDP check. port 2000 is not public
        container_udp_server(pods[target_pod], container_ids[target_pod])
        with utils.assert_raises(NonZeroRetCodeException):
            host_udp_check_pod(cluster, another_node, target_ip)

    # ---------- Node has access to world -------------
    for node_name in cluster.node_names:
        cluster.ssh_exec(node_name, 'ping -c 2 cloudlinux.com')

    # ------ Unregistered hosts tests ------
    # Node isolation from world
    # NOTE: Needs rework
    for node_name in cluster.node_names:
        node_ip = cluster.get_host_ip(node_name)
        # Try port 22
        unregistered_host_ssh_check(node_ip)

        for rec in _get_node_ports(cluster, node_name):
            if rec[0] == 'udp' or rec[1] == 22:
                continue
            with utils.assert_raises(NonZeroRetCodeException):
                unregistered_host_port_check(node_ip, rec[1])

    # Unregistered host can access public ports only
    # Here we check that unregistered hosts can access via public IP using
    # public ports. In this example iso1 has 2 public ports: TCP:80 & UDP:2000
    # TCP http check
    unregistered_host_http_check(specs['iso1']['public_ip'])
    # UDP check
    container_udp_server(pods['iso1'], container_ids['iso1'])
    unregistered_host_udp_check(specs['iso1']['public_ip'])

    # Here we check that unregistered hosts can access via public IP using
    # public ports only. In this example iso2 has public port TCP:80 and
    # non-public port UDP:2000
    # TCP http check
    unregistered_host_http_check(specs['iso2']['public_ip'])
    # UDP check (port 2000 is closed)
    container_udp_server(pods['iso2'], container_ids['iso2'])
    with utils.assert_raises(NonZeroRetCodeException):
        unregistered_host_udp_check(specs['iso2']['public_ip'], port=UDP_PORT)
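
The `setup_pods` helper shared by the three isolation tests is not part of this listing. Based only on how its return values are used (pods 'iso1', 'iso3', 'iso4' for one user and 'iso2' for another, plus per-pod container ids, container IPs and API specs), a very rough outline could look like the following; every detail here, including the owner handling and how container IPs are obtained, is an assumption:

def setup_pods(cluster):
    # Hypothetical outline: create the four isolation pods and collect the
    # per-pod data the tests index by pod name.
    pods = {
        'iso1': cluster.pods.create('nginx', 'iso1', owner='test_user',
                                    open_all_ports=True, start=True,
                                    wait_for_status='running'),
        'iso2': cluster.pods.create('nginx', 'iso2', owner='alt_test_user',
                                    ports_to_open=(80, ), start=True,
                                    wait_for_status='running'),
        'iso3': cluster.pods.create('nginx', 'iso3', owner='test_user',
                                    start=True, wait_for_status='running'),
        'iso4': cluster.pods.create('nginx', 'iso4', owner='test_user',
                                    start=True, wait_for_status='running'),
    }
    specs = {name: pod.get_spec() for name, pod in pods.items()}
    container_ids = {name: pod.get_container_id(container_image='nginx')
                     for name, pod in pods.items()}
    container_ips = {name: pod.get_container_ip(container_ids[name])
                     for name, pod in pods.items()}
    return container_ids, container_ips, pods, specs
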