def test_move_pods_and_delete_node_with_ceph_storage(cluster):
    """
    Tests that a pod with a persistent volume is rescheduled to another node
    when its hosting node is stopped, and moves back to the previous node
    when the node that picked it up is deleted.
    """
    pv_name = utils.get_rnd_low_string(prefix='ceph_pv_')
    pv_mpath = '/nginxpv'
    pv = cluster.pvs.add('dummy', pv_name, pv_mpath)
    # NOTE: we want to make sure that the pod lands on 'node1', because
    # 'node4' will be deleted later on.
    with cluster.temporary_stop_host('node4'):
        pod = cluster.pods.create('nginx',
                                  'test_nginx_pod_1',
                                  pvs=[pv],
                                  start=False)
        pod.start()
        pod.wait_for_status('running')

    prev_node = pod.node
    with cluster.temporary_stop_host(prev_node):
        utils.wait_for(lambda: pod.node != prev_node)
        pod.wait_for_status('running')

    utils.log_debug(
        "Delete node '{}' which is hosting the pod. Pod should move to "
        "node '{}'".format(pod.node, prev_node))
    hosting_node = cluster.nodes.get_node(node_name=pod.node)
    hosting_node.delete()

    utils.wait_for(lambda: pod.node == prev_node)
    pod.wait_for_status('running')
def test_error_start_with_shutdown_local_storage(cluster):
    # type: (KDIntegrationTestAPI) -> None
    """
    Tests that the pod which has persistent volume on the host with status
    'troubles' will produce error on the start and won't start the pod.

    More specifically:
    1. Create the pod with persistent volume 'pv'.
    2. Shut down the pod and the host on which it resides.
    3. Wait till the kubernetes see the host as not working.
    4. Create a new pod with the same persistent volume 'pv'.
    5. Starting the new pod should result in an immediate error.
    """
    pv = cluster.pvs.add('dummy', 'fakepv', '/nginxpv')
    pod = cluster.pods.create('nginx',
                              'test_nginx_pod',
                              pvs=[pv],
                              start=True,
                              wait_for_status='running')
    host = pod.info['host']
    pod.stop()
    pod.wait_for_status('stopped')
    with cluster.temporary_stop_host(host):
        wait_for(lambda: cluster.get_host_status(host) == 'troubles')
        new_pod = cluster.pods.create('nginx',
                                      'test_nginx_pod_new',
                                      pvs=[pv],
                                      start=False)
        with assert_raises(
                NonZeroRetCodeException,
                "There are no suitable nodes for the pod. Please try"
                " again later or contact KuberDock administrator"):
            new_pod.start()
        assert new_pod.status == 'stopped'
def test_overuse_pv_quota(cluster):
    """
    Scenario as follows:
    1. Create pod with PV(size 1GB) on it
    2. Write 640MB of data on the attached PV. Operation should complete with
        no errors
    3. Try to write another 512MB of data on the same PV. This should fail,
        due to insufficent disk space
    """
    utils.log_debug('===== Overuse Disk quota =====', LOG)
    pv_name = utils.get_rnd_low_string(prefix='integr_test_disk_')
    mount_path = '/nginxpv'
    pv = cluster.pvs.add('dummy', pv_name, mount_path)
    pod = cluster.pods.create('nginx',
                              'test_nginx_pod_1',
                              pvs=[pv],
                              start=True,
                              wait_for_status='running')

    container_id = pod.get_container_id(container_image='nginx')
    # write 640MB to PV
    cmd1 = 'dd if=/dev/zero of={}/tempfile1 bs=64M ' \
           'count=10'.format(mount_path)
    utils.log_debug('Before wipe out: write 640MBs to disk', LOG)
    pod.docker_exec(container_id, cmd1)

    # should fail due to insufficient disk space
    with utils.assert_raises(NonZeroRetCodeException,
                             DISK_QUOTA_EXCEEDED_MSGS):
        utils.log_debug('Before wipe out: write 512MBs to disk', LOG)
        cmd2 = 'dd if=/dev/zero of={}/tempfile2 bs=64M count=8'.format(
            mount_path)
        pod.docker_exec(container_id, cmd2)

    utils.log_debug('Restart pod with wipe out', LOG)
    pod.redeploy(wipeOut=True)
    utils.wait_for(
        lambda: container_id != pod.get_container_id(container_image='nginx'))
    pod.wait_for_status('running')
    container_id = pod.get_container_id(container_image='nginx')

    utils.log_debug('After wipe out: write 640MBs to disk', LOG)
    pod.docker_exec(container_id, cmd1)

    with utils.assert_raises(NonZeroRetCodeException,
                             DISK_QUOTA_EXCEEDED_MSGS):
        utils.log_debug('After wipe out: write 512MBs to disk', LOG)
        cmd2 = 'dd if=/dev/zero of={}/tempfile2 bs=64M count=8'.format(
            mount_path)
        pod.docker_exec(container_id, cmd2)

    pod.delete()
    pv.delete()
def test_pods_move_on_failure(cluster):
    # type: (KDIntegrationTestAPI) -> None
    """
    Tests that the pod without local storage will move to another host in case
    of failure.
    """
    pod = cluster.pods.create('nginx', 'test_nginx_pod', start=True)
    pod.wait_for_status('running')
    host = pod.info['host']
    with cluster.temporary_stop_host(host):
        wait_for(lambda: pod.info['host'] != host)
        pod.wait_for_status('running')
def test_pod_not_start_with_pv_on_shut_down_host(cluster):
    # type: (KDIntegrationTestAPI) -> None
    """
    Tests that pod will not be able to start, if it has the persistent volume
    on the node that is in troubles state.
    This is a test for https://cloudlinux.atlassian.net/browse/AC-4087

    Specifically, the behaviour is follows:
    1. Create the pod with persistent volume 'pv'.
    2. Delete pod and persistent volume 'pv'.
    3. Create the pod with persistent volume 'pv', which are exactly the same
       as on the first step.
    4. Shut down the pod and the host on which it resides.
    5. Wait till the kubernetes see the host as not working.
    6. Create a new pod with the same persistent volume 'pv'.
    7. Starting the new pod should result in immediate error.
    """
    pv = cluster.pvs.add('dummy', 'pv', '/nginxpv')
    pod = cluster.pods.create('nginx',
                              'test_nginx_pod',
                              pvs=[pv],
                              start=True,
                              wait_for_status='running')
    pod.delete()
    pv.delete()
    pv = cluster.pvs.add('dummy', 'pv', '/nginxpv')
    pod = cluster.pods.create('nginx',
                              'test_nginx_pod',
                              pvs=[pv],
                              start=True,
                              wait_for_status='running')
    host = pod.info['host']
    pod.stop()
    pod.wait_for_status('stopped')
    with cluster.temporary_stop_host(host):
        wait_for(lambda: cluster.get_host_status(host) == 'troubles')
        new_pod = cluster.pods.create('nginx',
                                      'test_nginx_pod_new',
                                      pvs=[pv],
                                      start=False)
        with assert_raises(
                NonZeroRetCodeException,
                "There are no suitable nodes for the pod. Please try"
                " again later or contact KuberDock administrator"):
            new_pod.start()
        assert new_pod.status == 'stopped'
    def get_container_id(self, container_name=None, container_image=None):
        # Exactly one selector must be given: either the container name or
        # the container image, but not both and not neither.
        if not (container_name is None) ^ (container_image is None):
            raise ValueError('You need to specify either the container_name'
                             ' or the container_image')

        if container_name is not None:
            def predicate(c):
                return c['name'] == container_name
        else:
            def predicate(c):
                return c['image'] == container_image

        def find_container():
            # Re-fetch the spec on each check so that a freshly (re)started
            # container becomes visible once kubernetes assigns its ID.
            spec = self.get_spec()
            try:
                return next(c for c in spec['containers'] if predicate(c))
            except StopIteration:
                LOG.error("Pod {} does not have {} container".format(
                    self.name, container_name or container_image))
                raise

        # Poll until kubernetes reports a container ID, then return it.
        utils.wait_for(
            lambda: find_container().get('containerID') is not None)
        return find_container()['containerID']
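    # Usage sketch (a hypothetical example, not part of the original suite):
    # the helper accepts exactly one selector, either the container name or
    # the container image, and blocks until kubernetes reports the ID.
    #
    #     nginx_id = pod.get_container_id(container_image='nginx')
    #     web_id = pod.get_container_id(container_name='web')  # 'web' is made up
    #     pod.get_container_id()  # ValueError: exactly one selector is required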
def test_zfs_volumes_mount_properly(cluster):
    """
    Automate TestRail case: Deploy with ZFS parameter

    https://cloudlinux.testrail.net/index.php?/cases/view/81
    """
    image = 'nginx'
    pv_name = utils.get_rnd_low_string(prefix='zfs_pv_')
    pv_mpath = '/usr/share/nginx/html'
    pv = cluster.pvs.add('dummy', pv_name, pv_mpath)
    pod = cluster.pods.create(image,
                              'nginx_zfs_volume_mounts',
                              pvs=[pv],
                              ports_to_open=(80, ),
                              start=True,
                              wait_for_status='running',
                              wait_ports=True)

    pod_owner = cluster.users.get(name=pod.owner)
    pv_mountpoint = os.path.join(ZFS_POOL_MOUNTPOINT, str(pod_owner.get('id')),
                                 pv_name)
    check_volume_mounts(cluster, pod, log_msg_prefix='BEFORE NODE REBOOT: ')

    utils.log_debug("Write a file 'test.txt' to PV and get it via HTTP", LOG)
    c_id = pod.get_container_id(container_image=image)
    pod.docker_exec(c_id, 'echo -n TEST > {}/test.txt'.format(pv_mpath))
    ret = pod.do_GET(path='/test.txt')
    utils.assert_eq('TEST', ret)

    # Reboot Node
    cluster.nodes.get_node(pod.node).reboot()

    utils.wait_for(lambda: c_id != pod.get_container_id(container_image=image))
    pod.wait_for_ports()

    check_volume_mounts(cluster, pod, log_msg_prefix='AFTER NODE REBOOT: ')

    utils.log_debug(
        "Make sure that we can get 'test.txt' via HTTP after node reboot", LOG)
    ret = pod.do_GET(path='/test.txt')
    utils.assert_eq('TEST', ret)

    c_id = pod.get_container_id(container_image=image)

    utils.log_debug('Restart Pod and check that volumes are mounted correctly',
                    LOG)
    pod.redeploy()

    utils.wait_for(lambda: c_id != pod.get_container_id(container_image=image))
    pod.wait_for_status('running')

    check_volume_mounts(cluster, pod, log_msg_prefix='AFTER POD RESTART: ')

    node = pod.node
    pod.delete()
    pv.delete()

    utils.log_debug(
        "Make sure that '{}' is not mounted after PV deletion".format(pv_name),
        LOG)
    with utils.assert_raises(NonZeroRetCodeException,
                             expected_ret_codes=GREP_EXIT_CODES):
        utils.retry(assert_volume_mounts,
                    cluster=cluster,
                    mountpoint=pv_mountpoint,
                    node=node,
                    assertion=utils.assert_not_in,
                    tries=3,
                    interval=60)

    utils.log_debug(
        "Make sure that '{}' is not in mountpoints".format(pv_name), LOG)
    pool_path = os.path.join(ZFS_POOL, str(pod_owner.get('id')), pv_name)

    with utils.assert_raises(NonZeroRetCodeException,
                             'dataset does not exist'):
        utils.retry(assert_zfs_mount_points,
                    cluster=cluster,
                    pool_path=pool_path,
                    volume_mp=pv_mountpoint,
                    node=node,
                    assertion=utils.assert_not_in,
                    tries=3,
                    interval=60)