def test_move_pods_and_delete_node_with_ceph_storage(cluster):
    """
    Check that a pod using a Ceph-backed persistent volume keeps running by
    being rescheduled to another node, both when its hosting node is
    temporarily stopped and when its hosting node is deleted.
    """
    pv_name = utils.get_rnd_low_string(prefix='ceph_pv_')
    pv_mpath = '/nginxpv'
    pv = cluster.pvs.add('dummy', pv_name, pv_mpath)
    # NOTE: we want to make sure that the pod lands on 'node1', because
    # 'node4' will be deleted later on.
    with cluster.temporary_stop_host('node4'):
        pod = cluster.pods.create('nginx',
                                  'test_nginx_pod_1',
                                  pvs=[pv],
                                  start=False)
        pod.start()
        pod.wait_for_status('running')

    prev_node = pod.node
    with cluster.temporary_stop_host(prev_node):
        utils.wait_for(lambda: pod.node != prev_node)
        pod.wait_for_status('running')

    utils.log_debug(
        "Delete node '{}' which is hosting the pod. Pod should move to "
        "node '{}'".format(pod.node, prev_node))
    hosting_node = cluster.nodes.get_node(node_name=pod.node)
    hosting_node.delete()

    utils.wait_for(lambda: pod.node == prev_node)
    pod.wait_for_status('running')
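# utils.wait_for() is not defined in this listing.  The calls above assume a
# simple polling helper along the lines of the sketch below (the name and the
# timeout/interval defaults are assumptions, not the suite's actual code).
def _wait_for_sketch(condition, timeout=300, interval=5):
    """Poll `condition` until it is truthy or `timeout` seconds pass."""
    import time
    deadline = time.time() + timeout
    while time.time() < deadline:
        if condition():
            return
        time.sleep(interval)
    raise AssertionError('Condition was not met within {}s'.format(timeout))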
def check_custom_port(cluster, port, proto, is_open=False):
    """
    Start a throwaway HTTP/UDP server on the given port of every node and
    verify that the port is reachable from an unregistered host when it is
    expected to be open, and unreachable when it is expected to be closed.
    """
    msg = "Check that port: '{proto}:{port}' on node '{node}' is '{state}'"
    for node in cluster.node_names:
        utils.log_debug(
            msg.format(proto=proto,
                       port=port,
                       node=node,
                       state='open' if is_open else 'closed'), LOG)
        node_ip = cluster.nodes.get_node_data(node).get('ip')
        if proto == 'tcp' and is_open:
            with paramiko_expect_http_server(cluster, node, port):
                sleep(SERVER_START_WAIT_TIMEOUT)
                res = unregistered_host_port_check(node_ip, port)
                utils.assert_in('Directory listing for /', res)
        elif proto == 'tcp' and not is_open:
            with paramiko_expect_http_server(cluster, node, port), \
                utils.assert_raises(NonZeroRetCodeException,
                                    expected_ret_codes=CURL_CONNECTION_ERRORS):
                sleep(SERVER_START_WAIT_TIMEOUT)
                unregistered_host_port_check(node_ip, port)
        elif proto == 'udp' and is_open:
            with paramiko_expect_udp_server(cluster, node, port):
                sleep(SERVER_START_WAIT_TIMEOUT)
                out = unregistered_host_udp_check(node_ip, port)
                utils.assert_eq('PONG', out)
        else:
            with paramiko_expect_udp_server(cluster, node, port), \
                utils.assert_raises(
                    NonZeroRetCodeException, 'socket.timeout: timed out'):
                sleep(SERVER_START_WAIT_TIMEOUT)
                unregistered_host_udp_check(node_ip, port)
@contextmanager
def paramiko_expect_http_server(cluster, node, port):
    # This generator is used via 'with' above, so it must be wrapped with
    # contextlib.contextmanager.
    server = None
    try:
        utils.log_debug(
            "Start HTTP server on node '{}' port '{}'".format(node, port), LOG)
        ssh = cluster.get_ssh(node)
        server = SSHClientInteraction(ssh)
        server.send('python -m SimpleHTTPServer {}'.format(port))
        yield
    finally:
        if server is not None:
            server.close()
@contextmanager
def paramiko_expect_udp_server(cluster, node, port):
    # This generator is used via 'with' above, so it must be wrapped with
    # contextlib.contextmanager.
    server = None
    try:
        utils.log_debug(
            "Start UDP server on node '{}' port '{}'".format(node, port), LOG)
        ssh = cluster.get_ssh(node)
        server = SSHClientInteraction(ssh)
        server.send(
            HOST_UP_UDP_SERVER_CMD.format(bind_ip='0.0.0.0', bind_port=port))
        yield
    finally:
        if server is not None:
            server.close()
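# HOST_UP_UDP_SERVER_CMD is not defined in this listing.  Judging by the
# 'PONG' reply that unregistered_host_udp_check() expects, it is presumably a
# shell one-liner that answers every incoming datagram with 'PONG'.  A
# hypothetical equivalent (an assumption, not the project's actual constant):
HOST_UP_UDP_SERVER_CMD_SKETCH = (
    'python -c "import socket; '
    's = socket.socket(socket.AF_INET, socket.SOCK_DGRAM); '
    "s.bind(('{bind_ip}', {bind_port})); "
    '[s.sendto(b\'PONG\', s.recvfrom(1024)[1]) for _ in iter(int, 1)]"'
)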
def test_cant_start_resized_pod_if_cpu_is_low(cluster):
    cluster.set_system_setting("2", name="cpu_multiplier")
    cluster.set_system_setting("20", name="memory_multiplier")
    template = "wordpress.yaml"

    log_debug("Starting first pod")
    # TODO: remove sleep after fixing AC-5403
    sleep(10)
    pod = cluster.pods.create_pa(template, wait_for_status='running',
                                 pod_name="wordpress1")

    log_debug("Starting second pod")
    cluster.pods.create_pa(template, wait_for_status='running',
                           pod_name="wordpress2")
    log_debug("Starting third pod")
    cluster.pods.create_pa(template, wait_for_status='running',
                           pod_name="wordpress3")

    pod.change_kubes(kubes=9, container_name="wordpress")

    log_debug("Make sure there is warning about lack of CPUs in k8s")
    cmd = 'get events --namespace {} -o json'.format(pod.pod_id)

    def _check_presence_of_cpu_warning():
        _, out, _ = cluster.true_kubectl(cmd)
        try:
            next(e for e in json.loads(out)['items']
                 if e["reason"] == "FailedScheduling"
                 and K8S_CPU_LACK_ERROR in e["message"])
        except StopIteration:
            raise _NoResourseLackErrorInK8s("There is no event with a "
                                            "warning about lack of CPU "
                                            "in k8s")

    retry(_check_presence_of_cpu_warning, tries=10, interval=3)
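# retry() is not shown in this listing.  Based on how it is called here (and
# as utils.retry() further below), it presumably re-runs a callable until it
# stops raising; a minimal assumed sketch, not the suite's actual helper:
def _retry_sketch(func, tries=1, interval=1, **kwargs):
    import time
    for attempt in range(tries):
        try:
            return func(**kwargs)
        except Exception:
            if attempt == tries - 1:
                raise
            time.sleep(interval)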
def _run_test_for_port_and_proto(cluster, port, proto):
    check_custom_port(cluster, port, proto, is_open=False)

    utils.log_debug(
        "Open port: '{proto}:{port}'".format(port=port, proto=proto), LOG)
    allowed_ports_open(cluster, port, proto)

    utils.log_debug(
        "Check that port: '{proto}:{port}' is listed as open".format(
            proto=proto, port=port), LOG)
    assert_open_ports(cluster, port, proto)

    check_custom_port(cluster, port, proto, is_open=True)

    utils.log_debug(
        "Close port: '{proto}:{port}'".format(port=port, proto=proto), LOG)
    allowed_ports_close(cluster, port, proto)

    utils.log_debug(
        "Check that port: '{proto}:{port}' is NOT listed as open".format(
            proto=proto, port=port), LOG)
    with utils.assert_raises(AssertionError):
        assert_open_ports(cluster, port, proto)

    check_custom_port(cluster, port, proto, is_open=False)
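# The flow above is presumably driven by per-protocol tests; a hypothetical
# caller (the port number and test names here are made up for illustration):
def test_open_and_close_custom_tcp_port_sketch(cluster):
    _run_test_for_port_and_proto(cluster, port=9999, proto='tcp')


def test_open_and_close_custom_udp_port_sketch(cluster):
    _run_test_for_port_and_proto(cluster, port=9999, proto='udp')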
def check_volume_mounts(cluster, pod, log_msg_prefix=''):
    utils.log_debug(
        "{}Run 'sudo zpool list' and make sure that '{}' is there".format(
            log_msg_prefix, ZFS_POOL), LOG)
    _, out, _ = cluster.ssh_exec(pod.node, 'zpool list', sudo=True)
    utils.assert_in(ZFS_POOL, out)

    for pv in pod.pvs:
        pod_owner = cluster.users.get(name=pod.owner)
        pv_mountpoint = os.path.join(ZFS_POOL_MOUNTPOINT,
                                     str(pod_owner.get('id')), pv.name)
        pool_path = os.path.join(ZFS_POOL, str(pod_owner.get('id')), pv.name)
        utils.log_debug(
            "{}Run 'zfs list' and check that '{}' is there".format(
                log_msg_prefix, pv.name), LOG)

        _, out, _ = cluster.ssh_exec(pod.node, 'zfs list', sudo=True)
        utils.assert_in(pv.name, out)

        # This may look redundant, but it is here only to stay consistent
        # with the test case in TestRail
        utils.log_debug(
            "{}Make sure that '{}' is mounted".format(log_msg_prefix, pv.name),
            LOG)
        assert_volume_mounts(cluster, pv_mountpoint, pod.node, utils.assert_in)

        utils.log_debug(
            "{}Make sure that '{}' has a correct mountpoint".format(
                log_msg_prefix, pv.name), LOG)

        assert_zfs_mount_points(cluster,
                                pool_path,
                                pv_mountpoint,
                                node=pod.node,
                                assertion=utils.assert_in)
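# assert_volume_mounts() and assert_zfs_mount_points() are defined elsewhere
# in the suite.  Judging by how they are called here and by the grep exit
# codes and 'dataset does not exist' error expected further below, they
# presumably grep the node's mount table and query the ZFS dataset's
# mountpoint.  Rough, assumed sketches (not the actual implementations):
def _assert_volume_mounts_sketch(cluster, mountpoint, node, assertion):
    # grep exits non-zero when nothing matches, which callers turn into a
    # NonZeroRetCodeException (cf. GREP_EXIT_CODES below).
    _, out, _ = cluster.ssh_exec(node, 'mount | grep {}'.format(mountpoint))
    assertion(mountpoint, out)


def _assert_zfs_mount_points_sketch(cluster, pool_path, volume_mp, node,
                                    assertion):
    # 'zfs get' fails with 'dataset does not exist' once the dataset is gone.
    _, out, _ = cluster.ssh_exec(
        node, 'zfs get -H -o value mountpoint {}'.format(pool_path),
        sudo=True)
    assertion(volume_mp, out)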
    def _open_custom_ports(self):
        try:
            self.cluster.kdctl('allowed-ports open {} {}'.format(
                self.TCP_PORT_TO_OPEN, 'tcp'))
            self.cluster.kdctl('allowed-ports open {} {}'.format(
                self.UDP_PORT_TO_OPEN, 'udp'))
            _, out, _ = self.cluster.kdctl('allowed-ports list',
                                           out_as_dict=True)
            custom_ports = out['data']
            # Make sure that exactly two ports are open
            assert_eq(len(custom_ports), 2)

            # Make sure that both ports were opened correctly
            assert_in(dict(port=self.TCP_PORT_TO_OPEN, protocol='tcp'),
                      custom_ports)
            assert_in(dict(port=self.UDP_PORT_TO_OPEN, protocol='udp'),
                      custom_ports)

        except (NonZeroRetCodeException, AssertionError) as e:
            log_debug("Couldn't open ports. Reason: {}".format(e))
def test_pod_with_long_domain_name(cluster):
    """
     Tes that pod with domain name's length equaling 63 (kubernetes
     limitation) symbols can be created and accessed
    """
    with open("tests_integration/assets/cpanel_credentials.json") as f:
        creds = json.load(f)

    # Adjust the pod name length so that the full domain name is exactly 63
    # characters: 53 = 63 - 10, where 10 is len("testuser-") plus the dot
    # separating the pod name from the domain.
    pod_name = get_rnd_low_string(length=53 - len(creds["domain"]))

    log_debug(
        "Start a pod with shared IP whose domain name consists "
        "of 63 characters", LOG)
    pod = cluster.pods.create("nginx",
                              pod_name,
                              ports_to_open=[80],
                              wait_for_status="running",
                              domain=creds["domain"],
                              healthcheck=True,
                              wait_ports=True)
    assert_eq(pod.domain, "testuser-{}.{}".format(pod_name, creds["domain"]))
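# A quick sanity check of the length arithmetic above: the resulting domain
# is "testuser-<pod_name>.<domain>", i.e. 9 + len(pod_name) + 1 + len(domain)
# characters, so len(pod_name) = 53 - len(domain) gives exactly 63
# (illustrated with a made-up domain):
assert len("testuser-{}.{}".format("x" * (53 - len("example.com")),
                                   "example.com")) == 63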
def test_two_pods_cant_use_same_pv(cluster):
    pv = cluster.pvs.add('dummy', 'nginxpv', '/nginxpv')
    pod1 = cluster.pods.create('nginx',
                               'test_nginx_pod_1',
                               pvs=[pv],
                               start=True,
                               wait_for_status='running')
    pod2 = cluster.pods.create('nginx',
                               'test_nginx_pod_2',
                               pvs=[pv],
                               start=False)

    utils.log_debug("Try to start 'pod2' that uses the same PV as 'pod1'", LOG)
    pod2.start()

    # FIXME: Need a proper way to determine that some resources were not
    # available when we tried to start the pod
    sleep(120)
    pod2.wait_for_status('stopped')

    pod1.delete()
    pod2.delete()
    pv.delete()
def test_pod_with_domain_name(cluster):
    suffix = get_rnd_low_string(length=5)
    pod_name = suffix
    with open("tests_integration/assets/cpanel_credentials.json") as f:
        creds = json.load(f)
    log_debug("Start a pod with shared IP", LOG)
    pod = cluster.pods.create("nginx",
                              pod_name,
                              ports_to_open=[80],
                              wait_for_status="running",
                              domain=creds["domain"],
                              healthcheck=True,
                              wait_ports=True)
    log_debug("Restart the pod with shared IP", LOG)
    pod.redeploy()
    try:
        pod.wait_for_status("pending", tries=5, interval=3)
    except StatusWaitException:
        # When the pod is rebooted, it often stays in "pending" status only
        # for a short time, so this status isn't guaranteed to be caught by
        # pod.wait_for_status
        pass
    pod.wait_for_status("running")
    assert_eq(pod.domain, "testuser-{}.{}".format(suffix, creds["domain"]))
    pod.wait_for_ports([80])
    pod.healthcheck()

    log_debug("Start and stop the pod with shared IP", LOG)
    pod.stop()
    pod.wait_for_status("stopped")
    pod.start()
    pod.wait_for_status("running")
    pod.wait_for_ports([80])
    pod.healthcheck()

    log_debug("Change number of kubes in the pod with shared IP", LOG)
    pod.change_kubes(kubes=3, container_image="nginx")
    try:
        # right after the kube count change is started, the pod keeps the
        # "running" status for several seconds
        pod.wait_for_status("pending", tries=12)
    except StatusWaitException:
        # "pending" status lasts for very short time and may be not detected
        pass
    pod.wait_for_status("running")
    pod.wait_for_ports([80])
    pod.healthcheck()
def test_add_new_block_device(cluster):
    """
    Add a new block device into ZFS pool (Non-AWS)
    """
    for node in cluster.node_names:
        utils.log_debug("Add new block device to node '{}'".format(node), LOG)
        # NOTE: Generate a new file each time so that the test can be run on
        # the same cluster multiple times without breaking anything;
        # attaching two volumes with the same name would break the ZFS pool.
        # FIXME: Tried to detach the volume after the test is complete, but
        # couldn't figure out how to do it properly.
        of_path = utils.get_rnd_low_string(prefix='/tmp/dev', length=5)

        write_file_cmd = 'dd if=/dev/zero of="{}" bs=64M count=10'.format(
            of_path)
        cluster.ssh_exec(node, write_file_cmd)

        add_bl_device_cmd = ('node-storage add-volume --hostname {} '
                             '--devices {}'.format(node, of_path))
        cluster.manage(add_bl_device_cmd)

        utils.log_debug("Make sure a new block device is added", LOG)
        _, out, _ = cluster.ssh_exec(node, 'zpool status', sudo=True)
        utils.assert_in(of_path, out)
def test_pv_states_and_deletion_via_kcli2(cluster):
    """
    TestRail Case: Different states of persistent volumes (kcli2)
    https://cloudlinux.testrail.net/index.php?/cases/view/145

    TestRail Case: Removing persistent volume (kcli2)
    https://cloudlinux.testrail.net/index.php?/cases/view/143

    TestRail Case: Try to remove persistent volume which is in use (kcli2)
    https://cloudlinux.testrail.net/index.php?/cases/view/146
    """
    pv1_name = 'disk1'
    pv2_name = 'disk2'
    pv1_mpath = '/nginxpv1'
    pv2_mpath = '/nginxpv2'
    pv1 = cluster.pvs.add('dummy', pv1_name, pv1_mpath)
    pv2 = cluster.pvs.add('dummy', pv2_name, pv2_mpath)
    pod_name = 'test_nginx_pv_states_via_kcli'
    pod = cluster.pods.create(
        'nginx', pod_name, pvs=[pv1, pv2], start=True,
        wait_for_status='running')

    log_debug('Get PV info via kcli2 using PV name', LOG)
    pv1_info = pv1.info()
    pv2_info = pv2.info()
    # Get 'disk1' and 'disk2' ids
    pv1_id = pv1_info['id']
    pv2_id = pv2_info['id']

    # Disk1 states: 'forbidDeletion': True, 'in_use': True
    # 'test_nginx_pv_states_via_kcli' in 'disk1' linkedPods
    assert_pv_states(pv1_info,
                     expected_states=dict(forbidDeletion=True, in_use=True),
                     pod_names=[pod_name])

    # Disk2 states: 'forbidDeletion': True, 'in_use': True
    # 'test_nginx_pv_states_via_kcli' in 'disk2' linkedPods
    assert_pv_states(pv2_info,
                     expected_states=dict(forbidDeletion=True, in_use=True),
                     pod_names=[pod_name])

    log_debug('Get PV info via kcli2 using PV id', LOG)
    pv1_info = pv1.info(id=pv1_id)
    pv2_info = pv2.info(id=pv2_id)

    # Disk1 states: 'forbidDeletion': True, 'in_use': True
    # 'test_nginx_pv_states_via_kcli' in 'disk1' linkedPods
    assert_pv_states(pv1_info,
                     expected_states=dict(forbidDeletion=True, in_use=True),
                     pod_names=[pod_name])

    # Disk2 states: 'forbidDeletion': True, 'in_use': True
    # 'test_nginx_pv_states_via_kcli' in 'disk2' linkedPods
    assert_pv_states(pv2_info,
                     expected_states=dict(forbidDeletion=True, in_use=True),
                     pod_names=[pod_name])

    log_debug("Try to delete PVs 'disk1' and 'disk2' with pod 'running'", LOG)
    with assert_raises(NonZeroRetCodeException, 'Persistent disk is used.*'):
        pv1.delete(name=pv1_name)

    with assert_raises(NonZeroRetCodeException, 'Persistent disk is used.*'):
        pv2.delete(id=pv2_id)

    log_debug('List PVs using kcli2', LOG)
    pv_list = cluster.pvs.filter()

    log_debug("Make sure 'disk1' and 'disk2' are in the list", LOG)
    assert_eq(pv1_name in [pv['name'] for pv in pv_list], True)
    assert_eq(pv2_name in [pv['name'] for pv in pv_list], True)

    # Stop the pod
    pod.stop()
    pod.wait_for_status('stopped')

    log_debug("Try to delete PVs 'disk1' and 'disk2' with pod 'stopped'", LOG)
    with assert_raises(NonZeroRetCodeException, 'Volume can not be deleted.'
                       ' Reason: Persistent Disk is linked by pods:.*'):
        pv1.delete(name=pv1_name)

    with assert_raises(NonZeroRetCodeException, 'Volume can not be deleted.'
                       ' Reason: Persistent Disk is linked by pods:.*'):
        pv2.delete(id=pv2_id)

    # Get disk info once again
    log_debug("Pod is stopped and 'in_use' should become 'False'", LOG)
    pv1_info = pv1.info()
    pv2_info = pv2.info()

    # Disk1 states: 'forbidDeletion': True, 'in_use': False
    # 'test_nginx_pv_states_via_kcli' should still be in 'disk1' linkedPods
    assert_pv_states(pv1_info,
                     expected_states=dict(forbidDeletion=True, in_use=False),
                     pod_names=[pod_name])
    # Disk2 states: 'forbidDeletion': True, 'in_use': False
    # 'test_nginx_pv_states_via_kcli' should still be in 'disk2' linkedPods
    assert_pv_states(pv2_info,
                     expected_states=dict(forbidDeletion=True, in_use=False),
                     pod_names=[pod_name])

    pod.delete()

    log_debug("Pod is deleted and both 'forbidDeletion' and 'in_use' should "
              "become 'False'", LOG)
    pv1_info = pv1.info()
    pv2_info = pv2.info()

    # Disk1 states: 'forbidDeletion': False, 'in_use': False
    assert_pv_states(pv1_info,
                     expected_states=dict(forbidDeletion=False, in_use=False),
                     pod_names=[])
    # Disk2 states: 'forbidDeletion': False, 'in_use': False
    assert_pv_states(pv2_info,
                     expected_states=dict(forbidDeletion=False, in_use=False),
                     pod_names=[])

    log_debug("Delete 'disk1' using '--name'", LOG)
    res = pv1.delete(name=pv1_name)
    assert_eq(res['status'], 'OK')

    log_debug("Delete 'disk2' using '--id'", LOG)
    res = pv2.delete(id=pv2_id)
    assert_eq(res['status'], 'OK')

    log_debug("Check that 'disk1' is deleted", LOG)
    with assert_raises(NonZeroRetCodeException, 'Error: Unknown name'):
        pv1.info()

    log_debug("Check that 'disk2' is deleted", LOG)
    with assert_raises(NonZeroRetCodeException, 'Error: Unknown name'):
        pv2.info()
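# assert_pv_states() is defined elsewhere in the suite.  From its usage above
# it presumably compares boolean flags from the PV info dict and checks the
# pods listed under 'linkedPods'.  A minimal assumed sketch (the structure of
# 'linkedPods' is a guess):
def _assert_pv_states_sketch(pv_info, expected_states, pod_names=None):
    for state, expected in expected_states.items():
        assert pv_info[state] == expected
    if pod_names is not None:
        linked = [p['name'] for p in pv_info.get('linkedPods', [])]
        for name in pod_names:
            assert name in linked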
def test_zfs_volumes_mount_properly(cluster):
    """
    Automate TestRail case: Deploy with ZFS parameter

    https://cloudlinux.testrail.net/index.php?/cases/view/81
    """
    image = 'nginx'
    pv_name = utils.get_rnd_low_string(prefix='zfs_pv_')
    pv_mpath = '/usr/share/nginx/html'
    pv = cluster.pvs.add('dummy', pv_name, pv_mpath)
    pod = cluster.pods.create(image,
                              'nginx_zfs_volume_mounts',
                              pvs=[pv],
                              ports_to_open=(80, ),
                              start=True,
                              wait_for_status='running',
                              wait_ports=True)

    pod_owner = cluster.users.get(name=pod.owner)
    pv_mountpoint = os.path.join(ZFS_POOL_MOUNTPOINT, str(pod_owner.get('id')),
                                 pv_name)
    check_volume_mounts(cluster, pod, log_msg_prefix='BEFORE NODE REBOOT: ')

    utils.log_debug("Write a file 'test.txt' to PV and get it via HTTP", LOG)
    c_id = pod.get_container_id(container_image=image)
    pod.docker_exec(c_id, 'echo -n TEST > {}/test.txt'.format(pv_mpath))
    ret = pod.do_GET(path='/test.txt')
    utils.assert_eq('TEST', ret)

    # Reboot Node
    cluster.nodes.get_node(pod.node).reboot()

    utils.wait_for(lambda: c_id != pod.get_container_id(container_image=image))
    pod.wait_for_ports()

    check_volume_mounts(cluster, pod, log_msg_prefix='AFTER NODE REBOOT: ')

    utils.log_debug(
        "Make sure that we can get 'test.txt' via HTTP after node reboot", LOG)
    ret = pod.do_GET(path='/test.txt')
    utils.assert_eq('TEST', ret)

    c_id = pod.get_container_id(container_image=image)

    utils.log_debug(
        'Restart the pod and check that volumes are mounted correctly', LOG)
    pod.redeploy()

    utils.wait_for(lambda: c_id != pod.get_container_id(container_image=image))
    pod.wait_for_status('running')

    check_volume_mounts(cluster, pod, log_msg_prefix='AFTER POD RESTART: ')

    node = pod.node
    pod.delete()
    pv.delete()

    utils.log_debug(
        "Make sure that '{}' is not mounted after PV deletion".format(pv_name),
        LOG)
    with utils.assert_raises(NonZeroRetCodeException,
                             expected_ret_codes=GREP_EXIT_CODES):
        utils.retry(assert_volume_mounts,
                    cluster=cluster,
                    mountpoint=pv_mountpoint,
                    node=node,
                    assertion=utils.assert_not_in,
                    tries=3,
                    interval=60)

    utils.log_debug(
        "Make sure that '{}' is not in mountpoints".format(pv_name), LOG)
    pool_path = os.path.join(ZFS_POOL, str(pod_owner.get('id')), pv_name)

    with utils.assert_raises(NonZeroRetCodeException,
                             'dataset does not exist'):
        utils.retry(assert_zfs_mount_points,
                    cluster=cluster,
                    pool_path=pool_path,
                    volume_mp=pv_mountpoint,
                    node=node,
                    assertion=utils.assert_not_in,
                    tries=3,
                    interval=60)
def test_delete_node_with_pv(cluster):
    """
    Scenario as follows:
    1. Create 2 pods(pod1, pod2) with PVs(pv1, pv2).
    2. Try to delete node. This should fail.
    3. Delete pod2 and pv2.
    4. Try to delete node. This should fail again.
    5. Delete pod1.
    6. Try to delete node. This should fail again.
    7. Delete pv1.
    8. Delete node. Node should be deleted.
    """
    utils.log_debug('===== Delete Node with PV =====', LOG)

    pv_name1 = utils.get_rnd_low_string(prefix='integr_test_disk_')
    mount_path1 = '/nginxpv1'
    pv_name2 = utils.get_rnd_low_string(prefix='integr_test_disk_')
    mount_path2 = '/nginxpv2'

    pv1 = cluster.pvs.add('new', pv_name1, mount_path1)
    pv2 = cluster.pvs.add('new', pv_name2, mount_path2)

    pod1 = cluster.pods.create('nginx',
                               'test_nginx_pod_1',
                               pvs=[pv1],
                               start=False)
    pod2 = cluster.pods.create('nginx',
                               'test_nginx_pod_2',
                               pvs=[pv1, pv2],
                               start=True,
                               wait_for_status='running')

    hosting_node = cluster.nodes.get_node(pod2.node)

    pod2.stop()
    pod2.wait_for_status('stopped')

    # Try to delete node with pv1 and pv2 on it. Should fail.
    with utils.assert_raises(
            NonZeroRetCodeException,
            "Node 'node1' can't be deleted. Reason: users Persistent volumes "
            "located on the node.*"):
        hosting_node.delete()

    pod2.delete()
    pv2.delete()
    # Try to delete node with pv1 on it. Should fail.
    with utils.assert_raises(
            NonZeroRetCodeException,
            "Node 'node1' can't be deleted. Reason: users Persistent volumes "
            "located on the node.*"):
        hosting_node.delete()

    pod1.delete()
    # pod1 is deleted, but pv1 is still linked to the node.
    # deletion will fail.
    with utils.assert_raises(
            NonZeroRetCodeException,
            "Node 'node1' can't be deleted. Reason: users Persistent volumes "
            "located on the node.*"):
        hosting_node.delete()

    pv1.delete()
    # no pvs left on node, so it can be deleted with no problem.
    hosting_node.delete()
def test_overuse_pv_quota(cluster):
    """
    Scenario as follows:
    1. Create pod with PV(size 1GB) on it
    2. Write 640MB of data on the attached PV. Operation should complete with
        no errors
    3. Try to write another 512MB of data on the same PV. This should fail
        due to insufficient disk space
    """
    utils.log_debug('===== Overuse Disk quota =====', LOG)
    pv_name = utils.get_rnd_low_string(prefix='integr_test_disk_')
    mount_path = '/nginxpv'
    pv = cluster.pvs.add('dummy', pv_name, mount_path)
    pod = cluster.pods.create('nginx',
                              'test_nginx_pod_1',
                              pvs=[pv],
                              start=True,
                              wait_for_status='running')

    container_id = pod.get_container_id(container_image='nginx')
    # write 640MB to PV
    cmd1 = 'dd if=/dev/zero of={}/tempfile1 bs=64M ' \
           'count=10'.format(mount_path)
    utils.log_debug('Before wipe out: write 640MBs to disk', LOG)
    pod.docker_exec(container_id, cmd1)

    # should fail due to insufficient disk space
    with utils.assert_raises(NonZeroRetCodeException,
                             DISK_QUOTA_EXCEEDED_MSGS):
        utils.log_debug('Before wipe out: write 512MBs to disk', LOG)
        cmd2 = 'dd if=/dev/zero of={}/tempfile2 bs=64M count=8'.format(
            mount_path)
        pod.docker_exec(container_id, cmd2)

    utils.log_debug('Restart pod with wipe out', LOG)
    pod.redeploy(wipeOut=True)
    utils.wait_for(
        lambda: container_id != pod.get_container_id(container_image='nginx'))
    pod.wait_for_status('running')
    container_id = pod.get_container_id(container_image='nginx')

    utils.log_debug('After wipe out: write 640MBs to disk', LOG)
    pod.docker_exec(container_id, cmd1)

    with utils.assert_raises(NonZeroRetCodeException,
                             DISK_QUOTA_EXCEEDED_MSGS):
        utils.log_debug('After wipe out: write 512MBs to disk', LOG)
        cmd2 = 'dd if=/dev/zero of={}/tempfile2 bs=64M count=8'.format(
            mount_path)
        pod.docker_exec(container_id, cmd2)

    pod.delete()
    pv.delete()
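# DISK_QUOTA_EXCEEDED_MSGS is not defined in this listing.  The test writes
# 640MB (10 x 64MB) to a 1GB PV and then another 512MB (8 x 64MB), so the
# second dd is expected to hit the quota; the constant presumably matches
# standard errno strings along these lines (an assumption, not the actual
# value):
DISK_QUOTA_EXCEEDED_MSGS_SKETCH = ('Disk quota exceeded',
                                   'No space left on device')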