Example #1
    def create(cls, cluster, image, name, kube_type, kubes, open_all_ports,
               restart_policy, pvs, owner, password, ports_to_open, domain):
        """
        Create new pod in kuberdock
        :param open_all_ports: if true, open all ports of image (does not mean
        these are Public IP ports, depends on a cluster setup)
        :param ports_to_open: if open_all_ports is False, open only the ports
        from this list
        :return: object via which Kuberdock pod can be managed
        """
        def _get_image_ports(img):
            _, out, _ = cluster.kcli('image_info {}'.format(img),
                                     out_as_dict=True,
                                     user=owner)

            return [
                cls.Port(int(port['number']), port['protocol'])
                for port in out['ports']
            ]

        def _ports_to_dict(ports):
            """
            :return: list of dictionaries with ports, necessary for
            creation of general pod via kcli2
            """
            ports_list = []
            for port in ports:
                ports_list.append(
                    dict(containerPort=port.port,
                         hostPort=port.port,
                         isPublic=(open_all_ports
                                   or port.port in ports_to_open),
                         protocol=port.proto))
            return ports_list

        escaped_name = pipes.quote(name)
        kube_types = {"Tiny": 0, "Standard": 1, "High memory": 2}
        pod_spec = dict(kube_type=kube_types[kube_type],
                        restartPolicy=restart_policy,
                        name=escaped_name)
        container = dict(kubes=kubes,
                         image=image,
                         name=utils.get_rnd_low_string(length=11))
        ports = utils.retry(_get_image_ports, img=image)
        container.update(ports=_ports_to_dict(ports))
        if pvs is not None:
            container.update(volumeMounts=[pv.volume_mount_dict for pv in pvs])
            pod_spec.update(volumes=[pv.volume_dict for pv in pvs])
        pod_spec.update(containers=[container])
        if domain:
            pod_spec["domain"] = domain
        pod_spec = json.dumps(pod_spec, ensure_ascii=False)

        _, out, _ = cluster.kcli2(u"pods create '{}'".format(pod_spec),
                                  out_as_dict=True,
                                  user=owner,
                                  password=(password or owner))
        this_pod_class = cls._get_pod_class(image)
        return this_pod_class(cluster, image, name, kube_type, kubes,
                              open_all_ports, restart_policy, pvs, owner)
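A minimal usage sketch for the factory above, assuming the enclosing class is exposed as KDPod (the class name is not shown in the snippet) and that a cluster object like the one used in the tests below is available; the image, pod name, and port list are illustrative only:

# Hypothetical usage sketch: open only port 80 instead of every port the
# image exposes. All values are illustrative, not taken from a real run.
pod = KDPod.create(cluster=cluster,
                   image='nginx',
                   name='sketch_nginx_pod',
                   kube_type='Standard',    # mapped to kube type id 1 above
                   kubes=1,
                   open_all_ports=False,
                   restart_policy='Always',
                   pvs=None,
                   owner='test_user',
                   password=None,           # kcli2 falls back to the owner name
                   ports_to_open=[80],
                   domain=None)
pod.wait_for_status('running')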
Example #2
    def create_pa(self,
                  template_name,
                  plan_id=1,
                  wait_ports=False,
                  healthcheck=False,
                  wait_for_status=None,
                  owner='test_user',
                  command="kcli2",
                  rnd_str='test_data_',
                  pod_name=None):
        """Create new pod with predefined application in the Kuberdock.

        :param rnd_str: string which will be applied to the name of
            persistent volumes, which will be created with the pod
        :return: object via which Kuberdock pod can be managed

        """
        pod = KDPAPod.create(
            self.cluster, template_name, plan_id, owner, command,
            utils.get_rnd_low_string(prefix=rnd_str, length=5), pod_name)

        if wait_for_status:
            pod.wait_for_status(wait_for_status)
        if wait_ports:
            pod.wait_for_ports()
        if healthcheck:
            pod.healthcheck()

        return pod
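For comparison, a usage sketch of create_pa, assuming it is reachable through the same cluster.pods manager used by the tests in this file; the template name is an assumption, not taken from the source:

# Hypothetical usage sketch: deploy a predefined-application template and
# wait until it is reachable. 'wordpress.yaml' is an assumed template name.
pa_pod = cluster.pods.create_pa('wordpress.yaml',
                                plan_id=1,
                                wait_for_status='running',
                                wait_ports=True,
                                healthcheck=True,
                                owner='test_user')
pa_pod.delete()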
def test_move_pods_and_delete_node_with_ceph_storage(cluster):
    pv_name = utils.get_rnd_low_string(prefix='ceph_pv_')
    pv_mpath = '/nginxpv'
    pv = cluster.pvs.add('dummy', pv_name, pv_mpath)
    # NOTE: we want to make sure that the pod lands on 'node1', because
    # 'node4' will be deleted later on.
    with cluster.temporary_stop_host('node4'):
        pod = cluster.pods.create('nginx',
                                  'test_nginx_pod_1',
                                  pvs=[pv],
                                  start=False)
        pod.start()
        pod.wait_for_status('running')

    prev_node = pod.node
    with cluster.temporary_stop_host(prev_node):
        utils.wait_for(lambda: pod.node != prev_node)
        pod.wait_for_status('running')

    utils.log_debug(
        "Delete node '{}' which is hosting the pod. Pod should move to "
        "node '{}'".format(pod.node, prev_node))
    hosting_node = cluster.nodes.get_node(node_name=pod.node)
    hosting_node.delete()

    utils.wait_for(lambda: pod.node == prev_node)
    pod.wait_for_status('running')
def test_overuse_pv_quota(cluster):
    """
    Scenario as follows:
    1. Create pod with PV(size 1GB) on it
    2. Write 640MB of data on the attached PV. Operation should complete with
        no errors
    3. Try to write another 512MB of data on the same PV. This should fail,
        due to insufficent disk space
    """
    utils.log_debug('===== Overuse Disk quota =====', LOG)
    pv_name = utils.get_rnd_low_string(prefix='integr_test_disk_')
    mount_path = '/nginxpv'
    pv = cluster.pvs.add('dummy', pv_name, mount_path)
    pod = cluster.pods.create('nginx',
                              'test_nginx_pod_1',
                              pvs=[pv],
                              start=True,
                              wait_for_status='running')

    container_id = pod.get_container_id(container_image='nginx')
    # write 640MB to PV
    cmd1 = 'dd if=/dev/zero of={}/tempfile1 bs=64M ' \
           'count=10'.format(mount_path)
    utils.log_debug('Before wipe out: write 640MBs to disk', LOG)
    pod.docker_exec(container_id, cmd1)

    # should fail due to insufficient disk space
    with utils.assert_raises(NonZeroRetCodeException,
                             DISK_QUOTA_EXCEEDED_MSGS):
        utils.log_debug('Before wipe out: write 512MBs to disk', LOG)
        cmd2 = 'dd if=/dev/zero of={}/tempfile2 bs=64M count=8'.format(
            mount_path)
        pod.docker_exec(container_id, cmd2)

    utils.log_debug('Restart pod with wipe out', LOG)
    pod.redeploy(wipeOut=True)
    utils.wait_for(
        lambda: container_id != pod.get_container_id(container_image='nginx'))
    pod.wait_for_status('running')
    container_id = pod.get_container_id(container_image='nginx')

    utils.log_debug('After wipe out: write 640MBs to disk', LOG)
    pod.docker_exec(container_id, cmd1)

    with utils.assert_raises(NonZeroRetCodeException,
                             DISK_QUOTA_EXCEEDED_MSGS):
        utils.log_debug('After wipe out: write 512MBs to disk', LOG)
        cmd2 = 'dd if=/dev/zero of={}/tempfile2 bs=64M count=8'.format(
            mount_path)
        pod.docker_exec(container_id, cmd2)

    pod.delete()
    pv.delete()
Example #5
    def __init__(self, cluster, kind, name, mount_path, size, owner=None):
        self.cluster = cluster
        self.name = name
        self.owner = owner
        self.mount_path = mount_path
        self.volume_name = utils.get_rnd_low_string(length=11)
        inits = {
            "new": self._create_new,
            "existing": self._load_existing,
            "dummy": self._create_dummy
        }
        try:
            inits[kind](size)
        except KeyError:
            raise AssertionError(
                "Integration test API PV type not in {}".format(inits.keys()))
Example #6
def test_pod_with_domain_name(cluster):
    suffix = get_rnd_low_string(length=5)
    pod_name = format(suffix)
    with open("tests_integration/assets/cpanel_credentials.json") as f:
        creds = json.load(f)
    log_debug("Start a pod with shared IP", LOG)
    pod = cluster.pods.create("nginx",
                              pod_name,
                              ports_to_open=[80],
                              wait_for_status="running",
                              domain=creds["domain"],
                              healthcheck=True,
                              wait_ports=True)
    log_debug("Restart the pod with shared IP", LOG)
    pod.redeploy()
    try:
        pod.wait_for_status("pending", tries=5, interval=3)
    except StatusWaitException:
        # When the pod is rebooted it often stays in the "pending" status only
        # for a short time, so this status isn't guaranteed to be caught by
        # pod.wait_for_status
        pass
    pod.wait_for_status("running")
    assert_eq(pod.domain, "testuser-{}.{}".format(suffix, creds["domain"]))
    pod.wait_for_ports([80])
    pod.healthcheck()

    log_debug("Start and stop the pod with shared IP", LOG)
    pod.stop()
    pod.wait_for_status("stopped")
    pod.start()
    pod.wait_for_status("running")
    pod.wait_for_ports([80])
    pod.healthcheck()

    log_debug("Change number of kubes in the pod with shared IP", LOG)
    pod.change_kubes(kubes=3, container_image="nginx")
    try:
        # Right after the kube count change is initiated, the pod still stays
        # in the "running" state for several seconds
        pod.wait_for_status("pending", tries=12)
    except StatusWaitException:
        # "pending" status lasts for very short time and may be not detected
        pass
    pod.wait_for_status("running")
    pod.wait_for_ports([80])
    pod.healthcheck()
Example #7
def test_pod_with_long_domain_name(cluster):
    """
     Tes that pod with domain name's length equaling 63 (kubernetes
     limitation) symbols can be created and accessed
    """
    with open("tests_integration/assets/cpanel_credentials.json") as f:
        creds = json.load(f)

    # Adjust the pod name's length so that the domain name's length equals 63:
    # 53 is 63 - 10, where 10 is the length of "testuser-."
    pod_name = get_rnd_low_string(length=53 - len(creds["domain"]))

    log_debug(
        "Start the pod with shared IP, having domain name consisting "
        "of 63 symbols", LOG)
    pod = cluster.pods.create("nginx",
                              pod_name,
                              ports_to_open=[80],
                              wait_for_status="running",
                              domain=creds["domain"],
                              healthcheck=True,
                              wait_ports=True)
    assert_eq(pod.domain, "testuser-{}.{}".format(pod_name, creds["domain"]))
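The length arithmetic behind the comment in the example above, with an assumed shared domain just to make the numbers concrete:

# Sketch of the 63-character limit arithmetic; 'example.com' is made up.
domain = 'example.com'
pod_name = get_rnd_low_string(length=53 - len(domain))
full_domain = 'testuser-{}.{}'.format(pod_name, domain)
# 'testuser-' (9) + pod_name (53 - len(domain)) + '.' (1) + domain == 63
assert len(full_domain) == 63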
def test_add_new_block_device(cluster):
    """
    Add a new block device into ZFS pool (Non-AWS)
    """
    for node in cluster.node_names:
        utils.log_debug("Add new block device to node '{}'".format(node), LOG)
        # NOTE: Generate a new file each time, so that nothing breaks if the
        # test is run on the same cluster multiple times; attaching two
        # volumes with the same name would break the ZFS pool.
        # FIXME: Tried to detach the volume after the test is complete, but
        # couldn't figure out how to do it properly.
        of_path = utils.get_rnd_low_string(prefix='/tmp/dev', length=5)

        write_file_cmd = 'dd if=/dev/zero of="{}" bs=64M count=10'.format(
            of_path)
        cluster.ssh_exec(node, write_file_cmd)

        add_bl_device_cmd = ('node-storage add-volume --hostname {} '
                             '--devices {}'.format(node, of_path))
        cluster.manage(add_bl_device_cmd)

        utils.log_debug("Make sure a new block device is added", LOG)
        _, out, _ = cluster.ssh_exec(node, 'zpool status', sudo=True)
        utils.assert_in(of_path, out)
def test_zfs_volumes_mount_properly(cluster):
    """
    Automate TestRail case: Deploy with ZFS parameter

    https://cloudlinux.testrail.net/index.php?/cases/view/81
    """
    image = 'nginx'
    pv_name = utils.get_rnd_low_string(prefix='zfs_pv_')
    pv_mpath = '/usr/share/nginx/html'
    pv = cluster.pvs.add('dummy', pv_name, pv_mpath)
    pod = cluster.pods.create(image,
                              'nginx_zfs_volume_mounts',
                              pvs=[pv],
                              ports_to_open=(80, ),
                              start=True,
                              wait_for_status='running',
                              wait_ports=True)

    pod_owner = cluster.users.get(name=pod.owner)
    pv_mountpoint = os.path.join(ZFS_POOL_MOUNTPOINT, str(pod_owner.get('id')),
                                 pv_name)
    check_volume_mounts(cluster, pod, log_msg_prefix='BEFORE NODE REBOOT: ')

    utils.log_debug("Write a file 'test.txt' to PV and get it via HTTP", LOG)
    c_id = pod.get_container_id(container_image=image)
    pod.docker_exec(c_id, 'echo -n TEST > {}/test.txt'.format(pv_mpath))
    ret = pod.do_GET(path='/test.txt')
    utils.assert_eq('TEST', ret)

    # Reboot Node
    cluster.nodes.get_node(pod.node).reboot()

    utils.wait_for(lambda: c_id != pod.get_container_id(container_image=image))
    pod.wait_for_ports()

    check_volume_mounts(cluster, pod, log_msg_prefix='AFTER NODE REBOOT: ')

    utils.log_debug(
        "Make sure that we can get 'test.txt' via HTTP after node reboot", LOG)
    ret = pod.do_GET(path='/test.txt')
    utils.assert_eq('TEST', ret)

    c_id = pod.get_container_id(container_image=image)

    utils.log_debug(
        'Restart pod and check that volumes are mounted correctly', LOG)
    pod.redeploy()

    utils.wait_for(lambda: c_id != pod.get_container_id(container_image=image))
    pod.wait_for_status('running')

    check_volume_mounts(cluster, pod, log_msg_prefix='AFTER POD RESTART: ')

    node = pod.node
    pod.delete()
    pv.delete()

    utils.log_debug(
        "Make sure that '{}' is not mounted after PV deletion".format(pv_name),
        LOG)
    with utils.assert_raises(NonZeroRetCodeException,
                             expected_ret_codes=GREP_EXIT_CODES):
        utils.retry(assert_volume_mounts,
                    cluster=cluster,
                    mountpoint=pv_mountpoint,
                    node=node,
                    assertion=utils.assert_not_in,
                    tries=3,
                    interval=60)

    utils.log_debug(
        "Make sure that '{}' is not in mountpoints".format(pv_name), LOG)
    pool_path = os.path.join(ZFS_POOL, str(pod_owner.get('id')), pv_name)

    with utils.assert_raises(NonZeroRetCodeException,
                             'dataset does not exist'):
        utils.retry(assert_zfs_mount_points,
                    cluster=cluster,
                    pool_path=pool_path,
                    volume_mp=pv_mountpoint,
                    node=node,
                    assertion=utils.assert_not_in,
                    tries=3,
                    interval=60)
def test_delete_node_with_pv(cluster):
    """
    The scenario is as follows:
    1. Create 2 pods (pod1, pod2) with PVs (pv1, pv2).
    2. Try to delete the node. This should fail.
    3. Delete pod2 and pv2.
    4. Try to delete the node. This should fail again.
    5. Delete pod1.
    6. Try to delete the node. This should fail again.
    7. Delete pv1.
    8. Delete the node. This time the node should be deleted.
    """
    utils.log_debug('===== Delete Node with PV =====', LOG)

    pv_name1 = utils.get_rnd_low_string(prefix='integr_test_disk_')
    mount_path1 = '/nginxpv1'
    pv_name2 = utils.get_rnd_low_string(prefix='integr_test_disk_')
    mount_path2 = '/nginxpv2'

    pv1 = cluster.pvs.add('new', pv_name1, mount_path1)
    pv2 = cluster.pvs.add('new', pv_name2, mount_path2)

    pod1 = cluster.pods.create('nginx',
                               'test_nginx_pod_1',
                               pvs=[pv1],
                               start=False)
    pod2 = cluster.pods.create('nginx',
                               'test_nginx_pod_2',
                               pvs=[pv1, pv2],
                               start=True,
                               wait_for_status='running')

    hosting_node = cluster.nodes.get_node(pod2.node)

    pod2.stop()
    pod2.wait_for_status('stopped')

    # Try to delete node with pv1 and pv2 on it. Should fail.
    with utils.assert_raises(
            NonZeroRetCodeException,
            "Node 'node1' can't be deleted. Reason: users Persistent volumes "
            "located on the node.*"):
        hosting_node.delete()

    pod2.delete()
    pv2.delete()
    # Try to delete node with pv1 on it. Should fail.
    with utils.assert_raises(
            NonZeroRetCodeException,
            "Node 'node1' can't be deleted. Reason: users Persistent volumes "
            "located on the node.*"):
        hosting_node.delete()

    pod1.delete()
    # pod1 is deleted, but pv1 is still linked to the node,
    # so the deletion will fail.
    with utils.assert_raises(
            NonZeroRetCodeException,
            "Node 'node1' can't be deleted. Reason: users Persistent volumes "
            "located on the node.*"):
        hosting_node.delete()

    pv1.delete()
    # No PVs are left on the node, so it can be deleted without problems.
    hosting_node.delete()