def check_custom_port(cluster, port, proto, is_open=False):
    msg = "Check that port: '{proto}:{port}' on node '{node}' is '{state}'"
    for node in cluster.node_names:
        utils.log_debug(
            msg.format(proto=proto,
                       port=port,
                       node=node,
                       state='open' if is_open else 'closed'), LOG)
        node_ip = cluster.nodes.get_node_data(node).get('ip')
        if proto == 'tcp' and is_open:
            with paramiko_expect_http_server(cluster, node, port):
                sleep(SERVER_START_WAIT_TIMEOUT)
                res = unregistered_host_port_check(node_ip, port)
                utils.assert_in('Directory listing for /', res)
        elif proto == 'tcp' and not is_open:
            with paramiko_expect_http_server(cluster, node, port), \
                utils.assert_raises(NonZeroRetCodeException,
                                    expected_ret_codes=CURL_CONNECTION_ERRORS):
                sleep(SERVER_START_WAIT_TIMEOUT)
                unregistered_host_port_check(node_ip, port)
        elif proto == 'udp' and is_open:
            with paramiko_expect_udp_server(cluster, node, port):
                sleep(SERVER_START_WAIT_TIMEOUT)
                out = unregistered_host_udp_check(node_ip, port)
                utils.assert_eq('PONG', out)
        else:
            with paramiko_expect_udp_server(cluster, node, port), \
                utils.assert_raises(
                    NonZeroRetCodeException, 'socket.timeout: timed out'):
                sleep(SERVER_START_WAIT_TIMEOUT)
                unregistered_host_udp_check(node_ip, port)
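A minimal usage sketch for the helper above; the port numbers below are illustrative assumptions, not values from the original tests:

# Hypothetical calls: verify an opened TCP port and a closed UDP port on
# every node of the cluster (port values are assumptions).
check_custom_port(cluster, 8080, 'tcp', is_open=True)
check_custom_port(cluster, 9090, 'udp', is_open=False)
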
def test_redis_pa_highmem(cluster):
    pod = cluster.pods.create_pa('custom_redis.yaml', plan_id=2,
                                 wait_ports=True,
                                 wait_for_status='running',
                                 healthcheck=True)
    spec = pod.get_spec()
    assert_eq(spec['containers'][0]['kubes'], 4)
Example #3
    def _check_list_output(command):
        methods = {
            "kdctl": cluster.kdctl,
            "kcli2": cluster.kcli2
        }

        _, out, _ = methods[command]("predefined-apps list", out_as_dict=True)
        listed_templates = {t["name"]: t for t in out["data"]}
        assert_eq(len(listed_templates), len(templates))
        listed_names = [t for t in listed_templates]
        not_listed_names = []
        # Check that all PA templates added to Kuberdock are listed
        for template in templates:
            name = template["template name"]
            if name not in listed_names:
                not_listed_names.append(name)
        if not_listed_names:
            raise PATemplateNotInList(
                "PA template(s) {} were(was) added to kuberdock, but "
                "aren't (isn't) listed by '{} predefined-apps list'".
                format(", ".join(not_listed_names), command))

        # If all of them are listed, check that they are listed correctly
        for t in templates:
            _check_pa_template(listed_templates[t["template name"]],
                               origin=t.get("origin"))
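A hedged sketch of how the nested helper above is presumably driven, exercising the listing through both CLI front-ends (the loop itself is an assumption based on the helper's signature):

# Check the 'predefined-apps list' output of both kdctl and kcli2.
for command in ("kdctl", "kcli2"):
    _check_list_output(command)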
Example #4
def test_cannot_add_pod_if_no_free_ips_available(cluster):
    expected_pod_count = 3
    # 2 IP addresses in a network
    cluster.ip_pools.add('192.168.0.0/31', 'node1')
    # 1 IP address in a network
    cluster.ip_pools.add('192.168.1.0/32', 'node2')

    pods = create_new_pods(cluster,
                           expected_pod_count,
                           open_all_ports=True,
                           wait_for_status='running')

    with assert_raises(NonZeroRetCodeException, NO_FREE_IPS_ERR_MSG):
        create_new_pods(cluster, num=1, open_all_ports=True)

    # Make sure there are still expected_pod_count of pods
    assert_eq(len(cluster.pods.filter_by_owner()), expected_pod_count)

    # Remove a pod to free an IP and try to add a new one - should succeed
    pods[0].delete()
    create_new_pods(cluster,
                    num=1,
                    open_all_ports=True,
                    wait_for_status='running')

    # Once again, it's not possible to create a pod with a public IP
    with assert_raises(NonZeroRetCodeException, NO_FREE_IPS_ERR_MSG):
        create_new_pods(cluster, num=1, open_all_ports=True)

    # But it's possible to create a pod without a public IP
    create_new_pods(cluster, open_all_ports=False, wait_for_status='running')

    # Make sure there are +1 pods
    pod_count = len(cluster.pods.filter_by_owner())
    assert_eq(pod_count, expected_pod_count + 1)
Example #5
def get_only_yaml_part_of_pa_template(cluster):
    _, template, _ = cluster.ssh_exec("master", "cat {}".format(
        PREDEFINED_APPLICATION_FILES["drupal"]))
    name = "my pa"
    cluster.kdctl("predefined-apps create --name '{}' '{}'".
                  format(name, template))
    _, out, _ = cluster.kdctl("predefined-apps get --name '{}' --file-only".
                              format(name))
    assert_eq(out, template)
Example #6
def test_cadvisor_errors(cluster):
    # type: (KDIntegrationTestAPI) -> None
    """Check cadvisor error/warning appears in uwsgi (AC-3499)"""

    cluster.kdctl('pricing license show')
    # TODO: Remove once AC-3618 implemented
    cmd = "journalctl --since '15 min ago' -m -t uwsgi | " \
          "grep -v 'ssl_stapling' | egrep 'warn|err' | tail -n 100"
    _, out, err = cluster.ssh_exec('master', cmd)
    utils.assert_eq((out + err).strip(), '')
def _check_visible_ip(pod, specs, connection_list):
    if pod.public_ip:
        LOG.debug('{}Check if pod IP is visible as public IP for pod with '
                  'public IP\nExpected: {} Actual: {}{}'.format(
                      Fore.CYAN, pod.public_ip, connection_list[-1],
                      Style.RESET_ALL))
        utils.assert_eq(connection_list[-1], pod.public_ip)
    else:
        LOG.debug('{}Check if pod IP is visible as node IP for pod without '
                  'public IP\nExpected: {} Actual: {}{}'.format(
                      Fore.CYAN, specs[pod.name]['hostIP'],
                      connection_list[-1], Style.RESET_ALL))
        utils.assert_eq(connection_list[-1], specs[pod.name]['hostIP'])
Example #8
def test_update_pa_template_by_name(cluster):
    """Check that PA template can be updated.

    First the dokuwiki PA template is created. Then it is updated with
    drupal.yaml and the result is checked.

    """
    name = "my pa"
    _pa_create(cluster, name, f=PREDEFINED_APPLICATION_FILES["dokuwiki"])
    _pa_update(cluster, f=PREDEFINED_APPLICATION_FILES["drupal"], name=name)

    template = _pa_get(cluster, file_only=True, name=name)
    _, yaml, _ = cluster.ssh_exec("master", "cat {}".
                                  format(PREDEFINED_APPLICATION_FILES["drupal"]))
    assert_eq(template, yaml)
Example #9
def test_validating_yaml_before_updating_pa_template(cluster):
    name = "my pa"
    _pa_create(cluster, name, f=PREDEFINED_APPLICATION_FILES["dokuwiki"])

    # Check that --validate flag prevents updating pa template by invalid yaml
    _pa_update(cluster, data="'some: invalid\nexpression'", validate=True,
               name=name)

    # Check that --validate flag allows updating pa template by valid yaml
    _, correct_yaml, _ = cluster.ssh_exec("master", "cat {}".
                                          format(PREDEFINED_APPLICATION_FILES
                                                 ["drupal"]))
    _pa_update(cluster, data=correct_yaml, validate=True, name=name)
    template = _pa_get(cluster, file_only=True, name=name)
    assert_eq(template, correct_yaml)
Example #10
 def _generic_healthcheck(self):
     spec = self.get_spec()
     utils.assert_eq(spec['kube_type'],
                     utils.kube_type_to_int(self.kube_type))
     for container in spec['containers']:
         utils.assert_eq(container['kubes'], self.kubes)
     utils.assert_eq(spec['restartPolicy'], self.restart_policy)
     utils.assert_eq(spec['status'], "running")
     return spec
Example #11
def test_pod_with_domain_name(cluster):
    suffix = get_rnd_low_string(length=5)
    pod_name = suffix
    with open("tests_integration/assets/cpanel_credentials.json") as f:
        creds = json.load(f)
    log_debug("Start a pod with shared IP", LOG)
    pod = cluster.pods.create("nginx",
                              pod_name,
                              ports_to_open=[80],
                              wait_for_status="running",
                              domain=creds["domain"],
                              healthcheck=True,
                              wait_ports=True)
    log_debug("Restart the pod with shared IP", LOG)
    pod.redeploy()
    try:
        pod.wait_for_status("pending", tries=5, interval=3)
    except StatusWaitException:
        # When the pod is rebooted it often stays in the "pending" status only
        # for a short time, so it isn't guaranteed to be caught by
        # pod.wait_for_status
        pass
    pod.wait_for_status("running")
    assert_eq(pod.domain, "testuser-{}.{}".format(suffix, creds["domain"]))
    pod.wait_for_ports([80])
    pod.healthcheck()

    log_debug("Start and stop the pod with shared IP", LOG)
    pod.stop()
    pod.wait_for_status("stopped")
    pod.start()
    pod.wait_for_status("running")
    pod.wait_for_ports([80])
    pod.healthcheck()

    log_debug("Change number of kubes in the pod with shared IP", LOG)
    pod.change_kubes(kubes=3, container_image="nginx")
    try:
        # Right after the kube count change starts, the pod keeps the
        # "running" status for several seconds
        pod.wait_for_status("pending", tries=12)
    except StatusWaitException:
        # "pending" status lasts for very short time and may be not detected
        pass
    pod.wait_for_status("running")
    pod.wait_for_ports([80])
    pod.healthcheck()
Example #12
def test_validate_yaml(cluster):
    # Check that --validate flag prevents creating invalid PA template
    with assert_raises(NonZeroRetCodeException, "Unable to parse template"):
        _pa_create(cluster, "incorrect pa",
                   data="incorrect: template\nexpression",
                   validate=True, check_output=False)

    # Check that --validate flag allows creating valid template
    name = 'correct pa'
    _pa_create(cluster, name, f=PREDEFINED_APPLICATION_FILES["dokuwiki"],
               validate=True, check_output=True)

    # Check that PA template list contains only "correct pa"
    _, out, _ = cluster.kdctl("predefined-apps list")
    templates = json.loads(out)["data"]
    assert_eq(len(templates), 1)
    assert_eq(templates[0]["name"], name)
Example #13
def test_pod_ip_resource(cluster):
    # type: (KDIntegrationTestAPI) -> None
    # It's not possible to create a pod with a public IP when there are no
    # IP pools
    cluster.ip_pools.clear()
    with assert_raises(NonZeroRetCodeException, NO_FREE_IPS_ERR_MSG):
        cluster.pods.create("nginx",
                            "test_nginx_pod_2",
                            open_all_ports=True,
                            start=True)

    assert_eq(cluster.pods.filter_by_owner(), [])

    # It's still possible to create a pod without a public IP
    cluster.pods.create("nginx",
                        "test_nginx_pod_3",
                        start=True,
                        open_all_ports=False,
                        wait_for_status='running')
Example #14
def test_pod_with_pv_restore(cluster):
    """Test that pod with PVs can be restored.

    :type cluster: KDIntegrationTestAPI
    """
    file_name = BACKUP_FILES[NGINX_WITH_PV]
    backup_url = "http://node1/backups"
    path_template = '{owner_id}/{volume_name}.tar.gz'
    # Test that pod with persistent volume can be restored
    pod = cluster.pods.restore(USER, file_path=file_name,
                               pv_backups_location=backup_url,
                               pv_backups_path_template=path_template,
                               wait_for_status="running")
    pod.wait_for_ports()
    assert_in("This page has been restored from tar.gz",
              pod.do_GET(path='/restored_location/'))
    old_id = pod.pod_id

    # Test that pod isn't removed if pod with same name is restored with
    # --force-not-delete flag
    with assert_raises(NonZeroRetCodeException,
                       'Pod with name .* already exists'):
        cluster.pods.restore(USER, file_path=file_name,
                             pv_backups_location=backup_url,
                             pv_backups_path_template=path_template,
                             flags="--force-not-delete")
    # If the pod hasn't been restored, its ID should not have changed
    assert_eq(old_id, pod.pod_id)

    # Test that pod is removed together with disks if pod with same name
    # and same disks names is restored with --force-delete flag
    path_template = '{owner_name}/{volume_name}.zip'
    pod2 = cluster.pods.restore(USER, file_path=file_name,
                                pv_backups_location=backup_url,
                                pv_backups_path_template=path_template,
                                flags="--force-delete",
                                return_as_json=True)
    # If the pod was restored, its ID should differ from the ID of the pod
    # with the same name that has just been removed
    assert_not_eq(old_id, pod2.pod_id)
    pod2.wait_for_ports()
    assert_in("This page has been restored from zip",
              pod2.do_GET(path='/restored_location/'))
Example #15
    def _open_custom_ports(self):
        try:
            self.cluster.kdctl('allowed-ports open {} {}'.format(
                self.TCP_PORT_TO_OPEN, 'tcp'))
            self.cluster.kdctl('allowed-ports open {} {}'.format(
                self.UDP_PORT_TO_OPEN, 'udp'))
            _, out, _ = self.cluster.kdctl('allowed-ports list',
                                           out_as_dict=True)
            custom_ports = out['data']
            # Make sure that two ports are opened
            assert_eq(len(custom_ports), 2)

            # Make sure that both ports opened correctly
            assert_in(dict(port=self.TCP_PORT_TO_OPEN, protocol='tcp'),
                      custom_ports)
            assert_in(dict(port=self.UDP_PORT_TO_OPEN, protocol='udp'),
                      custom_ports)

        except (NonZeroRetCodeException, AssertionError) as e:
            log_debug("Couldn't open ports. Reason: {}".format(e))
Example #16
def test_a_pv_created_separately(cluster):
    # type: (KDIntegrationTestAPI) -> None
    pv_name = utils.gen_rnd_ceph_pv_name()
    pv_size = 2
    mount_path = '/nginxpv'

    # It is possible to create a separate PV
    pv = cluster.pvs.add("new", pv_name, mount_path, pv_size)
    assert pv.exists()
    utils.assert_eq(pv.size, pv_size)

    # It's possible to use separately created PV for nginx pod
    cluster.pods.create("nginx",
                        "test_nginx_pod_3",
                        pvs=[pv],
                        wait_for_status='running')

    # TODO: place correct exception and regexp to args of assertRaisesRegexp
    # TODO: and uncomment the next block. Currently blocked by AC-3689
    '''
Example #17
 def healthcheck(self):
     spec = self._generic_healthcheck()
     assert_eq(len(spec['containers']), 1)
     assert_eq(spec['containers'][0]['image'], 'redis:3')
     r = redis.StrictRedis(host=self.host, port=6379, db=0)
     r.set('foo', 'bar')
     assert_eq(r.get('foo'), 'bar')
Example #18
 def healthcheck(self):
     spec = self._generic_healthcheck()
     assert_eq(len(spec['containers']), 1)
     assert_eq(spec['containers'][0]['image'], 'memcached:1')
     mc = memcache.Client(['{host}:11211'.format(host=self.host)], debug=0)
     mc.set("foo", "bar")
     assert_eq(mc.get("foo"), "bar")
Example #19
def test_pod_lands_on_correct_node_after_change_kubetype(cluster):
    for node, kube_type in [('node1', 'Standard'), ('node2', 'Tiny'),
                            ('node3', 'High memory')]:
        info = cluster.nodes.get_node_info(node)
        assert_eq(info['kube_id'], kube_type_to_int(kube_type))

    pod = cluster.pods.create("nginx",
                              "test_nginx_pod",
                              kube_type='Tiny',
                              wait_ports=True,
                              healthcheck=True,
                              wait_for_status='running',
                              open_all_ports=True)
    assert_eq(pod.node, 'node2')

    pod.change_kubetype(kube_type=1)
    pod.wait_for_status('running')
    pod.wait_for_ports()
    pod.healthcheck()
    assert_eq(pod.node, 'node1')

    pod.change_kubetype(kube_type=2)
    pod.wait_for_status('running')
    pod.wait_for_ports()
    pod.healthcheck()
    assert_eq(pod.node, 'node3')
Example #20
def test_pod_with_long_domain_name(cluster):
    """
     Tes that pod with domain name's length equaling 63 (kubernetes
     limitation) symbols can be created and accessed
    """
    with open("tests_integration/assets/cpanel_credentials.json") as f:
        creds = json.load(f)

    # Adjust the pod name's length so that the domain name's length equals 63.
    # 53 is 63 - 10 (the length of "testuser-" plus the dot separator)
    pod_name = get_rnd_low_string(length=53 - len(creds["domain"]))

    log_debug(
        "Start the pod with shared IP, having domain name consisting "
        "of 63 symbols", LOG)
    pod = cluster.pods.create("nginx",
                              pod_name,
                              ports_to_open=[80],
                              wait_for_status="running",
                              domain=creds["domain"],
                              healthcheck=True,
                              wait_ports=True)
    assert_eq(pod.domain, "testuser-{}.{}".format(pod_name, creds["domain"]))
Example #21
 def healthcheck(self):
     self._generic_healthcheck()
     mongo = pymongo.MongoClient(self.host, 27017)
     test_db = mongo.test_db
     assert_eq(u'test_db', test_db.name)
     obj_id = test_db.test_collection.insert_one({"x": 1}).inserted_id
     obj = test_db.test_collection.find_one()
     assert_eq(obj_id, obj['_id'])
     assert_eq(1, obj['x'])
def assert_pv_states(pv_info, expected_states, pod_names=None):
    """
    Check that PV states are correct and that pods from pod_names are listed
    in linkedPods
    :param pv_info: PV info dictionary
    :param expected_states: States to check, e.g. {'forbidDeletion': False}
    :param pod_names: Check that pod_names are present in linkedPods of a PV
                      Defaults to None, i.e. don't check
    """
    for state_name, state_value in expected_states.items():
        assert_eq(pv_info[state_name], state_value)

    if pod_names is None:
        return

    linked_pods = pv_info['linkedPods']
    assert_eq(len(linked_pods), len(pod_names))

    for pod_name in pod_names:
        assert_eq(len([p for p in linked_pods if p['name'] == pod_name]), 1)
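A short usage sketch that mirrors how this helper is called later in this collection; the pv object and pod name are placeholders:

# Hypothetical call: the PV is expected to be attached to a running pod
# named 'my_pod', so deletion must be forbidden and the PV must be in use.
assert_pv_states(pv.info(),
                 expected_states=dict(forbidDeletion=True, in_use=True),
                 pod_names=['my_pod'])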
Example #23
def test_pod_lands_on_correct_node_given_a_kubetype(cluster):
    # type: (KDIntegrationTestAPI) -> None
    # Ensure nodes have expected kube types
    for node, kube_type in [('node1', 'Standard'), ('node2', 'Tiny')]:
        info = cluster.nodes.get_node_info(node)
        assert_eq(info['kube_id'], kube_type_to_int(kube_type))

    # Should land on kd_node1 (see pipeline definition)
    pod1 = cluster.pods.create("nginx",
                               "test_nginx_pod_1",
                               kube_type='Standard')
    # Should land on kd_node2 (see pipeline definition)
    pod2 = cluster.pods.create("nginx", "test_nginx_pod_2", kube_type='Tiny')

    pod1.wait_for_status('running')
    pod2.wait_for_status('running')

    pod_hosts = {n['name']: n['host'] for n in cluster.pods.filter_by_owner()}

    cluster.assert_pods_number(2)
    assert_eq(pod_hosts['test_nginx_pod_1'], 'node1')
    assert_eq(pod_hosts['test_nginx_pod_2'], 'node2')
Example #24
def test_cannot_create_pod_with_public_ip_with_no_pools(cluster):
    with assert_raises(NonZeroRetCodeException, NO_FREE_IPS_ERR_MSG):
        create_new_pods(cluster, num=1, open_all_ports=True)

    assert_eq(cluster.pods.filter_by_owner(), [])
Example #25
 def assert_pods_number(self, number):
     utils.assert_eq(len(self.pods.filter_by_owner()), number)
Example #26
 def healthcheck(self):
     # Not passing for now: AC-3199
     rc, _, _ = self.ssh_exec("master",
                              "kuberdock-upgrade health-check-only")
     utils.assert_eq(rc, 0)
Example #27
def test_pod_migrate_on_correct_node_after_change_kubetype(cluster):
    # type: (KDIntegrationTestAPI) -> None
    for node, kube_type in [('node1', 'Standard'), ('node2', 'Tiny'),
                            ('node3', 'High memory')]:
        info = cluster.nodes.get_node_info(node)
        assert_eq(info['kube_id'], kube_type_to_int(kube_type))

    # We have an issue related to using non-unique disk names within the
    # same CEPH pool (AC-3831). That is why the name is randomized.
    pv_name = gen_rnd_ceph_pv_name()

    mount_path = '/usr/share/nginx/html'

    # It is possible to create an nginx pod together with new PV
    pv = cluster.pvs.add("dummy", pv_name, mount_path)
    pod = cluster.pods.create("nginx",
                              "test_nginx_pod",
                              pvs=[pv],
                              kube_type='Tiny',
                              start=True,
                              wait_for_status='running',
                              wait_ports=True,
                              open_all_ports=True)
    assert_eq(pv.exists(), True)
    assert_eq(pod.node, 'node2')

    c_id = pod.get_container_id(container_image='nginx')
    pod.docker_exec(c_id,
                    'echo -n TEST > {path}/test.txt'.format(path=mount_path))
    ret = pod.do_GET(path='/test.txt')
    assert_eq('TEST', ret)

    pod.change_kubetype(kube_type=1)
    pod.wait_for_status('running')
    pod.wait_for_ports()
    ret = pod.do_GET(path='/test.txt')
    assert_eq('TEST', ret)
    assert_in(pod.node, ['node1', 'node4'])

    pod.change_kubetype(kube_type=2)
    pod.wait_for_status('running')
    pod.wait_for_ports()
    ret = pod.do_GET(path='/test.txt')
    assert_eq('TEST', ret)
    assert_eq(pod.node, 'node3')

    # It's possible to remove PV created together with pod
    pod.delete()
    pv.delete()
    assert_eq(pv.exists(), False)

def test_pv_states_and_deletion_via_kcli2(cluster):
    """
    TestRail Case: Different states of persistent volumes (kcli2)
    https://cloudlinux.testrail.net/index.php?/cases/view/145

    TestRail Case: Removing persistent volume (kcli2)
    https://cloudlinux.testrail.net/index.php?/cases/view/143

    TestRail Case: Try to remove persistent volume which is in use (kcli2)
    https://cloudlinux.testrail.net/index.php?/cases/view/146
    """
    pv1_name = 'disk1'
    pv2_name = 'disk2'
    pv1_mpath = '/nginxpv1'
    pv2_mpath = '/nginxpv2'
    pv1 = cluster.pvs.add('dummy', pv1_name, pv1_mpath)
    pv2 = cluster.pvs.add('dummy', pv2_name, pv2_mpath)
    pod_name = 'test_nginx_pv_states_via_kcli'
    pod = cluster.pods.create(
        'nginx', pod_name, pvs=[pv1, pv2], start=True,
        wait_for_status='running')

    log_debug('Get PV info via kcli2 using PV name', LOG)
    pv1_info = pv1.info()
    pv2_info = pv2.info()
    # Get 'disk1' and 'disk2' ids
    pv1_id = pv1_info['id']
    pv2_id = pv2_info['id']

    # Disk1 states: 'forbidDeletion': True, 'in_use': True
    # 'test_nginx_pv_states_via_kcli' in 'disk1' linkedPods
    assert_pv_states(pv1_info,
                     expected_states=dict(forbidDeletion=True, in_use=True),
                     pod_names=[pod_name])

    # Disk2 states: 'forbidDeletion': True, 'in_use': True
    # 'test_nginx_pv_states_via_kcli' in 'disk2' linkedPods
    assert_pv_states(pv2_info,
                     expected_states=dict(forbidDeletion=True, in_use=True),
                     pod_names=[pod_name])

    log_debug('Get PV info via kcli2 using PV id', LOG)
    pv1_info = pv1.info(id=pv1_id)
    pv2_info = pv2.info(id=pv2_id)

    # Disk1 states: 'forbidDeletion': True, 'in_use': True
    # 'test_nginx_pv_states_via_kcli' in 'disk1' linkedPods
    assert_pv_states(pv1_info,
                     expected_states=dict(forbidDeletion=True, in_use=True),
                     pod_names=[pod_name])

    # Disk2 states: 'forbidDeletion': True, 'in_use': True
    # 'test_nginx_pv_states_via_kcli' in 'disk2' linkedPods
    assert_pv_states(pv2_info,
                     expected_states=dict(forbidDeletion=True, in_use=True),
                     pod_names=[pod_name])

    log_debug("Try to delete PVs 'disk1' and 'disk2' with pod 'running'", LOG)
    with assert_raises(NonZeroRetCodeException, 'Persistent disk is used.*'):
        pv1.delete(name=pv1_name)

    with assert_raises(NonZeroRetCodeException, 'Persistent disk is used.*'):
        pv2.delete(id=pv2_id)

    log_debug('List PVs using kcli2', LOG)
    pv_list = cluster.pvs.filter()

    log_debug("Make sure 'disk1' and 'disk2' are in the list", LOG)
    assert_eq(pv1_name in [pv['name'] for pv in pv_list], True)
    assert_eq(pv2_name in [pv['name'] for pv in pv_list], True)

    # Stop the pod
    pod.stop()
    pod.wait_for_status('stopped')

    log_debug("Try to delete PVs 'disk1' and 'disk2' with pod 'stopped'", LOG)
    with assert_raises(NonZeroRetCodeException, 'Volume can not be deleted.'
                       ' Reason: Persistent Disk is linked by pods:.*'):
        pv1.delete(name=pv1_name)

    with assert_raises(NonZeroRetCodeException, 'Volume can not be deleted.'
                       ' Reason: Persistent Disk is linked by pods:.*'):
        pv2.delete(id=pv2_id)

    # Get disk info once again
    log_debug("Pod is stopped and 'in_use' should become 'False'", LOG)
    pv1_info = pv1.info()
    pv2_info = pv2.info()

    # Disk1 states: 'forbidDeletion': True, 'in_use': False
    # 'test_nginx_pv_states_via_kcli' should still be in 'disk1' linkedPods
    assert_pv_states(pv1_info,
                     expected_states=dict(forbidDeletion=True, in_use=False),
                     pod_names=[pod_name])
    # Disk2 states: 'forbidDeletion': True, 'in_use': False
    # 'test_nginx_pv_states_via_kcli' should still be in 'disk2' linkedPods
    assert_pv_states(pv2_info,
                     expected_states=dict(forbidDeletion=True, in_use=False),
                     pod_names=[pod_name])

    pod.delete()

    log_debug("Pod is deleted and both 'forbidDeletion' and 'in_use' should "
              "become 'False'", LOG)
    pv1_info = pv1.info()
    pv2_info = pv2.info()

    # Disk1 states: 'forbidDeletion': False, 'in_use': False
    assert_pv_states(pv1_info,
                     expected_states=dict(forbidDeletion=False, in_use=False),
                     pod_names=[])
    # Disk2 states: 'forbidDeletion': False, 'in_use': False
    assert_pv_states(pv2_info,
                     expected_states=dict(forbidDeletion=False, in_use=False),
                     pod_names=[])

    log_debug("Delete 'disk1' using '--name'", LOG)
    res = pv1.delete(name=pv1_name)
    assert_eq(res['status'], 'OK')

    log_debug("Delete 'disk2' using '--id'", LOG)
    res = pv2.delete(id=pv2_id)
    assert_eq(res['status'], 'OK')

    log_debug("Check that 'disk1' is deleted", LOG)
    with assert_raises(NonZeroRetCodeException, 'Error: Unknown name'):
        pv1.info()

    log_debug("Check that 'disk2' is deleted", LOG)
    with assert_raises(NonZeroRetCodeException, 'Error: Unknown name'):
        pv2.info()
Example #29
def test_a_pv_created_together_with_pod(cluster):
    # type: (KDIntegrationTestAPI) -> None
    # We have an issue related to using non-unique disk names within the
    # same CEPH pool (AC-3831). That is why the name is randomized.
    pv_name = utils.gen_rnd_ceph_pv_name()

    mount_path = '/usr/share/nginx/html'

    # It is possible to create an nginx pod together with new PV
    pv = cluster.pvs.add("dummy", pv_name, mount_path)
    pod = cluster.pods.create("nginx",
                              "test_nginx_pod_1",
                              pvs=[pv],
                              start=True,
                              wait_for_status='running',
                              wait_ports=True,
                              ports_to_open=(80, ))
    utils.assert_eq(pv.exists(), True)

    c_id = pod.get_container_id(container_image='nginx')
    pod.docker_exec(c_id,
                    'echo -n TEST > {path}/test.txt'.format(path=mount_path))
    ret = pod.do_GET(path='/test.txt')
    utils.assert_eq('TEST', ret)
    pod.delete()

    # It is possible to create an nginx pod using existing PV
    pod = cluster.pods.create("nginx",
                              "test_nginx_pod_2",
                              pvs=[pv],
                              start=True,
                              wait_for_status='running',
                              wait_ports=True,
                              ports_to_open=(80, ))
    ret = pod.do_GET(path='/test.txt')
    utils.assert_eq('TEST', ret)
    pod.delete()

    # It's possible to remove PV created together with pod
    pv.delete()
    utils.assert_eq(pv.exists(), False)

    # Create another PV with the same name
    pv = cluster.pvs.add('dummy', pv_name, mount_path)
    pod = cluster.pods.create('nginx',
                              'test_nginx_pod_3',
                              pvs=[pv],
                              start=True,
                              wait_for_status='running',
                              wait_ports=True,
                              ports_to_open=(80, ))
    utils.assert_eq(pv.exists(), True)

    # '/test.txt' is not on newly created PV, we expect HTTP Error 404
    with utils.assert_raises(HTTPError, 'HTTP Error 404: Not Found'):
        pod.do_GET(path='/test.txt')

    pod.delete()
    pv.delete()
    utils.assert_eq(pv.exists(), False)
Example #30
 def _generic_healthcheck(self):
     spec = self.get_spec()
     assert_eq(spec['kube_type'], kube_type_to_int(self.kube_type))
     assert_eq(spec['restartPolicy'], self.restart_policy)
     assert_eq(spec['status'], "running")
     return spec