def check_volume_mounts(cluster, pod, log_msg_prefix=''):
    utils.log_debug(
        "{}Run 'sudo zpool list' and make sure that '{}' is there".format(
            log_msg_prefix, ZFS_POOL), LOG)
    _, out, _ = cluster.ssh_exec(pod.node, 'zpool list', sudo=True)
    utils.assert_in(ZFS_POOL, out)

    pod_owner = cluster.users.get(name=pod.owner)
    for pv in pod.pvs:
        pv_mountpoint = os.path.join(ZFS_POOL_MOUNTPOINT,
                                     str(pod_owner.get('id')), pv.name)
        pool_path = os.path.join(ZFS_POOL, str(pod_owner.get('id')), pv.name)
        utils.log_debug(
            "{}Run 'zfs list' and check that '{}' is there".format(
                log_msg_prefix, pv.name), LOG)

        _, out, _ = cluster.ssh_exec(pod.node, 'zfs list', sudo=True)
        utils.assert_in(pv.name, out)

        # This may look redundant, but it's here only to stay consistent
        # with the test in TestRail
        utils.log_debug(
            "{}Make sure that '{}' is mounted".format(log_msg_prefix, pv.name),
            LOG)
        assert_volume_mounts(cluster, pv_mountpoint, pod.node, utils.assert_in)

        utils.log_debug(
            "{}Make sure that '{}' has a correct mountpoint".format(
                log_msg_prefix, pv.name), LOG)

        assert_zfs_mount_points(cluster,
                                pool_path,
                                pv_mountpoint,
                                node=pod.node,
                                assertion=utils.assert_in)


def check_custom_port(cluster, port, proto, is_open=False):
    msg = "Check that port: '{proto}:{port}' on node '{node}' is '{state}'"
    for node in cluster.node_names:
        utils.log_debug(
            msg.format(proto=proto,
                       port=port,
                       node=node,
                       state='open' if is_open else 'closed'), LOG)
        node_ip = cluster.nodes.get_node_data(node).get('ip')
        if proto == 'tcp' and is_open:
            with paramiko_expect_http_server(cluster, node, port):
                sleep(SERVER_START_WAIT_TIMEOUT)
                res = unregistered_host_port_check(node_ip, port)
                utils.assert_in('Directory listing for /', res)
        elif proto == 'tcp' and not is_open:
            with paramiko_expect_http_server(cluster, node, port), \
                utils.assert_raises(NonZeroRetCodeException,
                                    expected_ret_codes=CURL_CONNECTION_ERRORS):
                sleep(SERVER_START_WAIT_TIMEOUT)
                unregistered_host_port_check(node_ip, port)
        elif proto == 'udp' and is_open:
            with paramiko_expect_udp_server(cluster, node, port):
                sleep(SERVER_START_WAIT_TIMEOUT)
                out = unregistered_host_udp_check(node_ip, port)
                utils.assert_eq('PONG', out)
        else:
            with paramiko_expect_udp_server(cluster, node, port), \
                utils.assert_raises(
                    NonZeroRetCodeException, 'socket.timeout: timed out'):
                sleep(SERVER_START_WAIT_TIMEOUT)
                unregistered_host_udp_check(node_ip, port)
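For reference, a minimal usage sketch of the helper above; the port numbers are illustrative placeholders, not values taken from the test suite:

# Illustrative only: verify that a custom TCP port accepts connections
# and that a custom UDP port does not.
check_custom_port(cluster, 8080, 'tcp', is_open=True)
check_custom_port(cluster, 9090, 'udp', is_open=False)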
Example 3
def test_pod_lands_on_correct_node_after_change_kubetype(cluster):
    for node, kube_type in [('node1', 'Standard'), ('node2', 'Tiny'),
                            ('node3', 'High memory')]:
        info = cluster.nodes.get_node_info(node)
        assert_eq(info['kube_id'], kube_type_to_int(kube_type))

    pod = cluster.pods.create("nginx",
                              "test_nginx_pod",
                              kube_type='Tiny',
                              wait_ports=True,
                              healthcheck=True,
                              wait_for_status='running',
                              open_all_ports=True)
    assert_eq(pod.node, 'node2')

    pod.change_kubetype(kube_type=1)
    pod.wait_for_status('running')
    pod.wait_for_ports()
    pod.healthcheck()
    assert_eq(pod.node, 'node1')

    pod.change_kubetype(kube_type=2)
    pod.wait_for_status('running')
    pod.wait_for_ports()
    pod.healthcheck()
    assert_eq(pod.node, 'node3')
Example 4
 def healthcheck(self):
     self._generic_healthcheck()
     self.wait_http_resp()
     page = self.do_GET(path='/installer/')
     assert_in(
         u"Installing Gallery is easy.  "
         "We just need a place to put your photos", page)
Example 5
 def healthcheck(self):
     if not (self.open_all_ports or self.ports):
         raise Exception(
             "Cannot perform nginx healthcheck without public IP")
     self._generic_healthcheck()
     # if shared IP is used, 404 is returned in a response to GET on
     # pod's domain name for up to 40 seconds after pod is started
     utils.retry(self.do_GET, tries=5, interval=10)
     utils.assert_in("Welcome to nginx!", self.do_GET())
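The retry above works around the shared-IP behaviour described in the comment: the pod's domain can return 404 for a short while after the pod starts. utils.retry is a project helper; as an assumption about its calling convention only, a minimal generic sketch might look like this:

import time

def retry(func, tries=3, interval=1, *args, **kwargs):
    # Keep calling func until it succeeds or the attempts run out,
    # sleeping `interval` seconds between attempts.
    for attempt in range(tries):
        try:
            return func(*args, **kwargs)
        except Exception:
            if attempt == tries - 1:
                raise
            time.sleep(interval)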
Example 6
 def healthcheck(self):
     self._generic_healthcheck()
     spec = self.get_spec()
     env = {e['name']: e['value'] for e in spec['containers'][0]['env']}
     user = env['POSTGRES_USER']
     passwd = env['POSTGRES_PASSWORD']
     db = DB(dbname=user,
             host=self.host,
             port=5432,
             user=user,
             passwd=passwd)
     sql = "create table test_table(id serial primary key, name varchar)"
     db.query(sql)
     assert_in('public.test_table', db.get_tables())
Example 7
def test_pod_migrate_on_correct_node_after_change_kubetype(cluster):
    # type: (KDIntegrationTestAPI) -> None
    for node, kube_type in [('node1', 'Standard'), ('node2', 'Tiny'),
                            ('node3', 'High memory')]:
        info = cluster.nodes.get_node_info(node)
        assert_eq(info['kube_id'], kube_type_to_int(kube_type))

    # We have an issue related to using non-unique disk names within the
    # same CEPH pool (AC-3831). That is why the name is randomized.
    pv_name = gen_rnd_ceph_pv_name()

    mount_path = '/usr/share/nginx/html'

    # It is possible to create an nginx pod together with a new PV
    pv = cluster.pvs.add("dummy", pv_name, mount_path)
    pod = cluster.pods.create("nginx",
                              "test_nginx_pod",
                              pvs=[pv],
                              kube_type='Tiny',
                              start=True,
                              wait_for_status='running',
                              wait_ports=True,
                              open_all_ports=True)
    assert_eq(pv.exists(), True)
    assert_eq(pod.node, 'node2')

    c_id = pod.get_container_id(container_image='nginx')
    pod.docker_exec(c_id,
                    'echo -n TEST > {path}/test.txt'.format(path=mount_path))
    ret = pod.do_GET(path='/test.txt')
    assert_eq('TEST', ret)

    pod.change_kubetype(kube_type=1)
    pod.wait_for_status('running')
    pod.wait_for_ports()
    ret = pod.do_GET(path='/test.txt')
    assert_eq('TEST', ret)
    assert_in(pod.node, ['node1', 'node4'])

    pod.change_kubetype(kube_type=2)
    pod.wait_for_status('running')
    pod.wait_for_ports()
    ret = pod.do_GET(path='/test.txt')
    assert_eq('TEST', ret)
    assert_eq(pod.node, 'node3')

    # It's possible to remove a PV created together with the pod
    pod.delete()
    pv.delete()
    assert_eq(pv.exists(), False)
Example 8
def test_pod_with_pv_restore(cluster):
    """Test that pod with PVs can be restored.

    :type cluster: KDIntegrationTestAPI
    """
    file_name = BACKUP_FILES[NGINX_WITH_PV]
    backup_url = "http://node1/backups"
    path_template = '{owner_id}/{volume_name}.tar.gz'
    # Test that pod with persistent volume can be restored
    pod = cluster.pods.restore(USER, file_path=file_name,
                               pv_backups_location=backup_url,
                               pv_backups_path_template=path_template,
                               wait_for_status="running")
    pod.wait_for_ports()
    assert_in("This page has been restored from tar.gz",
              pod.do_GET(path='/restored_location/'))
    old_id = pod.pod_id

    # Test that the pod isn't removed if a pod with the same name is restored
    # with the --force-not-delete flag
    with assert_raises(NonZeroRetCodeException,
                       'Pod with name .* already exists'):
        cluster.pods.restore(USER, file_path=file_name,
                             pv_backups_location=backup_url,
                             pv_backups_path_template=path_template,
                             flags="--force-not-delete")
    # If the pod hasn't been restored, its id should not be changed
    assert_eq(old_id, pod.pod_id)

    # Test that the pod is removed together with its disks if a pod with the
    # same name and the same disk names is restored with the --force-delete flag
    path_template = '{owner_name}/{volume_name}.zip'
    pod2 = cluster.pods.restore(USER, file_path=file_name,
                                pv_backups_location=backup_url,
                                pv_backups_path_template=path_template,
                                flags="--force-delete",
                                return_as_json=True)
    # If the pod was restored, its id should differ from the id of the pod
    # with the same name that has just been removed
    assert_not_eq(old_id, pod2.pod_id)
    pod2.wait_for_ports()
    assert_in("This page has been restored from zip",
              pod2.do_GET(path='/restored_location/'))
Example 9
    def _open_custom_ports(self):
        try:
            self.cluster.kdctl('allowed-ports open {} {}'.format(
                self.TCP_PORT_TO_OPEN, 'tcp'))
            self.cluster.kdctl('allowed-ports open {} {}'.format(
                self.UDP_PORT_TO_OPEN, 'udp'))
            _, out, _ = self.cluster.kdctl('allowed-ports list',
                                           out_as_dict=True)
            custom_ports = out['data']
            # Make sure that two ports are opened
            assert_eq(len(custom_ports), 2)

            # Make sure that both ports opened correctly
            assert_in(dict(port=self.TCP_PORT_TO_OPEN, protocol='tcp'),
                      custom_ports)
            assert_in(dict(port=self.UDP_PORT_TO_OPEN, protocol='udp'),
                      custom_ports)

        except (NonZeroRetCodeException, AssertionError) as e:
            log_debug("Couldn't open ports. Reason: {}".format(e))
Example 10
    def create(self,
               image,
               name,
               kube_type="Standard",
               kubes=1,
               open_all_ports=False,
               ports_to_open=(),
               restart_policy="Always",
               pvs=None,
               start=True,
               wait_ports=False,
               healthcheck=False,
               wait_for_status=None,
               owner='test_user',
               password=None,
               domain=None):
        """
        Create a new pod in KuberDock
        :param open_all_ports: if True, open all ports of the image (this does
        not mean they are public IP ports; it depends on the cluster setup)
        :param ports_to_open: if open_all_ports is False, open only the ports
        from this list
        :return: object via which the KuberDock pod can be managed
        """
        utils.assert_in(kube_type, ("Tiny", "Standard", "High memory"))
        utils.assert_in(restart_policy, ("Always", "Never", "OnFailure"))

        pod = KDPod.create(self.cluster, image, name, kube_type, kubes,
                           open_all_ports, restart_policy, pvs, owner,
                           password or owner, ports_to_open, domain)
        if start:
            pod.start()
        if wait_for_status:
            pod.wait_for_status(wait_for_status)
        if wait_ports:
            pod.wait_for_ports()
        if healthcheck:
            pod.healthcheck()
        return pod
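As a usage illustration of the create helper above (the image, pod name, and port are placeholders, not values from the suite):

# Hypothetical call: open only port 80 instead of all image ports.
pod = cluster.pods.create("nginx",
                          "example_nginx_pod",
                          kube_type="Tiny",
                          ports_to_open=(80,),
                          start=True,
                          wait_for_status="running",
                          wait_ports=True,
                          healthcheck=True)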
Example 11
def test_add_new_block_device(cluster):
    """
    Add a new block device to the ZFS pool (non-AWS)
    """
    for node in cluster.node_names:
        utils.log_debug("Add new block device to node '{}'".format(node), LOG)
        # NOTE: Generate a new file each time, so that nothing breaks if the
        # test is run on the same cluster multiple times; otherwise, attaching
        # two volumes with the same name would break the ZFS pool.
        # FIXME: Tried to detach the volume after the test completes, but
        # couldn't figure out how to do it properly.
        of_path = utils.get_rnd_low_string(prefix='/tmp/dev', length=5)

        write_file_cmd = 'dd if=/dev/zero of="{}" bs=64M count=10'.format(
            of_path)
        cluster.ssh_exec(node, write_file_cmd)

        add_bl_device_cmd = ('node-storage add-volume --hostname {} '
                             '--devices {}'.format(node, of_path))
        cluster.manage(add_bl_device_cmd)

        utils.log_debug("Make sure a new block device is added", LOG)
        _, out, _ = cluster.ssh_exec(node, 'zpool status', sudo=True)
        utils.assert_in(of_path, out)
Example 12
 def healthcheck(self):
     self._generic_healthcheck()
     self.wait_http_resp()
     page = self.do_GET(path='/index.php/install/')
     assert_in(u"Magento is a trademark of Magento Inc.", page)
Example 13
 def healthcheck(self):
     self._generic_healthcheck()
     self.wait_http_resp()
     page = self.do_GET(path='/web/database/selector')
     assert_in(u"Fill in this form to create an Odoo database.", page)
Example 14
 def healthcheck(self):
     self._generic_healthcheck()
     self.wait_http_resp()
     page = self.do_GET(path='/wp-admin/install.php')
     assert_in(u"WordPress › Installation", page)
Example 15
 def healthcheck(self):
     self._generic_healthcheck()
     self.wait_http_resp()
     page = self.do_GET(path='/install.php')
     assert_in(u"Sugar Setup Wizard:", page)
     assert_in(u"Welcome to the SugarCRM", page)
Example 16
 def healthcheck(self):
     self._generic_healthcheck()
     self.wait_http_resp()
     assert_in("Welcome to your new DokuWiki",
               self.do_GET(path='/doku.php?id=wiki:welcome'))
Example 17
 def healthcheck(self):
     self._generic_healthcheck()
     self.wait_http_resp()
     page = self.do_GET(path='/install/index.php')
     assert_in(u"Welcome to phpBB3!", page)
Example 18
 def healthcheck(self):
     self._generic_healthcheck()
     self.wait_http_resp()
     page = self.do_GET(path='/server_databases.php')
     assert_in(u"information_schema", page)
     assert_in(u"mydata", page)
Example 19
 def healthcheck(self):
     self._generic_healthcheck()
     self.wait_http_resp()
     page = self.do_GET()
     assert_in(u"Koken - Setup", page)
Example 20
 def healthcheck(self):
     self._generic_healthcheck()
     self.wait_http_resp()
     page = self.do_GET()
     assert_in(u"ownCloud", page)
     assert_in(u"web services under your control", page)
Example 21
 def healthcheck(self):
     self._generic_healthcheck()
     self.wait_http_resp()
     page = self.do_GET(path='/install/index.php')
     assert_in(u"Please read the OpenCart licence agreement", page)
Example 22
 def healthcheck(self):
     self._generic_healthcheck()
     self.wait_http_resp()
     page = self.do_GET(path='/index.php?r=installer/welcome')
     assert_in(u"LimeSurvey installer", page)
Example 23
 def healthcheck(self):
     self._generic_healthcheck()
     self.wait_http_resp()
     page = self.do_GET(path='/install/index.php')
     assert_in(u"MyBB Installation Wizard", page)
Example 24
 def healthcheck(self):
     self._generic_healthcheck()
     self.wait_http_resp()
     page = self.do_GET(path='/admin/install.php')
     assert_in(u"Administration - Installation - MantisBT", page)
Example 25
 def healthcheck(self):
     self._generic_healthcheck()
     self.wait_http_resp()
     # Change the assertion string after AC-4487 is fixed
     page = self.do_GET(path='/core/install.php')
     assert_in("Drupal", page)
Example 26
 def healthcheck(self):
     self._generic_healthcheck()
     self.wait_http_resp()
     assert_in("Redmine", self.do_GET(port=self.HTTP_PORT))
Example 27
 def healthcheck(self):
     self._generic_healthcheck()
     self.wait_http_resp()
     page = self.do_GET(path='/installation/index.php')
     assert_in(u"Joomla! - Open Source Content Management", page)