def test_volume_inconsistencies(self):
        # Examine Gluster cluster and Heketi to make sure there are
        # no inconsistencies
        out = heketi_ops.heketi_examine_gluster(
            self.heketi_client_node, self.heketi_server_url)
        if ("heketi volume list matches with volume list of all nodes"
                not in out['report']):
            self.skipTest(
                "heketi and Gluster are inconsistent to each other")

        # create volume
        vol = heketi_ops.heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url, 1, json=True)
        self.addCleanup(
            heketi_ops.heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, vol['id'])

        # delete volume from gluster cluster directly
        openshift_ops.cmd_run_on_gluster_pod_or_node(
            self.node,
            "gluster vol stop %s force --mode=script" % vol['name'])
        openshift_ops.cmd_run_on_gluster_pod_or_node(
            self.node,
            "gluster vol delete %s --mode=script" % vol['name'])

        # verify that heketi is reporting inconsistencies
        out = heketi_ops.heketi_examine_gluster(
            self.heketi_client_node, self.heketi_server_url)
        self.assertNotIn(
            "heketi volume list matches with volume list of all nodes",
            out['report'])
    def test_verify_arbiter_brick_able_to_contain_expected_amount_of_files(
            self, pvc_size_gb, avg_file_size):
        """Validate arbiter brick creation with different avg file size"""

        # Create sc with gluster arbiter info
        self.create_storage_class(
            is_arbiter_vol=True, arbiter_avg_file_size=avg_file_size)

        # Create PVC and wait for it to be in 'Bound' state
        self.create_and_wait_for_pvc(pvc_size_gb)

        # Get volume info
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

        # Verify proportion of data and arbiter bricks
        bricks_info = (
            self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
                vol_info))

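        # Expected file count: PVC size converted from GiB to KiB, divided
        # by the average file size in KiB (64 by default), then spread
        # across the arbiter bricks.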
        expected_file_amount = pvc_size_gb * 1024**2 // (avg_file_size or 64)
        expected_file_amount = (expected_file_amount //
                                bricks_info['arbiter_amount'])

        # Try to create expected amount of files on arbiter brick mount
        passed_arbiter_bricks = []
        not_found = "Mount Not Found"
        for brick in bricks_info['arbiter_list']:
            # "brick path" looks like following:
            # ip_addr:/path/to/vg/brick_unique_name/brick
            gluster_ip, brick_path = brick["name"].split(":")
            brick_path = brick_path[0:-6]

            cmd = "mount | grep %s || echo '%s'" % (brick_path, not_found)
            out = cmd_run_on_gluster_pod_or_node(self.node, cmd, gluster_ip)
            if out != not_found:
                cmd = (
                    "python -c \"["
                    "    open('%s/foo_file{0}'.format(i), 'a').close()"
                    "    for i in range(%s)"
                    "]\"" % (brick_path, expected_file_amount)
                )
                cmd_run_on_gluster_pod_or_node(self.node, cmd, gluster_ip)
                passed_arbiter_bricks.append(brick["name"])

        # Make sure all the arbiter bricks were checked
        for brick in bricks_info['arbiter_list']:
            self.assertIn(
                brick["name"], passed_arbiter_bricks,
                "Arbiter brick '%s' was not verified. Looks like it was "
                "not found on any of gluster PODs/nodes." % brick["name"])
    def dynamic_provisioning_glusterfile(self, create_vol_name_prefix):
        # Create secret and storage class
        self.create_storage_class(
            create_vol_name_prefix=create_vol_name_prefix)

        # Create PVC
        pvc_name = self.create_and_wait_for_pvc()

        # Create DC with an app POD and attach the PVC to it.
        dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)

        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        # Verify Heketi volume name for prefix presence if provided
        if create_vol_name_prefix:
            ret = verify_volume_name_prefix(self.node,
                                            self.sc['volumenameprefix'],
                                            self.sc['secretnamespace'],
                                            pvc_name, self.sc['resturl'])
            self.assertTrue(ret, "verify volnameprefix failed")
        else:
            # Get the volume name and volume id from PV
            pv_name = get_pv_name_from_pvc(self.ocp_client[0], pvc_name)
            custom = [
                r':spec.glusterfs.path',
                r':metadata.annotations.'
                r'"gluster\.kubernetes\.io\/heketi-volume-id"'
            ]
            pv_vol_name, vol_id = oc_get_custom_resource(
                    self.ocp_client[0], 'pv', custom, pv_name)

            # Check that the PV volume name is present in heketi and
            # equals "vol_" + volume id
            heketi_vol_name = heketi_volume_info(
                self.ocp_client[0], self.heketi_server_url, vol_id,
                json=True)['name']
            self.assertEqual(pv_vol_name, heketi_vol_name,
                             'Volume with vol_id = %s not found '
                             'in heketidb' % vol_id)
            self.assertEqual(heketi_vol_name, 'vol_' + vol_id,
                             'Volume with vol_id = %s has a '
                             'custom prefix' % vol_id)
            out = cmd_run_on_gluster_pod_or_node(self.ocp_master_node[0],
                                                 "gluster volume list")
            self.assertIn(pv_vol_name, out,
                          "Volume with id %s does not exist" % vol_id)

        # Make sure we are able to work with files on the mounted volume
        filepath = "/mnt/file_for_testing_io.log"
        for cmd in ("dd if=/dev/urandom of=%s bs=1K count=100",
                    "ls -lrt %s",
                    "rm -rf %s"):
            cmd = cmd % filepath
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0,
                "Failed to execute '%s' command on %s" % (cmd, self.node))
    def _find_bricks(self, brick_paths, present):
        """Make sure that vol brick paths are either present or absent
        on the Gluster nodes."""
        oc_node = self.ocp_master_node[0]
        cmd = (
            'bash -c "'
            'if [ -d "%s" ]; then echo present; else echo absent; fi"')
        g_hosts = list(g.config.get("gluster_servers", {}).keys())
        results = []
        assertion_method = self.assertIn if present else self.assertNotIn
        for brick_path in brick_paths:
            for g_host in g_hosts:
                out = openshift_ops.cmd_run_on_gluster_pod_or_node(
                    oc_node, cmd % brick_path, gluster_node=g_host)
                results.append(out)
            assertion_method('present', results)
    def test_check_max_brick_per_process(self):
        """Check if the max-brick process is set to 250"""

        cmd = ("gluster v get all all | grep cluster.max-bricks-per-process |"
               "awk '{print $2}'")

        # Get brick per process value
        bprocess_status = cmd_run_on_gluster_pod_or_node(
            self.ocp_master_node[0], cmd)

        # Validate the result
        err_msg = ("Got unexepeted max-brick process - '%s' "
                   "Expected max brick process is : 250") % (
                   bprocess_status)
        self.assertIn(bprocess_status, '250', err_msg)
    def test_glusterblock_logs_presence_verification(self):
        """Validate presence of glusterblock provisioner POD and it's status"""

        # Get glusterblock provisioner dc name
        cmd = ("oc get dc | awk '{ print $1 }' | "
               "grep -e glusterblock -e provisioner")
        dc_name = cmd_run(cmd, self.ocp_master_node[0], True)

        # Get glusterblock provisioner pod name and it's status
        gb_prov_name, gb_prov_status = oc_get_custom_resource(
            self.node, 'pod', custom=':.metadata.name,:.status.phase',
            selector='deploymentconfig=%s' % dc_name)[0]
        self.assertEqual(gb_prov_status, 'Running')

        # Create Secret, SC and PVC
        self.create_storage_class()
        self.create_and_wait_for_pvc()

        # Get list of Gluster nodes
        g_hosts = list(g.config.get("gluster_servers", {}).keys())
        self.assertGreater(
            len(g_hosts), 0,
            "We expect, at least, one Gluster Node/POD:\n %s" % g_hosts)

        # Perform checks on Gluster nodes/PODs
        logs = ("gluster-block-configshell", "gluster-blockd")

        gluster_pods = oc_get_pods(
            self.ocp_client[0], selector="glusterfs-node=pod")
        if gluster_pods:
            cmd = "tail -n 5 /var/log/glusterfs/gluster-block/%s.log"
        else:
            cmd = "tail -n 5 /var/log/gluster-block/%s.log"
        for g_host in g_hosts:
            for log in logs:
                out = cmd_run_on_gluster_pod_or_node(
                    self.ocp_client[0], cmd % log, gluster_node=g_host)
                self.assertTrue(out, "Command '%s' output is empty." % cmd)
    def test_glusterblock_logs_presence_verification(self):
        """Validate presence of glusterblock provisioner POD and it's status"""
        gb_prov_cmd = ("oc get pods --all-namespaces "
                       "-l glusterfs=block-%s-provisioner-pod "
                       "-o=custom-columns=:.metadata.name,:.status.phase" % (
                           self.storage_project_name))
        ret, out, err = g.run(self.ocp_client[0], gb_prov_cmd, "root")

        self.assertEqual(ret, 0, "Failed to get Glusterblock provisioner POD.")
        gb_prov_name, gb_prov_status = out.split()
        self.assertEqual(gb_prov_status, 'Running')

        # Create Secret, SC and PVC
        self.create_storage_class()
        self.create_and_wait_for_pvc()

        # Get list of Gluster nodes
        g_hosts = list(g.config.get("gluster_servers", {}).keys())
        self.assertGreater(
            len(g_hosts), 0,
            "We expect, at least, one Gluster Node/POD:\n %s" % g_hosts)

        # Perform checks on Gluster nodes/PODs
        logs = ("gluster-block-configshell", "gluster-blockd")

        gluster_pods = oc_get_pods(
            self.ocp_client[0], selector="glusterfs-node=pod")
        if gluster_pods:
            cmd = "tail -n 5 /var/log/glusterfs/gluster-block/%s.log"
        else:
            cmd = "tail -n 5 /var/log/gluster-block/%s.log"
        for g_host in g_hosts:
            for log in logs:
                out = cmd_run_on_gluster_pod_or_node(
                    self.ocp_client[0], cmd % log, gluster_node=g_host)
                self.assertTrue(out, "Command '%s' output is empty." % cmd)
def restart_gluster_vol_brick_processes(ocp_client_node, file_vol,
                                        gluster_nodes):
    """Restarts brick process of a file volume.

    Args:
        ocp_client_node (str): Node to execute OCP commands on.
        file_vol (str): file volume name.
        gluster_nodes (str/list): One or several IPv4 addresses of Gluster
            nodes, where 'file_vol' brick processes must be recreated.
    """
    if not isinstance(gluster_nodes, (list, set, tuple)):
        gluster_nodes = [gluster_nodes]

    # Get Gluster vol brick PIDs
    gluster_volume_status = get_gluster_vol_status(file_vol)
    pids = []
    for gluster_node in gluster_nodes:
        pid = None
        for g_node, g_node_data in gluster_volume_status.items():
            if g_node != gluster_node:
                continue
            for process_name, process_data in g_node_data.items():
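                # Brick processes in the volume status data are keyed by
                # brick path (starting with "/var"); skip the non-brick
                # daemon entries.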
                if not process_name.startswith("/var"):
                    continue
                pid = process_data["pid"]
                # When a brick is down, its PID is reported as -1, which is
                # unexpected here. So, add an appropriate assertion.
                assert pid != "-1", (
                    "Got unexpected PID (-1) for '%s' gluster vol on '%s' "
                    "node." % (file_vol, gluster_node))
        assert pid, ("Could not find 'pid' in Gluster vol data for '%s' "
                     "Gluster node. Data: %s" % (
                         gluster_node, gluster_volume_status))
        pids.append((gluster_node, pid))

    # Restart Gluster vol brick processes using found PIDs
    for gluster_node, pid in pids:
        cmd = "kill -9 %s" % pid
        cmd_run_on_gluster_pod_or_node(ocp_client_node, cmd, gluster_node)

    # Wait for Gluster vol brick processes to be recreated
    for gluster_node, pid in pids:
        killed_pid_cmd = "ps -eaf | grep %s | grep -v grep | awk '{print $2}'"
        _waiter = waiter.Waiter(timeout=60, interval=2)
        for w in _waiter:
            result = cmd_run_on_gluster_pod_or_node(
                ocp_client_node, killed_pid_cmd, gluster_node)
            if result.strip() == pid:
                continue
            g.log.info("Brick process '%s' was killed successfully on '%s'" % (
                pid, gluster_node))
            break
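        # waiter.Waiter sets 'expired' once the 60 second timeout runs out
        # without the loop breaking, i.e. the brick PID never disappeared.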
        if w.expired:
            error_msg = ("Process ID '%s' still exists on '%s' after waiting "
                         "for it 60 seconds to get killed." % (
                            pid, gluster_node))
            g.log.error(error_msg)
            raise exceptions.ExecutionError(error_msg)

    # Start volume after gluster vol brick processes recreation
    ret, out, err = volume_start(
        "auto_get_gluster_endpoint", file_vol, force=True)
    if ret != 0:
        err_msg = "Failed to start gluster volume %s on %s. error: %s" % (
            file_vol, gluster_node, err)
        g.log.error(err_msg)
        raise AssertionError(err_msg)
    def test_prometheous_kill_bhv_brick_process(self):
        """Validate kill brick process of block hosting
        volume with prometheus workload running"""

        # Add check for CRS version
        openshift_ops.switch_oc_project(self._master,
                                        self._registry_project_name)
        if not self.is_containerized_gluster():
            self.skipTest("Skipping this test case as CRS"
                          " version check can not be implemented")

        # Get one of the prometheus pod name and respective pvc name
        openshift_ops.switch_oc_project(self._master,
                                        self._prometheus_project_name)
        prometheus_pods = openshift_ops.oc_get_pods(
            self._master, selector=self._prometheus_resources_selector)
        if not prometheus_pods:
            self.skipTest(prometheus_pods, "Skipping test as prometheus"
                          " pod is not present")

        # Validate iscsi and multipath
        prometheus_pod = list(prometheus_pods.keys())[0]
        pvc_name = openshift_ops.oc_get_custom_resource(
            self._master, "pod",
            ":.spec.volumes[*].persistentVolumeClaim.claimName",
            prometheus_pod)
        self.assertTrue(pvc_name, "Failed to get PVC name")
        pvc_name = pvc_name[0]
        self.verify_iscsi_sessions_and_multipath(
            pvc_name,
            prometheus_pod,
            rtype='pod',
            heketi_server_url=self._registry_heketi_server_url,
            is_registry_gluster=True)

        # Try to fetch metric from prometheus pod
        self._fetch_metric_from_promtheus_pod(
            metric='heketi_device_brick_count')

        # Kill the brick process of a BHV
        gluster_node = list(self._registry_servers_info.keys())[0]
        openshift_ops.switch_oc_project(self._master,
                                        self._registry_project_name)
        bhv_name = self.get_block_hosting_volume_by_pvc_name(
            pvc_name,
            heketi_server_url=self._registry_heketi_server_url,
            gluster_node=gluster_node,
            ocp_client_node=self._master)
        vol_status = gluster_ops.get_gluster_vol_status(bhv_name)
        gluster_node_ip, brick_pid = None, None
        for g_node, g_node_data in vol_status.items():
            for process_name, process_data in g_node_data.items():
                if process_name.startswith("/var"):
                    gluster_node_ip = g_node
                    brick_pid = process_data["pid"]
                    break
            if gluster_node_ip and brick_pid:
                break
        self.assertIsNotNone(brick_pid, "Could not find pid for brick")
        cmd = "kill -9 {}".format(brick_pid)
        openshift_ops.cmd_run_on_gluster_pod_or_node(self._master, cmd,
                                                     gluster_node_ip)
        self.addCleanup(self._guster_volume_cleanup, bhv_name)

        # Check if the brick-process has been killed
        killed_pid_cmd = ("ps -p {} -o pid --no-headers".format(brick_pid))
        try:
            openshift_ops.cmd_run_on_gluster_pod_or_node(
                self._master, killed_pid_cmd, gluster_node_ip)
        except exceptions.ExecutionError:
            g.log.info("Brick process {} was killed"
                       "successfully".format(brick_pid))

        # Try to fetch metric from prometheus pod
        openshift_ops.switch_oc_project(self._master,
                                        self._prometheus_project_name)
        self._fetch_metric_from_promtheus_pod(
            metric='heketi_device_brick_count')

        # Start the bhv using force
        openshift_ops.switch_oc_project(self._master,
                                        self._registry_project_name)
        start_vol, _, _ = volume_ops.volume_start(gluster_node_ip,
                                                  bhv_name,
                                                  force=True)
        self.assertFalse(
            start_vol, "Failed to start volume {}"
            " using force".format(bhv_name))

        # Validate iscsi and multipath
        openshift_ops.switch_oc_project(self._master,
                                        self._prometheus_project_name)
        self.verify_iscsi_sessions_and_multipath(
            pvc_name,
            prometheus_pod,
            rtype='pod',
            heketi_server_url=self._registry_heketi_server_url,
            is_registry_gluster=True)

        # Try to fetch metric from prometheus pod
        self._fetch_metric_from_promtheus_pod(
            metric='heketi_device_brick_count')
def restart_gluster_vol_brick_processes(ocp_client_node, file_vol,
                                        gluster_nodes):
    """Restarts brick process of a file volume.

    Args:
        ocp_client_node (str): Node to execute OCP commands on.
        file_vol (str): file volume name.
        gluster_nodes (str/list): One or several IPv4 addresses of Gluster
            nodes, where 'file_vol' brick processes must be recreated.
    """
    if not isinstance(gluster_nodes, (list, set, tuple)):
        gluster_nodes = [gluster_nodes]

    # Get Gluster vol brick PIDs
    gluster_volume_status = get_gluster_vol_status(file_vol)
    pids = []
    for gluster_node in gluster_nodes:
        pid = None
        for g_node, g_node_data in gluster_volume_status.items():
            if g_node != gluster_node:
                continue
            for process_name, process_data in g_node_data.items():
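                # Brick processes in the volume status data are keyed by
                # brick path (starting with "/var"); skip the non-brick
                # daemon entries.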
                if not process_name.startswith("/var"):
                    continue
                pid = process_data["pid"]
                # When a brick is down, its PID is reported as -1, which is
                # unexpected here. So, add an appropriate assertion.
                assert pid != "-1", (
                    "Got unexpected PID (-1) for '%s' gluster vol on '%s' "
                    "node." % (file_vol, gluster_node))
        assert pid, ("Could not find 'pid' in Gluster vol data for '%s' "
                     "Gluster node. Data: %s" %
                     (gluster_node, gluster_volume_status))
        pids.append((gluster_node, pid))

    # Restart Gluster vol brick processes using found PIDs
    for gluster_node, pid in pids:
        cmd = "kill -9 %s" % pid
        cmd_run_on_gluster_pod_or_node(ocp_client_node, cmd, gluster_node)

    # Wait for Gluster vol brick processes to be recreated
    for gluster_node, pid in pids:
        killed_pid_cmd = "ps -eaf | grep %s | grep -v grep | awk '{print $2}'"
        _waiter = waiter.Waiter(timeout=60, interval=2)
        for w in _waiter:
            result = cmd_run_on_gluster_pod_or_node(ocp_client_node,
                                                    killed_pid_cmd,
                                                    gluster_node)
            if result.strip() == pid:
                continue
            g.log.info("Brick process '%s' was killed successfully on '%s'" %
                       (pid, gluster_node))
            break
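        # waiter.Waiter sets 'expired' once the 60 second timeout runs out
        # without the loop breaking, i.e. the brick PID never disappeared.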
        if w.expired:
            error_msg = ("Process ID '%s' still exists on '%s' after waiting "
                         "for it 60 seconds to get killed." %
                         (pid, gluster_node))
            g.log.error(error_msg)
            raise exceptions.ExecutionError(error_msg)

    # Start volume after gluster vol brick processes recreation
    ret, out, err = volume_start("auto_get_gluster_endpoint",
                                 file_vol,
                                 force=True)
    if ret != 0:
        err_msg = "Failed to start gluster volume %s on %s. error: %s" % (
            file_vol, gluster_node, err)
        g.log.error(err_msg)
        raise AssertionError(err_msg)
    def dynamic_provisioning_glusterfile(self, create_vol_name_prefix):
        # Create secret and storage class
        self.create_storage_class(
            create_vol_name_prefix=create_vol_name_prefix)

        # Create PVC
        pvc_name = self.create_and_wait_for_pvc()

        # Create DC with an app POD and attach the PVC to it.
        dc_name = oc_create_app_dc_with_io(
            self.node, pvc_name, image=self.io_container_image_cirros)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)

        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        # Verify Heketi volume name for prefix presence if provided
        if create_vol_name_prefix:
            ret = verify_volume_name_prefix(self.node,
                                            self.sc['volumenameprefix'],
                                            self.sc['secretnamespace'],
                                            pvc_name, self.sc['resturl'])
            self.assertTrue(ret, "verify volnameprefix failed")
        else:
            # Get the volume name and volume id from PV
            pv_name = get_pv_name_from_pvc(self.ocp_client[0], pvc_name)
            custom = [
                r':spec.glusterfs.path', r':metadata.annotations.'
                r'"gluster\.kubernetes\.io\/heketi-volume-id"'
            ]
            pv_vol_name, vol_id = oc_get_custom_resource(
                self.ocp_client[0], 'pv', custom, pv_name)

            # Check that the PV volume name is present in heketi and
            # equals "vol_" + volume id
            heketi_vol_name = heketi_volume_info(self.ocp_client[0],
                                                 self.heketi_server_url,
                                                 vol_id,
                                                 json=True)['name']
            self.assertEqual(
                pv_vol_name, heketi_vol_name,
                'Volume with vol_id = %s not found '
                'in heketidb' % vol_id)
            self.assertEqual(
                heketi_vol_name, 'vol_' + vol_id,
                'Volume with vol_id = %s has a '
                'custom prefix' % vol_id)
            out = cmd_run_on_gluster_pod_or_node(self.ocp_master_node[0],
                                                 "gluster volume list")
            self.assertIn(pv_vol_name, out,
                          "Volume with id %s does not exist" % vol_id)

        # Make sure we are able to work with files on the mounted volume
        filepath = "/mnt/file_for_testing_io.log"
        for cmd in ("dd if=/dev/urandom of=%s bs=1K count=100", "ls -lrt %s",
                    "rm -rf %s"):
            cmd = cmd % filepath
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0,
                "Failed to execute '%s' command on %s" % (cmd, self.node))
    def test_expand_arbiter_volume_according_to_avg_file_size(
            self, avg_file_size, expected_brick_size, vol_expand=True):
        """Validate expansion of arbiter volume with diff avg file size"""
        data_hosts = []
        arbiter_hosts = []

        # Set arbiter tags: 'disabled' on the first two nodes (data
        # bricks) and 'required' on the rest (arbiter bricks)
        for i, node_id in enumerate(self.node_id_list):
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url, 'node',
                node_id, 'disabled' if i < 2 else 'required')

            node_info = heketi_ops.heketi_node_info(
                self.heketi_client_node, self.heketi_server_url,
                node_id, json=True)
            (data_hosts.append(node_info['hostnames']['storage'][0])
                if i < 2 else
                arbiter_hosts.append(node_info['hostnames']['storage'][0]))
            self.assertEqual(
                node_info['tags']['arbiter'],
                'disabled' if i < 2 else 'required')

        # Create sc with gluster arbiter info
        self.create_storage_class(
            is_arbiter_vol=True, allow_volume_expansion=True,
            arbiter_avg_file_size=avg_file_size)

        # Create PVC and wait for it to be in 'Bound' state
        self.create_and_wait_for_pvc()

        vol_expanded = False

        for i in range(2):
            vol_info = get_gluster_vol_info_by_pvc_name(
                self.node, self.pvc_name)
            bricks = (
                self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
                    vol_info,
                    arbiter_bricks=(2 if vol_expanded else 1),
                    data_bricks=(4 if vol_expanded else 2)
                )
            )

            # verify that arbiter bricks lie on arbiter hosts
            for brick in bricks['arbiter_list']:
                ip, brick_name = brick['name'].split(':')
                self.assertIn(ip, arbiter_hosts)
                # verify the size of arbiter brick
                cmd = "df -h %s --output=size | tail -1" % brick_name
                out = cmd_run_on_gluster_pod_or_node(self.node, cmd, ip)
                self.assertEqual(out, expected_brick_size)
            # verify that data bricks lie on data hosts
            for brick in bricks['data_list']:
                self.assertIn(brick['name'].split(':')[0], data_hosts)

            if vol_expanded or not vol_expand:
                break
            # Expand PVC and verify the size
            pvc_size = 2
            resize_pvc(self.node, self.pvc_name, pvc_size)
            verify_pvc_size(self.node, self.pvc_name, pvc_size)
            vol_expanded = True
    def test_targetcli_when_block_hosting_volume_down(self):
        """Validate no inconsistencies occur in targetcli when block volumes
           are created with one block hosting volume down."""
        h_node, h_server = self.heketi_client_node, self.heketi_server_url
        cmd = ("targetcli ls | egrep '%s' || echo unavailable")
        error_msg = ("targetcli has inconsistencies when block devices are "
                     "created with one block hosting volume %s is down")

        # Delete BHV which has no BV or fill it completely
        bhv_list = get_block_hosting_volume_list(h_node, h_server).keys()
        for bhv in bhv_list:
            bhv_info = heketi_volume_info(h_node, h_server, bhv, json=True)
            if not bhv_info["blockinfo"].get("blockvolume", []):
                heketi_volume_delete(h_node, h_server, bhv)
                continue
            free_size = bhv_info["blockinfo"].get("freesize", 0)
            if free_size:
                bv = heketi_volume_create(h_node,
                                          h_server,
                                          free_size,
                                          json=True)
                self.addCleanup(heketi_volume_delete, h_node, h_server,
                                bv["id"])

        # Create BV
        bv = heketi_blockvolume_create(h_node, h_server, 2, json=True)
        self.addCleanup(heketi_blockvolume_delete, h_node, h_server, bv["id"])

        # Bring down BHV
        bhv_name = get_block_hosting_volume_name(h_node, h_server, bv["id"])
        ret, out, err = volume_stop("auto_get_gluster_endpoint", bhv_name)
        if ret != 0:
            err_msg = "Failed to stop gluster volume %s. error: %s" % (
                bhv_name, err)
            g.log.error(err_msg)
            raise AssertionError(err_msg)
        self.addCleanup(podcmd.GlustoPod()(volume_start),
                        "auto_get_gluster_endpoint", bhv_name)

        ocp_node = self.ocp_master_node[0]
        gluster_block_svc = "gluster-block-target"
        self.addCleanup(wait_for_service_status_on_gluster_pod_or_node,
                        ocp_node,
                        gluster_block_svc,
                        "active",
                        "exited",
                        gluster_node=self.gluster_servers[0])
        self.addCleanup(restart_service_on_gluster_pod_or_node, ocp_node,
                        gluster_block_svc, self.gluster_servers[0])
        for condition in ("continue", "break"):
            restart_service_on_gluster_pod_or_node(
                ocp_node,
                gluster_block_svc,
                gluster_node=self.gluster_servers[0])
            wait_for_service_status_on_gluster_pod_or_node(
                ocp_node,
                gluster_block_svc,
                "active",
                "exited",
                gluster_node=self.gluster_servers[0])

            targetcli = cmd_run_on_gluster_pod_or_node(ocp_node,
                                                       cmd % bv["id"],
                                                       self.gluster_servers[0])
            if condition == "continue":
                self.assertEqual(targetcli, "unavailable",
                                 error_msg % bhv_name)
            else:
                self.assertNotEqual(targetcli, "unavailable",
                                    error_msg % bhv_name)
                break

            # Bring up the same BHV
            ret, out, err = volume_start("auto_get_gluster_endpoint", bhv_name)
            if ret != 0:
                err = "Failed to start gluster volume %s on %s. error: %s" % (
                    bhv_name, h_node, err)
                raise exceptions.ExecutionError(err)