    def _validate_brick_placement_in_correct_zone_or_with_expand_pvc(
            self, heketi_zone_checking, pvc_name, zone_count, expand=False):
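        """Validate that the PVC bricks are placed in the expected zones.

        With 'strict' zone checking, the number of unique zones hosting
        the PVC bricks must equal 'zone_count', capped by the number of
        bricks. When 'expand' is True, the PVC is resized once and the
        placement check is repeated.
        """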
        online_nodes = self._get_online_nodes()

        for i in range(2):
            # Validate brick placement if heketi zone checking is 'strict'
            if heketi_zone_checking == 'strict':
                brick_hosts_ips = (
                    openshift_ops.get_gluster_host_ips_by_pvc_name(
                        self.node, pvc_name))
                placement_zones = {}
                for brick_host_ip in brick_hosts_ips:
                    for node_zone, node_ips in online_nodes:
                        if brick_host_ip not in node_ips:
                            continue
                        placement_zones[node_zone] = placement_zones.get(
                            node_zone, 0) + 1
                        break
                actual_zone_count = len(placement_zones)
                # NOTE(vponomar): a volume has 3 replicas by default, and
                # bricks cannot span more unique zones than there are
                # replicas/bricks.
                brick_number = len(brick_hosts_ips)
                expected_zone_count = min(brick_number, zone_count)
                self.assertEqual(
                    expected_zone_count, actual_zone_count,
                    "PVC '%s' is incorrectly placed on the Heketi nodes "
                    "according to their zones. Expected '%s' unique zones, "
                    "got '%s'." % (
                        pvc_name, expected_zone_count, actual_zone_count))

            # Expand PVC if needed
            if expand:
                expand_size, expand = 2, False
                openshift_storage_libs.enable_pvc_resize(self.node)
                openshift_ops.resize_pvc(self.node, pvc_name, expand_size)
                openshift_ops.verify_pvc_size(self.node, pvc_name, expand_size)
            else:
                break

    def test_dynamic_provisioning_glusterfile_gluster_pod_or_node_failure(
            self):
        """Create glusterfile PVC when gluster pod or node is down."""
        mount_path = "/mnt"
        datafile_path = '%s/fake_file_for_%s' % (mount_path, self.id())

        # Create secret and storage class
        self.create_storage_class()

        # Create PVC
        pvc_name = self.create_and_wait_for_pvc()

        # Create app POD with attached volume
        pod_name = oc_create_tiny_pod_with_volume(
            self.node,
            pvc_name,
            "test-pvc-mount-on-app-pod",
            mount_path=mount_path,
            image=self.io_container_image_cirros)
        self.addCleanup(wait_for_resource_absence, self.node, 'pod', pod_name)
        self.addCleanup(oc_delete, self.node, 'pod', pod_name)

        # Wait for app POD be up and running
        wait_for_pod_be_ready(self.node, pod_name, timeout=60, wait_step=2)

        # Run IO in background
        io_cmd = "oc rsh %s dd if=/dev/urandom of=%s bs=1000K count=900" % (
            pod_name, datafile_path)
        async_io = g.run_async(self.node, io_cmd, "root")
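        # ~900 MB of random data is written in the background; the dd exit
        # status is checked only after the gluster pod/node disruption below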

        # Check for containerized Gluster
        if self.is_containerized_gluster():
            # Pick up one of the hosts which stores PV brick (4+ nodes case)
            gluster_pod_data = get_gluster_pod_names_by_pvc_name(
                self.node, pvc_name)[0]

            # Delete glusterfs POD from chosen host and wait for
            # spawn of new one
            oc_delete(self.node, 'pod', gluster_pod_data["pod_name"])
            cmd = ("oc get pods -o wide | grep glusterfs | grep %s | "
                   "grep -v Terminating | awk '{print $1}'") % (
                       gluster_pod_data["pod_hostname"])
            for w in Waiter(600, 15):
                new_gluster_pod_name = self.cmd_run(cmd)
                if new_gluster_pod_name:
                    break
            if w.expired:
                error_msg = "exceeded timeout, new gluster pod not created"
                g.log.error(error_msg)
                raise AssertionError(error_msg)
            g.log.info("new gluster pod name is %s" % new_gluster_pod_name)
            wait_for_pod_be_ready(self.node, new_gluster_pod_name)
        else:
            pvc_hosting_node_ip = get_gluster_host_ips_by_pvc_name(
                self.node, pvc_name)[0]
            heketi_nodes = heketi_node_list(self.heketi_client_node,
                                            self.heketi_server_url)
            node_ip_for_reboot = None
            for heketi_node in heketi_nodes:
                heketi_node_ip = heketi_node_info(
                    self.heketi_client_node,
                    self.heketi_server_url,
                    heketi_node,
                    json=True)["hostnames"]["storage"][0]
                if heketi_node_ip == pvc_hosting_node_ip:
                    node_ip_for_reboot = heketi_node_ip
                    break

            if not node_ip_for_reboot:
                raise AssertionError(
                    "Gluster node IP %s did not match any Heketi node "
                    "storage IP" % pvc_hosting_node_ip)

            node_reboot_by_command(node_ip_for_reboot)
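            # The rebooted node hosts one of the PVC bricks; the background IO
            # is expected to keep running since the volume has other replicas
            # (3 by default)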

        # Check that async IO was not interrupted
        ret, out, err = async_io.async_communicate()
        self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd, self.node))

    def test_check_pvc_placement_based_on_the_heketi_zones(
            self,
            zone_count,
            heketi_zone_checking,
            is_arbiter_vol,
            expand=False):
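        """Check PVC brick placement according to the Heketi zones.

        Creates a storage class with the requested
        'user.heketi.zone-checking' mode, provisions a PVC with it,
        validates that the bricks are spread across the expected zones
        (optionally after a PVC expansion), verifies the related gluster
        volume options and, finally, attaches the PVC to an app DC.
        """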
        # TODO(vponomar): implement setting env vars for the Heketi dc.

        # Check amount of available online heketi nodes
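        # Each 'online_nodes' entry is a (zone, [storage_ip, ...]) tuple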
        online_nodes = self._get_online_nodes()
        node_count = len(online_nodes)

        # Check current amount of the Heketi zones
        actual_heketi_zones_amount = len({n[0] for n in online_nodes})
        if zone_count != actual_heketi_zones_amount:
            if self.allow_heketi_zones_update:
                if zone_count > node_count:
                    self.skipTest("Not enough online nodes '%s' to test '%s' "
                                  "unique Heketi zones." %
                                  (node_count, zone_count))
                heketi_db_data = self._set_heketi_zones(zone_count)
                online_nodes = [
                    (n['Info']['zone'], n['Info']['hostnames']['storage'])
                    for n in heketi_db_data['nodeentries'].values()
                ]
            else:
                self.skipTest(
                    "Required amount of Heketi zones (%s) does not match the "
                    "actual amount (%s), and the "
                    "'common.allow_heketi_zones_update' config option is set "
                    "to 'False'." % (zone_count, actual_heketi_zones_amount))

        # Create storage class setting "user.heketi.zone-checking" option up
        prefix = "autotests-heketi-zones"
        sc_name = self.create_storage_class(
            sc_name_prefix=prefix,
            vol_name_prefix=prefix,
            allow_volume_expansion=expand,
            is_arbiter_vol=is_arbiter_vol,
            heketi_zone_checking=heketi_zone_checking)

        # Create PVC using above storage class
        pvc_name = self.create_and_wait_for_pvc(pvc_name_prefix=prefix,
                                                sc_name=sc_name)

        # Validate brick placement and, if requested, expand the PVC and
        # re-validate using the dedicated helper
        self._validate_brick_placement_in_correct_zone_or_with_expand_pvc(
            heketi_zone_checking, pvc_name, zone_count, expand=expand)

        # Make sure that gluster vol has appropriate option set
        vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
            self.node, pvc_name)
        self.assertIn('user.heketi.zone-checking', vol_info['options'])
        self.assertEqual(vol_info['options']['user.heketi.zone-checking'],
                         heketi_zone_checking)
        if is_arbiter_vol:
            self.assertIn('user.heketi.arbiter', vol_info['options'])
            self.assertEqual(vol_info['options']['user.heketi.arbiter'],
                             'true')

        # Create app DC with the above PVC
        self.create_dc_with_pvc(pvc_name, timeout=120, wait_step=3)