def test_restart_gluster_block_provisioner_pod(self):
        """Restart gluster-block provisioner pod and verify that heketi
        block volumes can be created both before and after the restart.
        """

        # Create a heketi block volume before the restart to prove the
        # provisioner is functional.
        vol_info = heketi_blockvolume_create(self.heketi_client_node,
                                             self.heketi_server_url,
                                             size=5, json=True)
        # NOTE: message previously rendered as "blockvolume" due to a
        # missing space between the concatenated string literals.
        self.assertTrue(vol_info, "Failed to create heketi block "
                        "volume of size 5")
        self.addCleanup(heketi_blockvolume_delete, self.heketi_client_node,
                        self.heketi_server_url, vol_info['id'])

        # Restart the glusterblock provisioner pod: delete it and wait
        # for the old pod to disappear.
        dc_name = "glusterblock-%s-provisioner-dc" % self.storage_project_name
        pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
        oc_delete(self.ocp_master_node[0], 'pod', pod_name)
        wait_for_resource_absence(self.ocp_master_node[0], 'pod', pod_name)

        # Wait until the replacement pod spawned by the DC is ready.
        pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
        wait_for_pod_be_ready(self.ocp_master_node[0], pod_name)

        # Create another heketi block volume to verify volume creation
        # still works after the restart.
        vol_info = heketi_blockvolume_create(self.heketi_client_node,
                                             self.heketi_server_url,
                                             size=2, json=True)
        self.assertTrue(vol_info, "Failed to create heketi block "
                        "volume of size 2")
        heketi_blockvolume_delete(self.heketi_client_node,
                                  self.heketi_server_url,
                                  vol_info['id'])
# ---- Esempio n. 2 (Example 2) ----
    def test_restart_gluster_block_provisioner_pod(self):
        """Restart gluster-block provisioner pod and verify that heketi
        block volumes can be created both before and after the restart.
        """

        # Create a heketi block volume before the restart to prove the
        # provisioner is functional.
        vol_info = heketi_blockvolume_create(self.heketi_client_node,
                                             self.heketi_server_url,
                                             size=5,
                                             json=True)
        # NOTE: message previously rendered as "blockvolume" due to a
        # missing space between the concatenated string literals.
        self.assertTrue(vol_info, "Failed to create heketi block "
                        "volume of size 5")
        self.addCleanup(heketi_blockvolume_delete, self.heketi_client_node,
                        self.heketi_server_url, vol_info['id'])

        # Restart the glusterblock provisioner pod: delete it and wait
        # for the old pod to disappear.
        dc_name = "glusterblock-%s-provisioner-dc" % self.storage_project_name
        pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
        oc_delete(self.ocp_master_node[0], 'pod', pod_name)
        wait_for_resource_absence(self.ocp_master_node[0], 'pod', pod_name)

        # Wait until the replacement pod spawned by the DC is ready.
        pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
        wait_for_pod_be_ready(self.ocp_master_node[0], pod_name)

        # Create another heketi block volume to verify volume creation
        # still works after the restart.
        vol_info = heketi_blockvolume_create(self.heketi_client_node,
                                             self.heketi_server_url,
                                             size=2,
                                             json=True)
        self.assertTrue(vol_info, "Failed to create heketi block "
                        "volume of size 2")
        heketi_blockvolume_delete(self.heketi_client_node,
                                  self.heketi_server_url, vol_info['id'])
    def test_restart_gluster_block_provisioner_pod(self):
        """Restart gluster-block provisioner pod and verify that heketi
        block volumes can be created both before and after the restart.
        """

        # Discover the glusterblock provisioner dc name dynamically
        # instead of assuming a fixed naming scheme.
        cmd = ("oc get dc | awk '{ print $1 }' | "
               "grep -e glusterblock -e provisioner")
        dc_name = command.cmd_run(cmd, self.ocp_master_node[0], True)

        # Create a heketi block volume before the restart to prove the
        # provisioner is functional.
        vol_info = heketi_blockvolume_create(self.heketi_client_node,
                                             self.heketi_server_url,
                                             size=5, json=True)
        # NOTE: message previously rendered as "blockvolume" due to a
        # missing space between the concatenated string literals.
        self.assertTrue(vol_info, "Failed to create heketi block "
                        "volume of size 5")
        self.addCleanup(heketi_blockvolume_delete, self.heketi_client_node,
                        self.heketi_server_url, vol_info['id'])

        # Restart the glusterblock provisioner pod: delete it and wait
        # for the old pod to disappear.
        pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
        oc_delete(self.ocp_master_node[0], 'pod', pod_name)
        wait_for_resource_absence(self.ocp_master_node[0], 'pod', pod_name)

        # Wait until the replacement pod spawned by the DC is ready.
        pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
        wait_for_pod_be_ready(self.ocp_master_node[0], pod_name)

        # Create another heketi block volume to verify volume creation
        # still works after the restart.
        vol_info = heketi_blockvolume_create(self.heketi_client_node,
                                             self.heketi_server_url,
                                             size=2, json=True)
        self.assertTrue(vol_info, "Failed to create heketi block "
                        "volume of size 2")
        heketi_blockvolume_delete(self.heketi_client_node,
                                  self.heketi_server_url,
                                  vol_info['id'])
# ---- Esempio n. 4 (Example 4) ----
    def test_blockvolume_create_no_free_space(self):
        """Validate error is returned when free capacity is exhausted"""

        # Make sure at least one block hosting volume exists by creating
        # a small block volume first.
        small_bv = heketi_blockvolume_create(
            self.heketi_client_node, self.heketi_server_url, 1, json=True)
        self.assertTrue(small_bv, "Failed to create block volume.")
        self.addCleanup(
            heketi_blockvolume_delete, self.heketi_client_node,
            self.heketi_server_url, small_bv['id'])

        # Walk all file volumes, tracking the biggest size seen and
        # collecting debug info for every volume with a non-zero size.
        file_volumes = heketi_volume_list(
            self.heketi_client_node, self.heketi_server_url, json=True)
        self.assertTrue(file_volumes)
        self.assertIn("volumes", file_volumes)
        self.assertTrue(file_volumes["volumes"])
        biggest_size, debug_entries = 0, []
        for file_vol_id in file_volumes["volumes"]:
            vol_data = heketi_volume_info(
                self.heketi_client_node, self.heketi_server_url,
                file_vol_id, json=True)
            vol_size = vol_data.get('size', 0)
            biggest_size = max(biggest_size, vol_size)
            if vol_size:
                debug_entries.append(six.text_type({
                    'id': vol_data.get('id', '?'),
                    'name': vol_data.get('name', '?'),
                    'size': vol_size,
                    'blockinfo': vol_data.get('blockinfo', '?'),
                }))
        self.assertGreater(biggest_size, 0)

        # Ask for a block volume one GiB bigger than the biggest hosting
        # volume; the expected outcome is a creation failure.
        oversized = biggest_size + 1
        try:
            big_bv = heketi_blockvolume_create(
                self.heketi_client_node, self.heketi_server_url,
                oversized, json=True)
        except AssertionError:
            # Creation failed as expected - test passes.
            return

        # Creation unexpectedly succeeded; register cleanup and gather
        # enough context to produce a meaningful failure report.
        if big_bv and big_bv.get('id'):
            self.addCleanup(
                heketi_blockvolume_delete, self.heketi_client_node,
                self.heketi_server_url, big_bv['id'])
        hosting_vol = heketi_volume_info(
            self.heketi_client_node, self.heketi_server_url,
            big_bv.get('blockhostingvolume'), json=True)
        self.assertGreater(
            hosting_vol.get('size', -2), big_bv.get('size', -1),
            ("Block volume unexpectedly was created. "
             "Calculated 'max free size' is '%s'.\nBlock volume info is: %s \n"
             "File volume info, which hosts block volume: \n%s,"
             "Block hosting volumes which were considered: \n%s" % (
                 biggest_size, big_bv, hosting_vol,
                 '\n'.join(debug_entries))))
    def test_blockvolume_create_no_free_space(self):
        """Validate error is returned when free capacity is exhausted"""

        # Create first small blockvolume so at least one block hosting
        # volume exists before we probe capacity.
        blockvol1 = heketi_blockvolume_create(
            self.heketi_client_node, self.heketi_server_url, 1, json=True)
        self.assertTrue(blockvol1, "Failed to create block volume.")
        self.addCleanup(
            heketi_blockvolume_delete, self.heketi_client_node,
            self.heketi_server_url, blockvol1['id'])

        # Get info about block hosting volumes: find the biggest file
        # volume size and collect debug info for non-zero-sized volumes.
        file_volumes = heketi_volume_list(
            self.heketi_client_node, self.heketi_server_url, json=True)
        self.assertTrue(file_volumes)
        self.assertIn("volumes", file_volumes)
        self.assertTrue(file_volumes["volumes"])
        max_block_hosting_vol_size, file_volumes_debug_info = 0, []
        for vol_id in file_volumes["volumes"]:
            vol = heketi_volume_info(
                self.heketi_client_node, self.heketi_server_url,
                vol_id, json=True)
            current_block_hosting_vol_size = vol.get('size', 0)
            if current_block_hosting_vol_size > max_block_hosting_vol_size:
                max_block_hosting_vol_size = current_block_hosting_vol_size
            if current_block_hosting_vol_size:
                file_volumes_debug_info.append(six.text_type({
                    'id': vol.get('id', '?'),
                    'name': vol.get('name', '?'),
                    'size': current_block_hosting_vol_size,
                    'blockinfo': vol.get('blockinfo', '?'),
                }))
        self.assertGreater(max_block_hosting_vol_size, 0)

        # Try to create blockvolume with size bigger than available.
        # An ExecutionError from the heketi CLI is the expected outcome.
        too_big_vol_size = max_block_hosting_vol_size + 1
        try:
            blockvol2 = heketi_blockvolume_create(
                self.heketi_client_node, self.heketi_server_url,
                too_big_vol_size, json=True)
        except ExecutionError:
            # Creation failed as expected - test passes.
            return

        # Creation unexpectedly succeeded: register cleanup and fail
        # with a detailed report of the considered hosting volumes.
        if blockvol2 and blockvol2.get('id'):
            self.addCleanup(
                heketi_blockvolume_delete, self.heketi_client_node,
                self.heketi_server_url, blockvol2['id'])
        block_hosting_vol = heketi_volume_info(
            self.heketi_client_node, self.heketi_server_url,
            blockvol2.get('blockhostingvolume'), json=True)
        self.assertGreater(
            block_hosting_vol.get('size', -2), blockvol2.get('size', -1),
            ("Block volume unexpectedly was created. "
             "Calculated 'max free size' is '%s'.\nBlock volume info is: %s \n"
             "File volume info, which hosts block volume: \n%s,"
             "Block hosting volumes which were considered: \n%s" % (
                 max_block_hosting_vol_size, blockvol2, block_hosting_vol,
                 '\n'.join(file_volumes_debug_info))))
    def test_compare_block_volumes(self):
        """Validate blockvolume count using heketi gluster examine"""
        # Create some blockvolumes of 1 GiB each
        size = 1
        for _ in range(5):
            volume_id = heketi_ops.heketi_blockvolume_create(
                self.heketi_client_node,
                self.heketi_server_url,
                size,
                json=True)['id']
            self.addCleanup(heketi_ops.heketi_blockvolume_delete,
                            self.heketi_client_node, self.heketi_server_url,
                            volume_id)

        # Gather blockvolume ids reported by 'heketi gluster examine'
        # across all cluster entries in the heketi db dump.
        out = heketi_ops.heketi_examine_gluster(self.heketi_client_node,
                                                self.heketi_server_url)
        examine_blockvolumes, clusters = [], out['heketidb']['clusterentries']
        for cluster in clusters.values():
            examine_blockvolumes += cluster['Info']['blockvolumes']

        # Get list of blockvolume ids from 'heketi blockvolume list'
        heketi_block_volumes = heketi_ops.heketi_blockvolume_list(
            self.heketi_client_node, self.heketi_server_url,
            json=True)['blockvolumes']

        # Both sources must report the exact same volume ids
        msg = ("Heketi blockvolume list {} and list of blockvolumes in heketi "
               "gluster examine {} are not same".format(
                   heketi_block_volumes, examine_blockvolumes))
        self.assertEqual(heketi_block_volumes, examine_blockvolumes, msg)
    def test_create_blockvolume_with_different_auth_values(self, auth_value):
        """To validate block volume creation with different auth values"""
        # Block volume create with the requested auth setting
        block_vol = heketi_blockvolume_create(self.heketi_client_node,
                                              self.heketi_server_url,
                                              1,
                                              auth=auth_value,
                                              json=True)
        self.addCleanup(heketi_blockvolume_delete, self.heketi_client_node,
                        self.heketi_server_url, block_vol["id"])

        # Verify username and password are present when auth is enabled
        # and absent (empty) when auth is disabled.
        block_vol_info = heketi_blockvolume_info(self.heketi_client_node,
                                                 self.heketi_server_url,
                                                 block_vol["id"],
                                                 json=True)
        assertion_func = (self.assertNotEqual
                          if auth_value else self.assertEqual)
        assertion_msg_part = "not " if auth_value else ""
        # BUGFIX: the failure messages were tuples of (format, args) with
        # the '%' operator never applied; format them properly.
        assertion_func(block_vol_info["blockvolume"]["username"], "",
                       "Username is %spresent in %s" % (
                           assertion_msg_part, block_vol["id"]))
        assertion_func(block_vol_info["blockvolume"]["password"], "",
                       "Password is %spresent in %s" % (
                           assertion_msg_part, block_vol["id"]))
    def test_block_volume_delete(self):
        """Validate deletion of gluster-block volume and capacity of used pool
        """
        # Create a 1 GiB block volume to delete.
        vol = heketi_blockvolume_create(
            self.heketi_client_node, self.heketi_server_url, 1, json=True)
        # Best-effort cleanup in case the explicit delete below fails.
        self.addCleanup(
            heketi_blockvolume_delete, self.heketi_client_node,
            self.heketi_server_url, vol["id"], raise_on_error=False)

        # Delete the volume explicitly.
        heketi_blockvolume_delete(
            self.heketi_client_node, self.heketi_server_url,
            vol["id"], json=True)

        # The deleted id must no longer appear in the blockvolume list.
        listed = heketi_blockvolume_list(
            self.heketi_client_node, self.heketi_server_url, json=True)
        self.assertNotIn(
            vol["id"], listed["blockvolumes"],
            "The block volume has not been successfully deleted,"
            " ID is %s" % vol["id"])
    def test_creation_of_block_vol_greater_than_the_default_size_of_BHV_neg(
            self):
        """Verify that block volume creation fails when we create block
        volume of size greater than the default size of BHV.
        Verify that block volume creation succeed when we create BHV
        of size greater than the default size of BHV.
        """

        # Reserve 2% (rounded up) on top of the default BHV size to
        # account for heketi's space reservation.
        default_bhv_size = get_default_block_hosting_volume_size(
            self.node, self.heketi_dc_name)
        reserve_size = default_bhv_size * 0.02
        reserve_size = int(math.ceil(reserve_size))

        self.verify_free_space(default_bhv_size + reserve_size + 2)

        # Expected failure path: the heketi CLI raises ExecutionError
        # when the block volume cannot fit in any BHV.
        with self.assertRaises(ExecutionError):
            # create a block vol greater than default BHV size
            bvol_info = heketi_blockvolume_create(
                self.heketi_client_node, self.heketi_server_url,
                (default_bhv_size + 1), json=True)
            # Only reached if creation unexpectedly succeeds - make sure
            # the stray volume gets cleaned up anyway.
            self.addCleanup(
                heketi_blockvolume_delete, self.heketi_client_node,
                self.heketi_server_url, bvol_info['id'])

        sc_name = self.create_storage_class()

        # create a block pvc greater than default BHV size; it must not
        # be provisioned while no big-enough BHV exists.
        pvc_name = oc_create_pvc(
            self.node, sc_name, pvc_size=(default_bhv_size + 1))
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', pvc_name)
        self.addCleanup(
            oc_delete, self.node, 'pvc', pvc_name, raise_on_absence=False)

        wait_for_events(
            self.node, pvc_name, obj_type='PersistentVolumeClaim',
            event_type='Warning', event_reason='ProvisioningFailed')

        # create block hosting volume greater than default BHV size so
        # the pending PVC can now be provisioned.
        vol_info = heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url,
            (default_bhv_size + reserve_size + 2), block=True,
            json=True)
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, vol_info['id'])

        # Cleanup PVC before block hosting volume to avoid failures
        # (addCleanup runs in LIFO order, so these registrations run
        # before the BHV deletion registered above).
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', pvc_name)
        self.addCleanup(
            oc_delete, self.node, 'pvc', pvc_name, raise_on_absence=False)

        verify_pvc_status_is_bound(self.node, pvc_name)
    def test_creation_of_block_vol_greater_than_the_default_size_of_BHV_neg(
            self):
        """Verify that block volume creation fails when we create block
        volume of size greater than the default size of BHV.
        Verify that block volume creation succeed when we create BHV
        of size greater than the default size of BHV.
        """

        # Reserve 2% (rounded up) on top of the default BHV size to
        # account for heketi's space reservation.
        default_bhv_size = get_default_block_hosting_volume_size(
            self.node, self.heketi_dc_name)
        reserve_size = default_bhv_size * 0.02
        reserve_size = int(math.ceil(reserve_size))

        self.verify_free_space(default_bhv_size + reserve_size + 2)

        # Expected failure path: this variant expects AssertionError
        # from the heketi helper when creation fails.
        with self.assertRaises(AssertionError):
            # create a block vol greater than default BHV size
            bvol_info = heketi_blockvolume_create(
                self.heketi_client_node, self.heketi_server_url,
                (default_bhv_size + 1), json=True)
            # Only reached if creation unexpectedly succeeds - make sure
            # the stray volume gets cleaned up anyway.
            self.addCleanup(
                heketi_blockvolume_delete, self.heketi_client_node,
                self.heketi_server_url, bvol_info['id'])

        sc_name = self.create_storage_class()

        # create a block pvc greater than default BHV size; it must not
        # be provisioned while no big-enough BHV exists.
        pvc_name = oc_create_pvc(
            self.node, sc_name, pvc_size=(default_bhv_size + 1))
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', pvc_name)
        self.addCleanup(
            oc_delete, self.node, 'pvc', pvc_name, raise_on_absence=False)

        wait_for_events(
            self.node, pvc_name, obj_type='PersistentVolumeClaim',
            event_type='Warning', event_reason='ProvisioningFailed')

        # create block hosting volume greater than default BHV size so
        # the pending PVC can now be provisioned.
        vol_info = heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url,
            (default_bhv_size + reserve_size + 2), block=True,
            json=True)
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, vol_info['id'])

        # Cleanup PVC before block hosting volume to avoid failures
        # (addCleanup runs in LIFO order, so these registrations run
        # before the BHV deletion registered above).
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', pvc_name)
        self.addCleanup(
            oc_delete, self.node, 'pvc', pvc_name, raise_on_absence=False)

        verify_pvc_status_is_bound(self.node, pvc_name)
    def test_create_block_vol_after_host_vol_creation(self):
        """Validate block-device after manual block hosting volume creation
           using heketi
        """
        # Manually create a 5 GiB block hosting volume.
        hosting_vol = heketi_volume_create(self.heketi_client_node,
                                           self.heketi_server_url,
                                           5, json=True, block=True)
        self.addCleanup(heketi_volume_delete, self.heketi_client_node,
                        self.heketi_server_url, hosting_vol["id"])

        # A block volume should now be creatable on top of it.
        block_vol = heketi_blockvolume_create(self.heketi_client_node,
                                              self.heketi_server_url,
                                              1, json=True)
        self.addCleanup(heketi_blockvolume_delete, self.heketi_client_node,
                        self.heketi_server_url, block_vol["id"])
    def test_block_volume_delete(self):
        """Validate deletion of gluster-block volume and capacity of used pool
        """
        # Create a 1 GiB block volume to delete.
        block_vol = heketi_blockvolume_create(self.heketi_client_node,
                                              self.heketi_server_url,
                                              1,
                                              json=True)
        # Best-effort cleanup in case the explicit delete below fails.
        self.addCleanup(heketi_blockvolume_delete,
                        self.heketi_client_node,
                        self.heketi_server_url,
                        block_vol["id"],
                        raise_on_error=False)

        # Delete the volume explicitly.
        heketi_blockvolume_delete(self.heketi_client_node,
                                  self.heketi_server_url,
                                  block_vol["id"],
                                  json=True)

        # The deleted id must no longer appear in the blockvolume list.
        volume_list = heketi_blockvolume_list(self.heketi_client_node,
                                              self.heketi_server_url,
                                              json=True)
        self.assertNotIn(
            block_vol["id"], volume_list["blockvolumes"],
            "The block volume has not been successfully deleted,"
            " ID is %s" % block_vol["id"])
    def test_create_block_vol_after_host_vol_creation(self):
        """Validate block-device after manual block hosting volume creation
           using heketi
        """
        # Manually create a 5 GiB block hosting volume.
        host_vol_info = heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url, 5,
            json=True, block=True)
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, host_vol_info["id"])

        # A block volume should now be creatable on top of it.
        block_vol = heketi_blockvolume_create(
            self.heketi_client_node, self.heketi_server_url, 1, json=True)
        self.addCleanup(
            heketi_blockvolume_delete, self.heketi_client_node,
            self.heketi_server_url, block_vol["id"])
    def test_block_volume_create_with_name(self):
        """Validate creation of block volume with name"""
        # Use a random suffix so repeated runs do not collide.
        vol_name = "autotests-heketi-volume-%s" % utils.get_random_str()
        block_vol = heketi_blockvolume_create(self.heketi_client_node,
                                              self.heketi_server_url,
                                              1,
                                              name=vol_name,
                                              json=True)
        self.addCleanup(heketi_blockvolume_delete, self.heketi_client_node,
                        self.heketi_server_url, block_vol["id"])

        # check volume name through heketi-cli
        block_vol_info = heketi_blockvolume_info(self.heketi_client_node,
                                                 self.heketi_server_url,
                                                 block_vol["id"],
                                                 json=True)
        # BUGFIX: the failure message was a tuple of (format, args) with
        # the '%' operator never applied; format it properly.
        self.assertEqual(block_vol_info["name"], vol_name,
                         "Block volume Names are not same %s as %s" % (
                             block_vol_info["name"], vol_name))
    def test_block_host_volume_delete_block_volume_delete(self):
        """Validate block volume and BHV removal using heketi"""
        free_space, nodenum = get_total_free_space(self.heketi_client_node,
                                                   self.heketi_server_url)
        # NOTE: skip messages previously rendered without spaces
        # ("nodesonline", "%sis less") due to string concatenation.
        if nodenum < 3:
            self.skipTest("Skipping the test since number of nodes "
                          "online are less than 3")
        free_space_available = int(free_space / nodenum)
        default_bhv_size = get_default_block_hosting_volume_size(
            self.heketi_client_node, self.heketi_dc_name)
        if free_space_available < default_bhv_size:
            self.skipTest("Skipping the test since free_space_available %s "
                          "is less than the default_bhv_size %s" %
                          (free_space_available, default_bhv_size))

        # Create a dedicated block hosting volume with a random name.
        h_volume_name = ("autotests-heketi-volume-%s" % utils.get_random_str())
        block_host_create_info = self.create_heketi_volume_with_name_and_wait(
            h_volume_name, default_bhv_size, json=True, block=True)

        block_vol_size = block_host_create_info["blockinfo"]["freesize"]
        block_hosting_vol_id = block_host_create_info["id"]
        block_vol_info = {"blockhostingvolume": "init_value"}
        # Keep creating block volumes until one lands on the BHV created
        # above - heketi may place a volume on any BHV with free space.
        while block_vol_info['blockhostingvolume'] != block_hosting_vol_id:
            block_vol = heketi_blockvolume_create(self.heketi_client_node,
                                                  self.heketi_server_url,
                                                  block_vol_size,
                                                  json=True,
                                                  ha=3,
                                                  auth=True)
            self.addCleanup(heketi_blockvolume_delete,
                            self.heketi_client_node,
                            self.heketi_server_url,
                            block_vol["id"],
                            raise_on_error=True)
            block_vol_info = heketi_blockvolume_info(self.heketi_client_node,
                                                     self.heketi_server_url,
                                                     block_vol["id"],
                                                     json=True)

        # The BHV must list the block volume that landed on it.
        bhv_info = heketi_volume_info(self.heketi_client_node,
                                      self.heketi_server_url,
                                      block_hosting_vol_id,
                                      json=True)
        self.assertIn(block_vol_info["id"],
                      bhv_info["blockinfo"]["blockvolume"])
    def test_block_volume_list(self):
        """Validate heketi blockvolume list command works as expected"""
        created_vol_ids = []
        for count in range(3):
            block_vol = heketi_blockvolume_create(
                self.heketi_client_node, self.heketi_server_url, 1, json=True)
            self.addCleanup(
                heketi_blockvolume_delete, self.heketi_client_node,
                self.heketi_server_url, block_vol["id"])

            created_vol_ids.append(block_vol["id"])

        volumes = heketi_blockvolume_list(
            self.heketi_client_node, self.heketi_server_url, json=True)

        # BUGFIX: dict.values() returns a non-subscriptable view on
        # Python 3; wrap it in list() before indexing.
        existing_vol_ids = list(volumes.values())[0]
        for vol_id in created_vol_ids:
            self.assertIn(vol_id, existing_vol_ids,
                          "Block vol with '%s' ID is absent in the "
                          "list of block volumes." % vol_id)
    def test_block_volume_list(self):
        """Validate heketi blockvolume list command works as expected"""
        # Create three 1 GiB block volumes and remember their ids.
        expected_ids = []
        for _ in range(3):
            new_vol = heketi_blockvolume_create(self.heketi_client_node,
                                                self.heketi_server_url,
                                                1,
                                                json=True)
            self.addCleanup(heketi_blockvolume_delete, self.heketi_client_node,
                            self.heketi_server_url, new_vol["id"])
            expected_ids.append(new_vol["id"])

        # Fetch the full block volume listing from heketi.
        listing = heketi_blockvolume_list(self.heketi_client_node,
                                          self.heketi_server_url,
                                          json=True)

        # Every created volume must appear in the listing.
        reported_ids = list(listing.values())[0]
        for expected_id in expected_ids:
            self.assertIn(
                expected_id, reported_ids,
                "Block vol with '%s' ID is absent in the "
                "list of block volumes." % expected_id)
    def test_heket_block_volume_info_with_gluster_block_volume_info(self):
        """Verify heketi block volume info against the backend gluster
        block volume info.
        """
        h_node, h_server = self.heketi_client_node, self.heketi_server_url
        vol_size = 1

        # Create an auth-enabled block volume through heketi.
        h_block_vol = heketi_blockvolume_create(h_node,
                                                h_server,
                                                vol_size,
                                                auth=True,
                                                json=True)
        self.addCleanup(heketi_blockvolume_delete, h_node, h_server,
                        h_block_vol["id"])

        # Sort hosts so they compare equal to gluster's sorted list below.
        h_block_vol["blockvolume"]["hosts"].sort()
        h_block_vol_gbid = h_block_vol["blockvolume"]["username"]
        h_block_vol_name = h_block_vol["name"]
        h_block_vol_paswd = h_block_vol["blockvolume"]["password"]
        h_block_vol_hosts = h_block_vol["blockvolume"]["hosts"]
        h_block_host_vol_id = h_block_vol["blockhostingvolume"]
        h_block_vol_ha = h_block_vol["hacount"]

        # Fetch heketi blockhostingvolume info
        h_bhv_info = heketi_volume_info(h_node,
                                        h_server,
                                        h_block_host_vol_id,
                                        json=True)
        err_msg = "Failed to get heketi blockhostingvolume info for {}"
        self.assertTrue(h_bhv_info, err_msg.format(h_block_host_vol_id))
        h_bhv_name = h_bhv_info['name']
        err_msg = "Failed to get heketi BHV name for heketi blockvolume Id {}"
        self.assertTrue(h_bhv_name, err_msg.format(h_block_host_vol_id))

        # Get gluster blockvolume list
        g_block_vol_list = get_block_list('auto_get_gluster_endpoint',
                                          h_bhv_name)
        # BUGFIX: message typo "gluter" -> "gluster".
        err_msg = ("Failed to get gluster blockvolume list {}".format(
            g_block_vol_list))
        self.assertTrue(g_block_vol_list, err_msg)

        err_msg = (
            "Heketi block volume {} not present in gluster blockvolumes {}".
            format(h_block_vol_name, g_block_vol_list))
        self.assertIn(h_block_vol_name, g_block_vol_list, err_msg)

        g_block_info = get_block_info('auto_get_gluster_endpoint', h_bhv_name,
                                      h_block_vol_name)
        g_block_info["EXPORTED ON"].sort()

        g_block_vol_hosts = g_block_info["EXPORTED ON"]
        g_block_vol_gbid = g_block_info["GBID"]
        g_block_vol_name = g_block_info["NAME"]
        g_block_vol_paswd = g_block_info["PASSWORD"]
        # Strip the 4-char prefix from the hosting volume name to get the
        # id (presumably "vol_" - TODO confirm against gluster output).
        g_block_vol_id = g_block_info["VOLUME"][4:]
        g_block_vol_ha = g_block_info["HA"]

        # Verify block device info against glusterblock volume info.
        # CLEANUP: err_msg was previously also passed as a redundant
        # (ignored) extra argument to each .format() call.
        err_msg = "Did not match {} from heketi {} and gluster {} side"
        self.assertEqual(
            h_block_vol_gbid, g_block_vol_gbid,
            err_msg.format("GBID", h_block_vol_gbid, g_block_vol_gbid))
        self.assertEqual(
            h_block_vol_name, g_block_vol_name,
            err_msg.format("blockvolume", h_block_vol_name, g_block_vol_name))
        self.assertEqual(
            h_block_vol_paswd, g_block_vol_paswd,
            err_msg.format("password", h_block_vol_paswd, g_block_vol_paswd))
        self.assertEqual(
            h_block_vol_hosts, g_block_vol_hosts,
            err_msg.format("hosts", h_block_vol_hosts, g_block_vol_hosts))
        self.assertEqual(
            h_block_host_vol_id, g_block_vol_id,
            err_msg.format("blockhost vol id", h_block_host_vol_id,
                           g_block_vol_id))
        self.assertEqual(
            h_block_vol_ha, g_block_vol_ha,
            err_msg.format("ha", h_block_vol_ha, g_block_vol_ha))
    def test_targetcli_when_block_hosting_volume_down(self):
        """Validate no inconsistencies occur in targetcli when block volumes
           are created with one block hosting volume down."""
        h_node, h_server = self.heketi_client_node, self.heketi_server_url
        cmd = ("targetcli ls | egrep '%s' || echo unavailable")
        error_msg = ("targetcli has inconsistencies when block devices are "
                     "created with one block hosting volume %s is down")

        # Delete BHV which has no BV or fill it completely, so the BV
        # created below lands on a single, predictable BHV
        bhv_list = get_block_hosting_volume_list(h_node, h_server).keys()
        for bhv in bhv_list:
            bhv_info = heketi_volume_info(h_node, h_server, bhv, json=True)
            if not bhv_info["blockinfo"].get("blockvolume", []):
                heketi_volume_delete(h_node, h_server, bhv)
                continue
            free_size = bhv_info["blockinfo"].get("freesize", 0)
            if free_size:
                # Consume the BHV's remaining block space with a block
                # volume; a plain file volume (heketi_volume_create) would
                # not reduce the BHV's "blockinfo" free size.
                bv = heketi_blockvolume_create(h_node,
                                               h_server,
                                               free_size,
                                               json=True)
                self.addCleanup(heketi_blockvolume_delete, h_node, h_server,
                                bv["id"])

        # Create BV
        bv = heketi_blockvolume_create(h_node, h_server, 2, json=True)
        self.addCleanup(heketi_blockvolume_delete, h_node, h_server, bv["id"])

        # Bring down the BHV hosting the new BV
        bhv_name = get_block_hosting_volume_name(h_node, h_server, bv["id"])
        ret, out, err = volume_stop("auto_get_gluster_endpoint", bhv_name)
        if ret != 0:
            err_msg = "Failed to stop gluster volume %s. error: %s" % (
                bhv_name, err)
            g.log.error(err_msg)
            raise AssertionError(err_msg)
        self.addCleanup(podcmd.GlustoPod()(volume_start),
                        "auto_get_gluster_endpoint", bhv_name)

        ocp_node = self.ocp_master_node[0]
        gluster_block_svc = "gluster-block-target"
        # Cleanups run LIFO: restart the target service first, then wait
        # until it reports active/exited
        self.addCleanup(wait_for_service_status_on_gluster_pod_or_node,
                        ocp_node,
                        gluster_block_svc,
                        "active",
                        "exited",
                        gluster_node=self.gluster_servers[0])
        self.addCleanup(restart_service_on_gluster_pod_or_node, ocp_node,
                        gluster_block_svc, self.gluster_servers[0])
        # Round "continue": BHV is down -> the BV target must be absent.
        # Round "break": BHV restarted -> the BV target must be back.
        for condition in ("continue", "break"):
            restart_service_on_gluster_pod_or_node(
                ocp_node,
                gluster_block_svc,
                gluster_node=self.gluster_servers[0])
            wait_for_service_status_on_gluster_pod_or_node(
                ocp_node,
                gluster_block_svc,
                "active",
                "exited",
                gluster_node=self.gluster_servers[0])

            targetcli = cmd_run_on_gluster_pod_or_node(ocp_node,
                                                       cmd % bv["id"],
                                                       self.gluster_servers[0])
            if condition == "continue":
                self.assertEqual(targetcli, "unavailable",
                                 error_msg % bhv_name)
            else:
                self.assertNotEqual(targetcli, "unavailable",
                                    error_msg % bhv_name)
                break

            # Bring up the same BHV
            ret, out, err = volume_start("auto_get_gluster_endpoint", bhv_name)
            if ret != 0:
                err = "Failed to start gluster volume %s on %s. error: %s" % (
                    bhv_name, h_node, err)
                raise exceptions.ExecutionError(err)
    def test_create_max_num_blockhostingvolumes(self):
        """Create BHVs until the cluster's free space is exhausted, fill each
        with block volumes, verify gluster/heketi services stay healthy, then
        delete everything and verify nothing is left behind.
        """
        num_of_bv = 10
        new_bhv_list, bv_list, g_nodes = [], [], []
        free_space, nodenum = get_total_free_space(self.heketi_client_node,
                                                   self.heketi_server_url)
        if nodenum < 3:
            self.skipTest("Skip the test case since number of"
                          "online nodes is less than 3.")
        free_space_available = int(free_space / nodenum)
        default_bhv_size = get_default_block_hosting_volume_size(
            self.heketi_client_node, self.heketi_dc_name)
        # Get existing list of BHV's
        existing_bhv_list = get_block_hosting_volume_list(
            self.heketi_client_node, self.heketi_server_url)

        # Skip the test if available space is less than default_bhv_size
        if free_space_available < default_bhv_size:
            self.skipTest("Skip the test case since free_space_available %s"
                          "is less than space_required_for_bhv %s ." %
                          (free_space_available, default_bhv_size))

        # Create BHV's until the computed free space is used up
        while free_space_available > default_bhv_size:
            block_host_create_info = heketi_volume_create(
                self.heketi_client_node,
                self.heketi_server_url,
                default_bhv_size,
                json=True,
                block=True)
            # Track only BHVs created by this test for the final checks
            if block_host_create_info["id"] not in existing_bhv_list.keys():
                new_bhv_list.append(block_host_create_info["id"])
            self.addCleanup(heketi_volume_delete,
                            self.heketi_client_node,
                            self.heketi_server_url,
                            block_host_create_info["id"],
                            raise_on_error=False)
            block_vol_size = int(
                block_host_create_info["blockinfo"]["freesize"] / num_of_bv)

            # Create specified number of BV's in BHV's created
            for i in range(0, num_of_bv):
                block_vol = heketi_blockvolume_create(self.heketi_client_node,
                                                      self.heketi_server_url,
                                                      block_vol_size,
                                                      json=True,
                                                      ha=3,
                                                      auth=True)
                self.addCleanup(heketi_blockvolume_delete,
                                self.heketi_client_node,
                                self.heketi_server_url,
                                block_vol["id"],
                                raise_on_error=False)
                bv_list.append(block_vol["id"])
            free_space_available = int(free_space_available - default_bhv_size)

        # Get gluster node ips (first two heketi nodes only)
        h_nodes_ids = heketi_node_list(self.heketi_client_node,
                                       self.heketi_server_url)
        for h_node in h_nodes_ids[:2]:
            g_node = heketi_node_info(self.heketi_client_node,
                                      self.heketi_server_url,
                                      h_node,
                                      json=True)
            g_nodes.append(g_node['hostnames']['manage'][0])

        # Check if there is no crash in gluster related services & heketi
        services = (("glusterd", "running"), ("gluster-blockd", "running"),
                    ("tcmu-runner", "running"), ("gluster-block-target",
                                                 "exited"))
        for g_node in g_nodes:
            for service, state in services:
                wait_for_service_status_on_gluster_pod_or_node(
                    self.ocp_client[0],
                    service,
                    'active',
                    state,
                    g_node,
                    raise_on_error=False)
            out = hello_heketi(self.heketi_client_node, self.heketi_server_url)
            self.assertTrue(
                out, "Heketi server %s is not alive" % self.heketi_server_url)

        # Delete all the BHV's and BV's created
        for bv_volume in bv_list:
            heketi_blockvolume_delete(self.heketi_client_node,
                                      self.heketi_server_url, bv_volume)

        # Check if any blockvolume exist in heketi & gluster
        for bhv_volume in new_bhv_list[:]:
            heketi_vol_info = heketi_volume_info(self.heketi_client_node,
                                                 self.heketi_server_url,
                                                 bhv_volume,
                                                 json=True)
            self.assertNotIn("blockvolume",
                             heketi_vol_info["blockinfo"].keys())
            gluster_vol_info = get_block_list('auto_get_gluster_endpoint',
                                              volname="vol_%s" % bhv_volume)
            self.assertIsNotNone(gluster_vol_info,
                                 "Failed to get volume info %s" % bhv_volume)
            new_bhv_list.remove(bhv_volume)
            for blockvol in gluster_vol_info:
                self.assertNotIn("blockvol_", blockvol)
            # Delete the now-empty BHV exactly once per BHV. With this call
            # nested inside the loop above it would never run when the block
            # list is empty and would attempt repeated deletes otherwise.
            heketi_volume_delete(self.heketi_client_node,
                                 self.heketi_server_url, bhv_volume)

        # Check if all blockhosting volumes are deleted from heketi
        self.assertFalse(new_bhv_list)
    def test_heketi_device_replacement_in_node(self):
        """Validate device replacement operation on single node"""

        h_client, h_server = self.heketi_client_node, self.heketi_server_url

        # Pick the first configured gluster server and its spare disk; skip
        # when the configuration does not provide one.
        try:
            gluster_server_0 = list(g.config["gluster_servers"].values())[0]
            manage_hostname = gluster_server_0["manage"]
            add_device_name = gluster_server_0["additional_devices"][0]
        except (KeyError, IndexError):
            self.skipTest(
                "Additional disk is not specified for node with following "
                "hostnames and IP addresses: {}, {}".format(
                    gluster_server_0.get('manage', '?'),
                    gluster_server_0.get('storage', '?')))

        # Snapshot the current volume list so stale volumes created by this
        # test can be cleaned afterwards.
        existing_volumes = heketi_volume_list(h_client, h_server, json=True)
        self.addCleanup(
            self._cleanup_heketi_volumes, existing_volumes.get("volumes"))

        # Keep only the first three heketi nodes enabled.
        node_id_list = heketi_node_list(h_client, h_server)
        for surplus_node in node_id_list[3:]:
            heketi_node_disable(h_client, h_server, surplus_node)
            self.addCleanup(
                heketi_node_enable, h_client, h_server, surplus_node)

        # Create block volumes while exactly 3 nodes are online.
        vol_size, vol_count = 2, 4
        for _ in range(vol_count):
            created_vol = heketi_blockvolume_create(
                h_client, h_server, vol_size, json=True)
            self.addCleanup(heketi_blockvolume_delete, h_client, h_server,
                            created_vol['id'])

        # Resolve the heketi node ID of the chosen gluster host.
        topology_info = heketi_topology_info(h_client, h_server, json=True)
        self.assertIsNotNone(topology_info, "Failed to get topology info")
        self.assertIn("clusters", topology_info.keys(),
                      "Failed to get cluster "
                      "details from topology info")
        node_list = topology_info["clusters"][0]["nodes"]
        self.assertTrue(node_list,
                        "Cluster info command returned empty list of nodes")

        node_id = next(
            (node["id"] for node in node_list
             if manage_hostname == node['hostnames']["manage"][0]), None)
        self.assertTrue(
            node_id, "Failed to get  node info for node  id '{}'".format(
                manage_hostname))

        # Add the spare disk, keeping its new device ID and size.
        device_id_new, device_size_new = self._add_heketi_device(
            add_device_name, node_id)

        # Choose an existing device of the same size (but different name)
        # as the one to replace.
        node_info_after_addition = heketi_node_info(
            h_client, h_server, node_id, json=True)
        device_name, device_id = next(
            ((device["name"], device["id"])
             for device in node_info_after_addition["devices"]
             if (device["name"] != add_device_name
                 and device["storage"]["total"] == device_size_new)),
            (None, None))

        self.assertIsNotNone(device_name, "Failed to get device name")
        self.assertIsNotNone(device_id, "Failed to get device id")
        # Cleanups run LIFO: re-add the removed device first, then enable it.
        self.addCleanup(heketi_device_enable, h_client, h_server, device_id,
                        raise_on_error=False)
        self.addCleanup(heketi_device_add, h_client, h_server, device_name,
                        node_id, raise_on_error=False)
        heketi_device_disable(h_client, h_server, device_id)
        heketi_device_remove(h_client, h_server, device_id)
        heketi_device_delete(h_client, h_server, device_id)
    def test_expansion_of_block_hosting_volume_using_heketi(self):
        """Verify that after expanding block hosting volume we are able to
        consume the expanded space"""

        h_node = self.heketi_client_node
        h_url = self.heketi_server_url
        # Block-volume IDs seen inside the BHV (from heketi) and those
        # referenced by PVs (from OCP); compared against each other below.
        bvols_in_bhv = set([])
        bvols_pv = set([])

        BHVS = get_block_hosting_volume_list(h_node, h_url)

        # Skip as soon as a second BHV with free space is found: the block
        # volume created below could then land on an unpredictable BHV.
        free_BHVS_count = 0
        for vol in BHVS.keys():
            info = heketi_volume_info(h_node, h_url, vol, json=True)
            if info['blockinfo']['freesize'] > 0:
                free_BHVS_count += 1
            if free_BHVS_count > 1:
                self.skipTest("Skip test case because there is more than one"
                              " Block Hosting Volume with free space")

        # create block volume of 1gb
        bvol_info = heketi_blockvolume_create(h_node, h_url, 1, json=True)

        expand_size = 20
        try:
            self.verify_free_space(expand_size)
            bhv = bvol_info['blockhostingvolume']
            vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)
            bvols_in_bhv.update(vol_info['blockinfo']['blockvolume'])
        finally:
            # cleanup BHV if there is only one block volume inside it.
            # Cleanups run LIFO, so the block volume is deleted before the
            # (then empty) BHV.
            if len(bvols_in_bhv) == 1:
                self.addCleanup(
                    heketi_volume_delete, h_node, h_url, bhv, json=True)
            self.addCleanup(
                heketi_blockvolume_delete, h_node, h_url, bvol_info['id'])

        size = vol_info['size']
        free_size = vol_info['blockinfo']['freesize']
        # Number of expand_size-sized block volumes that fit in the BHV
        bvol_count = int(free_size / expand_size)
        bricks = vol_info['bricks']

        # create pvs to fill the BHV; when less than expand_size is free,
        # a single PVC of the remaining free size is used instead
        pvcs = self.create_and_wait_for_pvcs(
            pvc_size=(expand_size if bvol_count else free_size),
            pvc_amount=(bvol_count or 1), timeout=300)

        vol_expand = True

        # Two passes: verify before expansion, expand, then verify again
        # with the PVCs created in the expanded space.
        for i in range(2):
            # get the vol ids from pvcs
            for pvc in pvcs:
                pv = get_pv_name_from_pvc(self.node, pvc)
                custom = r':.metadata.annotations."gluster\.org\/volume-id"'
                bvol_id = oc_get_custom_resource(self.node, 'pv', custom, pv)
                bvols_pv.add(bvol_id[0])

            vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)
            bvols = vol_info['blockinfo']['blockvolume']
            bvols_in_bhv.update(bvols)
            # Every PV-referenced block volume must be present in the BHV
            self.assertEqual(bvols_pv, (bvols_in_bhv & bvols_pv))

            # Expand BHV and verify bricks and size of BHV
            if vol_expand:
                vol_expand = False
                heketi_volume_expand(
                    h_node, h_url, bhv, expand_size, json=True)
                vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)

                self.assertEqual(size + expand_size, vol_info['size'])
                # Brick count must stay a multiple of 3 (replica-3 sets)
                self.assertFalse(len(vol_info['bricks']) % 3)
                self.assertLess(len(bricks), len(vol_info['bricks']))

                # create more PVCs in expanded BHV
                pvcs = self.create_and_wait_for_pvcs(
                    pvc_size=(expand_size - 1), pvc_amount=1)
    def test_expansion_of_block_hosting_volume_using_heketi(self):
        """Verify that after expanding block hosting volume we are able to
        consume the expanded space"""

        heketi_node = self.heketi_client_node
        heketi_url = self.heketi_server_url
        bvols_in_bhv, bvols_pv = set(), set()

        # Skip when more than one BHV has free space: the block volume
        # created below could then land on an unpredictable BHV.
        free_bhv_count = 0
        for candidate in get_block_hosting_volume_list(
                heketi_node, heketi_url).keys():
            candidate_info = heketi_volume_info(
                heketi_node, heketi_url, candidate, json=True)
            if candidate_info['blockinfo']['freesize'] > 0:
                free_bhv_count += 1
            if free_bhv_count > 1:
                self.skipTest("Skip test case because there is more than one"
                              " Block Hosting Volume with free space")

        # Create a 1 GB block volume; its BHV is the one under test.
        bvol_info = heketi_blockvolume_create(
            heketi_node, heketi_url, 1, json=True)

        expand_size = 20
        try:
            self.verify_free_space(expand_size)
            bhv = bvol_info['blockhostingvolume']
            vol_info = heketi_volume_info(
                heketi_node, heketi_url, bhv, json=True)
            bvols_in_bhv.update(vol_info['blockinfo']['blockvolume'])
        finally:
            # Cleanups run LIFO: delete the block volume first, then the
            # BHV - and the BHV only when it holds just this block volume.
            if len(bvols_in_bhv) == 1:
                self.addCleanup(heketi_volume_delete, heketi_node,
                                heketi_url, bhv, json=True)
            self.addCleanup(heketi_blockvolume_delete, heketi_node,
                            heketi_url, bvol_info['id'])

        size = vol_info['size']
        free_size = vol_info['blockinfo']['freesize']
        bvol_count = int(free_size / expand_size)
        bricks = vol_info['bricks']

        # Fill the BHV with PVCs (one PVC of the leftover size when less
        # than expand_size is free).
        pvcs = self.create_and_wait_for_pvcs(
            pvc_size=(expand_size if bvol_count else free_size),
            pvc_amount=(bvol_count or 1),
            timeout=300)

        vol_expand = True

        # Pass 1: verify, then expand. Pass 2: verify the expanded space.
        for _round in range(2):
            # Collect block-volume IDs referenced by the PVs
            for pvc in pvcs:
                pv = get_pv_name_from_pvc(self.node, pvc)
                custom = r':.metadata.annotations."gluster\.org\/volume-id"'
                bvol_id = oc_get_custom_resource(self.node, 'pv', custom, pv)
                bvols_pv.add(bvol_id[0])

            vol_info = heketi_volume_info(
                heketi_node, heketi_url, bhv, json=True)
            bvols_in_bhv.update(vol_info['blockinfo']['blockvolume'])
            # Every PV-referenced block volume must live in the BHV
            self.assertEqual(bvols_pv, (bvols_in_bhv & bvols_pv))

            if vol_expand:
                # Expand the BHV once and verify its size and brick count
                vol_expand = False
                heketi_volume_expand(
                    heketi_node, heketi_url, bhv, expand_size, json=True)
                vol_info = heketi_volume_info(
                    heketi_node, heketi_url, bhv, json=True)

                self.assertEqual(size + expand_size, vol_info['size'])
                self.assertFalse(len(vol_info['bricks']) % 3)
                self.assertLess(len(bricks), len(vol_info['bricks']))

                # Consume part of the newly expanded space
                pvcs = self.create_and_wait_for_pvcs(
                    pvc_size=(expand_size - 1), pvc_amount=1)
Esempio n. 24
0
    def test_heketi_manual_cleanup_operation_in_bhv(self):
        """Validate heketi db cleanup will resolve the mismatch
           in the free size of the block hosting volume with failed
           block device create operations.
        """
        bhv_size_before, bhv_size_after, vol_count = [], [], 5
        ocp_node, g_node = self.ocp_master_node[0], self.gluster_servers[0]
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Get existing heketi volume list
        existing_volumes = heketi_volume_list(h_node, h_url, json=True)

        # Add function to clean stale volumes created during test
        self.addCleanup(self._cleanup_heketi_volumes,
                        existing_volumes.get("volumes"))

        # Get nodes id list
        node_id_list = heketi_node_list(h_node, h_url)

        # Disable 4th and other nodes
        for node_id in node_id_list[3:]:
            heketi_node_disable(h_node, h_url, node_id)
            self.addCleanup(heketi_node_enable, h_node, h_url, node_id)

        # Calculate heketi volume size (capped at 50 GB per volume)
        free_space, nodenum = get_total_free_space(h_node, h_url)
        free_space_available = int(free_space / nodenum)
        if free_space_available > vol_count:
            h_volume_size = int(free_space_available / vol_count)
            if h_volume_size > 50:
                h_volume_size = 50
        else:
            h_volume_size, vol_count = 1, free_space_available

        # Create BHV in case blockvolume size is greater than default BHV size
        default_bhv_size = get_default_block_hosting_volume_size(
            h_node, self.heketi_dc_name)
        if default_bhv_size < h_volume_size:
            h_volume_name = "autotest-{}".format(utils.get_random_str())
            bhv_info = self.create_heketi_volume_with_name_and_wait(
                h_volume_name,
                free_space_available,
                raise_on_cleanup_error=False,
                block=True,
                json=True)
            free_space_available -= (
                int(bhv_info.get("blockinfo").get("reservedsize")) + 1)
            h_volume_size = int(free_space_available / vol_count)

        # Get BHV list
        h_bhv_list = get_block_hosting_volume_list(h_node, h_url).keys()
        self.assertTrue(h_bhv_list, "Failed to get the BHV list")

        # Get BHV free size before creating the stale block volumes. The
        # free size of a BHV lives under "blockinfo" in heketi volume info;
        # the top-level dict has no "freesize" key.
        for bhv in h_bhv_list:
            vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)
            bhv_size_before.append(
                vol_info.get("blockinfo", {}).get("freesize"))

        # Kill Tcmu-runner service
        services = ("tcmu-runner", "gluster-block-target", "gluster-blockd")
        kill_service_on_gluster_pod_or_node(ocp_node, "tcmu-runner", g_node)

        # Restore the services on cleanup (LIFO: restart, then wait)
        for service in services:
            state = ('exited'
                     if service == 'gluster-block-target' else 'running')
            self.addCleanup(wait_for_service_status_on_gluster_pod_or_node,
                            ocp_node, service, 'active', state, g_node)
            self.addCleanup(restart_service_on_gluster_pod_or_node, ocp_node,
                            service, g_node)

        def run_async(cmd, hostname, raise_on_error=True):
            # Matches command.cmd_run's signature; fires the command without
            # waiting so the block-create operations stay pending in heketi.
            return g.run_async(host=hostname, command=cmd)

        # Create stale block volumes in async
        for count in range(vol_count):
            with mock.patch.object(json, 'loads', side_effect=(lambda j: j)):
                with mock.patch.object(command,
                                       'cmd_run',
                                       side_effect=run_async):
                    heketi_blockvolume_create(h_node,
                                              h_url,
                                              h_volume_size,
                                              json=True)

        # Wait for pending operation to get generated
        self._check_for_pending_operations(h_node, h_url)

        # Restart the services
        for service in services:
            state = ('exited'
                     if service == 'gluster-block-target' else 'running')
            restart_service_on_gluster_pod_or_node(ocp_node, service, g_node)
            wait_for_service_status_on_gluster_pod_or_node(
                ocp_node, service, 'active', state, g_node)

        # Cleanup pending operation
        heketi_server_operation_cleanup(h_node, h_url)

        # wait for pending operation to get cleaned up
        for w in waiter.Waiter(timeout=120, interval=10):
            # Rebuild the "after" sizes on every poll; appending across
            # iterations would let stale values satisfy the comparison.
            bhv_size_after = []
            for bhv in h_bhv_list:
                vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)
                bhv_size_after.append(
                    vol_info.get("blockinfo", {}).get("freesize"))

            if (set(bhv_size_before) == set(bhv_size_after)):
                break
        if w.expired:
            raise exceptions.ExecutionError(
                "Failed to Validate volume size Actual:{},"
                " Expected:{}".format(set(bhv_size_before),
                                      set(bhv_size_after)))
Esempio n. 25
0
    def test_udev_usage_in_container(self):
        """Validate LVM inside container does not use udev"""

        # Skip the TC if independent mode deployment
        if not self.is_containerized_gluster():
            self.skipTest("Skipping this test case as it needs to run on "
                          "converged mode deployment")

        h_client, h_url = self.heketi_client_node, self.heketi_server_url
        server_info = list(g.config.get('gluster_servers').values())[0]
        server_node = server_info.get('manage')
        additional_device = server_info.get('additional_devices')[0]

        # command to run pvscan
        cmd_pvscan = "timeout 300 pvscan"

        # Get the gluster pod running on the chosen host.
        # NOTE(review): assumes a matching pod exists; otherwise pod_name
        # stays unbound and the oc_rsh calls below raise NameError.
        for pod_info in self.pod_name:
            if pod_info.get('pod_hostname') == server_node:
                pod_name = pod_info.get('pod_name')
                break

        # Create file volume
        vol_info = heketi_ops.heketi_volume_create(h_client,
                                                   h_url,
                                                   self.volume_size,
                                                   json=True)
        self.addCleanup(heketi_ops.heketi_volume_delete, h_client, h_url,
                        vol_info.get("id"))

        # Create block volume
        block_vol_info = heketi_ops.heketi_blockvolume_create(h_client,
                                                              h_url,
                                                              self.volume_size,
                                                              json=True)
        self.addCleanup(heketi_ops.heketi_blockvolume_delete, h_client, h_url,
                        block_vol_info.get("id"))

        # dmeventd must NOT be active in the container, so the status
        # command is expected to fail (oc_rsh raising AssertionError)
        err_msg = "dmeventd.service is running on setup"
        with self.assertRaises(AssertionError, msg=err_msg):
            openshift_ops.oc_rsh(self.oc_node, pod_name,
                                 "systemctl is-active dmeventd.service")

        # Service dmeventd should not be running in background either
        with self.assertRaises(AssertionError, msg=err_msg):
            openshift_ops.oc_rsh(self.oc_node, pod_name,
                                 "ps aux | grep dmeventd.service")

        # Perform a pvscan in container; must complete within the timeout
        openshift_ops.oc_rsh(self.oc_node, pod_name, cmd_pvscan)

        # Find the heketi node matching the chosen host to add a device to.
        # NOTE(review): if no node matches, the last node id is used.
        heketi_node_list = heketi_ops.heketi_node_list(h_client, h_url)
        for h_node_id in heketi_node_list:
            h_node_info = heketi_ops.heketi_node_info(h_client,
                                                      h_url,
                                                      h_node_id,
                                                      json=True)
            h_node_host = h_node_info.get('hostnames', {}).get('manage')[0]
            if h_node_host == server_node:
                break

        # Add new device to the node
        heketi_ops.heketi_device_add(h_client, h_url, additional_device,
                                     h_node_id)
        h_node_info = heketi_ops.heketi_node_info(h_client,
                                                  h_url,
                                                  h_node_id,
                                                  json=True)
        h_device_id = [
            device.get('id') for device in h_node_info.get('devices')
            if device.get('name') == additional_device
        ]
        # Cleanups run LIFO: disable, then remove, then delete the device
        self.addCleanup(heketi_ops.heketi_device_delete, h_client, h_url,
                        h_device_id[0])
        self.addCleanup(heketi_ops.heketi_device_remove, h_client, h_url,
                        h_device_id[0])
        self.addCleanup(heketi_ops.heketi_device_disable, h_client, h_url,
                        h_device_id[0])

        # Reboot the node on which device is added
        self.addCleanup(self._check_heketi_and_gluster_pod_after_node_reboot,
                        server_node)
        node_ops.node_reboot_by_command(server_node)

        # Wait node to become NotReady
        custom = r'":.status.conditions[?(@.type==\"Ready\")]".status'
        for w in waiter.Waiter(300, 10):
            status = openshift_ops.oc_get_custom_resource(
                self.oc_node, 'node', custom, server_node)
            if status[0] == 'False':
                break
        if w.expired:
            raise exceptions.ExecutionError(
                "Failed to bring node down {}".format(server_node))

        # Wait for node to become ready
        openshift_ops.wait_for_ocp_node_be_ready(self.oc_node, server_node)

        # Wait for heketi and glusterfs pod to become ready
        self._check_heketi_and_gluster_pod_after_node_reboot(server_node)

        # Perform a pvscan in container again after the reboot
        openshift_ops.oc_rsh(self.oc_node, pod_name, cmd_pvscan)