def test_device_delete_with_bricks(self):
    """Validate device deletion with existing bricks on the device"""
    h_node, h_url = self.heketi_client_node, self.heketi_server_url

    # Create a small volume so the target device holds at least one brick
    vol_size = 1
    vol_info = heketi_volume_create(h_node, h_url, vol_size, json=True)
    self.addCleanup(heketi_volume_delete, h_node, h_url, vol_info['id'])

    # Pick the device (and its node) that got the first brick
    device_delete_id = vol_info['bricks'][0]['device']
    node_id = vol_info['bricks'][0]['node']
    device_info = heketi_device_info(
        h_node, h_url, device_delete_id, json=True)
    device_name = device_info['name']

    # Disable the device
    heketi_device_disable(h_node, h_url, device_delete_id)
    self.addCleanup(heketi_device_enable, h_node, h_url, device_delete_id)

    # Deletion must fail while bricks still live on the device
    with self.assertRaises(AssertionError):
        heketi_device_delete(h_node, h_url, device_delete_id)
        # Reached only if deletion unexpectedly succeeds: schedule
        # re-adding the device so the cluster is restored on cleanup.
        self.addCleanup(
            heketi_device_add, h_node, h_url, device_name, node_id)
def test_heketi_device_delete(self):
    """Test Heketi device delete operation"""

    # Get list of additional devices for one of the Gluster nodes
    ip_with_devices = {}
    for gluster_server in g.config["gluster_servers"].values():
        if not gluster_server.get("additional_devices"):
            continue
        ip_with_devices = {
            gluster_server['storage']: gluster_server['additional_devices']}
        break

    # Skip test if no additional device is available
    if not ip_with_devices:
        self.skipTest(
            "No additional devices attached to any of the gluster nodes")

    # Select any additional device and get the node id of the gluster node
    h_node, h_server = self.heketi_client_node, self.heketi_server_url
    node_id, device_name = None, list(ip_with_devices.values())[0][0]
    topology_info = heketi_topology_info(h_node, h_server, json=True)
    for node in topology_info["clusters"][0]["nodes"]:
        if list(ip_with_devices.keys())[0] == (
                node['hostnames']["storage"][0]):
            node_id = node["id"]
            break
    self.assertTrue(node_id)

    # Add additional device to the cluster
    heketi_device_add(h_node, h_server, device_name, node_id)

    # Get the device id and number of bricks on the device
    node_info_after_addition = heketi_node_info(
        h_node, h_server, node_id, json=True)
    device_id, bricks = None, None
    for device in node_info_after_addition["devices"]:
        if device["name"] == device_name:
            device_id, bricks = device["id"], len(device['bricks'])
            break
    self.assertTrue(device_id, "Device not added in expected node")

    # Delete heketi device (disable -> remove -> delete sequence)
    heketi_device_disable(h_node, h_server, device_id)
    heketi_device_remove(h_node, h_server, device_id)
    heketi_device_delete(h_node, h_server, device_id)

    # Verify that there were no bricks on the newly added device
    msg = (
        "Number of bricks on the device %s of the node %s should be zero"
        % (device_name, list(ip_with_devices.keys())[0]))
    self.assertEqual(0, bricks, msg)

    # Verify device deletion
    node_info_after_deletion = heketi_node_info(h_node, h_server, node_id)
    # BUG FIX: the original message was missing a space between the
    # node id placeholder and "after", producing "node Xafter the ...".
    msg = ("Device %s should not be shown in node info of the node %s "
           "after the device deletion" % (device_id, node_id))
    self.assertNotIn(device_id, node_info_after_deletion, msg)
def _get_vol_size(self):
    """Pick a volume size exceeding the smallest usable disks.

    Disables redundant nodes/devices (restored via cleanups) and
    returns a size in Gb that is 1Gb bigger than the smallest of the
    per-node biggest free devices.
    """
    min_free_space_gb = 5
    heketi_url = self.heketi_server_url
    node_ids = heketi_node_list(self.heketi_client_node, heketi_url)
    self.assertTrue(node_ids)

    nodes = {}
    min_free_space = min_free_space_gb * 1024**2
    for node_id in node_ids:
        node_info = heketi_node_info(
            self.heketi_client_node, heketi_url, node_id, json=True)
        if (node_info['state'].lower() != 'online'
                or not node_info['devices']):
            continue
        # Keep only 3 nodes active, disable the rest
        if len(nodes) > 2:
            out = heketi_node_disable(
                self.heketi_client_node, heketi_url, node_id)
            self.assertTrue(out)
            self.addCleanup(
                heketi_node_enable, self.heketi_client_node, heketi_url,
                node_id)
        for device in node_info['devices']:
            if device['state'].lower() != 'online':
                continue
            free_space = device['storage']['free']
            # Disable devices too small for the test
            if free_space < min_free_space:
                out = heketi_device_disable(
                    self.heketi_client_node, heketi_url, device['id'])
                self.assertTrue(out)
                self.addCleanup(
                    heketi_device_enable, self.heketi_client_node,
                    heketi_url, device['id'])
                continue
            if node_id not in nodes:
                nodes[node_id] = []
            nodes[node_id].append(device['storage']['free'])

    # Skip test if nodes requirements are not met
    if (len(nodes) < 3 or not all(
            map((lambda _list: len(_list) > 1), nodes.values()))):
        raise self.skipTest(
            "Could not find 3 online nodes with, "
            "at least, 2 online devices having free space "
            "bigger than %dGb." % min_free_space_gb)

    # Calculate size of a potential distributed vol
    vol_size_gb = int(min(map(max, nodes.values())) / (1024**2)) + 1
    return vol_size_gb
def _available_disk_free_space(self):
    """Return max PVC size (Gb) fitting on 3 nodes with 1 device each.

    Redundant nodes and devices are disabled (re-enabled via cleanups)
    so that exactly one sufficiently-free device per node remains.
    """
    min_free_space_gb = 3
    heketi_url = self.heketi_server_url
    node_id_list = heketi_ops.heketi_node_list(
        self.heketi_client_node, heketi_url)
    self.assertTrue(node_id_list)

    nodes = {}
    min_free_space = min_free_space_gb * 1024**2
    for node_id in node_id_list:
        node_info = heketi_ops.heketi_node_info(
            self.heketi_client_node, heketi_url, node_id, json=True)
        if (node_info['state'].lower() != 'online'
                or not node_info['devices']):
            continue
        # Keep only 3 nodes active, disable the rest
        if len(nodes) > 2:
            self.addCleanup(
                heketi_ops.heketi_node_enable, self.heketi_client_node,
                heketi_url, node_id)
            out = heketi_ops.heketi_node_disable(
                self.heketi_client_node, heketi_url, node_id)
            self.assertTrue(out)
        for device in node_info['devices']:
            if device['state'].lower() != 'online':
                continue
            free_space = device['storage']['free']
            # Disable extra devices on a node and too-small devices
            if (node_id in nodes.keys() or free_space < min_free_space):
                out = heketi_ops.heketi_device_disable(
                    self.heketi_client_node, heketi_url, device['id'])
                self.assertTrue(out)
                self.addCleanup(
                    heketi_ops.heketi_device_enable,
                    self.heketi_client_node, heketi_url, device['id'])
                continue
            nodes[node_id] = free_space

    if len(nodes) < 3:
        raise self.skipTest(
            "Could not find 3 online nodes with, "
            "at least, 1 online device having free space "
            "bigger than %dGb." % min_free_space_gb)

    # Calculate maximum available size for PVC
    available_size_gb = int(min(nodes.values()) / (1024**2))
    return available_size_gb
def detach_devices_attached(self, device_id_list):
    """
    All the devices attached are gracefully detached in this function
    """
    # Accept a single device id as well as an iterable of ids.
    if not isinstance(device_id_list, (tuple, set, list)):
        device_id_list = [device_id_list]

    for device_id in device_id_list:
        # Heketi requires disable -> remove -> delete ordering.
        device_disable = heketi_ops.heketi_device_disable(
            self.heketi_client_node, self.heketi_server_url, device_id)
        self.assertNotEqual(
            device_disable, False,
            "Device %s could not be disabled" % device_id)
        device_remove = heketi_ops.heketi_device_remove(
            self.heketi_client_node, self.heketi_server_url, device_id)
        self.assertNotEqual(
            device_remove, False,
            "Device %s could not be removed" % device_id)
        device_delete = heketi_ops.heketi_device_delete(
            self.heketi_client_node, self.heketi_server_url, device_id)
        self.assertNotEqual(
            device_delete, False,
            "Device %s could not be deleted" % device_id)
def detach_devices_attached(self, device_id_list):
    """
    All the devices attached are gracefully detached in this function
    """
    # Accept a single device id as well as an iterable of ids.
    if not isinstance(device_id_list, (tuple, set, list)):
        device_id_list = [device_id_list]

    # Heketi requires disable -> remove -> delete ordering; drive the
    # sequence from a table so each step asserts the same way.
    steps = (
        (heketi_ops.heketi_device_disable, "disabled"),
        (heketi_ops.heketi_device_remove, "removed"),
        (heketi_ops.heketi_device_delete, "deleted"),
    )
    for device_id in device_id_list:
        for heketi_op, action in steps:
            out = heketi_op(
                self.heketi_client_node, self.heketi_server_url,
                device_id)
            self.assertNotEqual(
                out, False,
                "Device %s could not be %s" % (device_id, action))
def _add_new_device_and_remove_existing_device(
        self, is_delete_device, add_device_name, node_id,
        add_back_again=False, skip_cleanup_addition=False):
    """Delete or remove device and also add one device on the same node.
    """
    h_client, h_url = self.heketi_client_node, self.heketi_server_url
    # Cleanups must not raise when the device is expected to be added back.
    raise_on_error = False if add_back_again else True

    # Iterate chosen node devices and pick the smallest online one.
    lowest_device_size = lowest_device_id = None
    online_hosts = self.get_online_nodes_disable_redundant()
    for host in online_hosts[0:3]:
        if node_id != host["id"]:
            continue
        for device in host["devices"]:
            if device["state"].strip().lower() != "online":
                continue
            if (lowest_device_size is None
                    or device["storage"]["total"] < lowest_device_size):
                lowest_device_size = device["storage"]["total"]
                lowest_device_id = device["id"]
                lowest_device_name = device["name"]
    if lowest_device_id is None:
        self.skipTest(
            "Didn't find suitable device for disablement on '%s' node." % (
                node_id))

    # Create volume
    vol_size = 1
    vol_info = heketi_volume_create(h_client, h_url, vol_size, json=True)
    self.addCleanup(
        heketi_volume_delete, self.heketi_client_node,
        self.heketi_server_url, vol_info['id'])

    # Add extra device, then remember it's ID and size
    device_id_new, device_size_new = self._add_heketi_device(
        add_device_name, node_id, raise_on_error)
    if lowest_device_size > device_size_new:
        skip_msg = ("Skip test case, because newly added disk %s is "
                    "smaller than device which we want to remove %s." % (
                        device_size_new, lowest_device_size))
        self.skipTest(skip_msg)

    # Removing an enabled device must fail; the statements after the
    # remove call only run if it unexpectedly succeeds.
    g.log.info("Removing device id %s" % lowest_device_id)
    with self.assertRaises(AssertionError):
        out = heketi_device_remove(h_client, h_url, lowest_device_id)
        self.addCleanup(
            heketi_device_enable, h_client, h_url, lowest_device_id)
        self.addCleanup(
            heketi_device_disable, h_client, h_url, lowest_device_id)
        self.assertFalse(True, "Device removal didn't fail: %s" % out)
    g.log.info("Device removal failed as expected")

    # Need to disable device before removing
    heketi_device_disable(h_client, h_url, lowest_device_id)
    if not is_delete_device:
        self.addCleanup(
            heketi_device_enable, h_client, h_url, lowest_device_id)

    # Remove device from Heketi
    try:
        heketi_device_remove(h_client, h_url, lowest_device_id)
    except Exception:
        if is_delete_device:
            self.addCleanup(
                heketi_device_enable, h_client, h_url, lowest_device_id,
                raise_on_error=raise_on_error)
        raise
    if not is_delete_device:
        self.addCleanup(
            heketi_device_disable, h_client, h_url, lowest_device_id)

    if is_delete_device:
        try:
            heketi_device_delete(h_client, h_url, lowest_device_id)
        except Exception:
            self.addCleanup(
                heketi_device_enable, h_client, h_url, lowest_device_id,
                raise_on_error=raise_on_error)
            self.addCleanup(
                heketi_device_disable, h_client, h_url, lowest_device_id,
                raise_on_error=raise_on_error)
            raise
        if not skip_cleanup_addition:
            # Do not add the additional device back, initially added
            self.addCleanup(
                heketi_device_add, h_client, h_url, lowest_device_name,
                node_id, raise_on_error=raise_on_error)

    # Create volume
    vol_info = heketi_volume_create(h_client, h_url, vol_size, json=True)
    self.addCleanup(heketi_volume_delete, h_client, h_url, vol_info['id'])

    if is_delete_device:
        return lowest_device_name

    # Check that none of volume's bricks is present on the device
    present = self.check_any_of_bricks_present_in_device(
        vol_info['bricks'], lowest_device_id)
    self.assertFalse(
        present,
        "Some of the '%s' volume bricks is present of the removed "
        "'%s' device." % (vol_info['id'], lowest_device_id))
def test_pv_resize_device_disabled(self):
    """Validate resize after disabling all devices except one"""
    h_node, h_url = self.heketi_client_node, self.heketi_server_url

    # expand volume size and path volume is mounted
    expand_size, dir_path = 7, "/mnt"

    # Get nodes info
    heketi_node_id_list = heketi_ops.heketi_node_list(h_node, h_url)
    if len(heketi_node_id_list) < 3:
        self.skipTest(
            "At-least 3 gluster nodes are required to execute test case")

    self.create_storage_class(allow_volume_expansion=True)
    pvc_name = self.create_and_wait_for_pvc(pvc_size=2)
    vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
    dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

    # 1G fits in the 2G PVC, a further 3G must not
    self._write_file(pod_name, "file1", "1G", dir_path)
    with self.assertRaises(AssertionError):
        self._write_file(pod_name, "file2", "3G", dir_path)

    # Prepare first 3 nodes and then disable other devices.
    # BUG FIX: remember every disabled device id; the original re-enable
    # loop below iterated the stale 'devices' variable left over from the
    # last node, so only that node's devices were re-enabled (3 times).
    disabled_device_ids = []
    for node_id in heketi_node_id_list[:3]:
        node_info = heketi_ops.heketi_node_info(
            h_node, h_url, node_id, json=True)
        self.assertTrue(node_info, "Failed to get node info")
        devices = node_info.get("devices", None)
        self.assertTrue(
            devices, "Node {} does not have devices".format(node_id))
        if devices[0]["state"].strip().lower() != "online":
            self.skipTest("Skipping test as it expects to first device to"
                          " be enabled")
        for device in devices[1:]:
            heketi_ops.heketi_device_disable(h_node, h_url, device["id"])
            self.addCleanup(
                heketi_ops.heketi_device_enable, h_node, h_url,
                device["id"])
            disabled_device_ids.append(device["id"])

    usedsize_before_resize = self._get_mount_size(pod_name, dir_path)

    # Resize pvc
    resize_pvc(self.node, pvc_name, expand_size)
    verify_pvc_size(self.node, pvc_name, expand_size)
    vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
    self.assertFalse(len(vol_info['bricks']['brick']) % 3)

    # Re-enable every device that was disabled above
    for device_id in disabled_device_ids:
        heketi_ops.heketi_device_enable(h_node, h_url, device_id)

    self._write_file(pod_name, "file3", "3G", dir_path)

    usedsize_after_resize = self._get_mount_size(pod_name, dir_path)
    self.assertGreater(
        int(usedsize_before_resize.strip('%')),
        int(usedsize_after_resize.strip('%')),
        "Mount size {} should be greater than {}".format(
            usedsize_before_resize, usedsize_after_resize))

    self._write_file(pod_name, "file4", "1024", dir_path)

    # Validate dist-rep volume with 6 bricks after pv resize
    vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
    self.assertEqual(
        6, len(vol_info['bricks']['brick']),
        "Expected bricks count is 6, but actual brick count is {}".format(
            len(vol_info['bricks']['brick'])))
def test_create_volumes_enabling_and_disabling_heketi_devices(self):
    """Validate enable/disable of heketi device"""

    # Get nodes info
    node_id_list = heketi_ops.heketi_node_list(
        self.heketi_client_node, self.heketi_server_url)
    node_info_list = []
    for node_id in node_id_list[0:3]:
        node_info = heketi_ops.heketi_node_info(
            self.heketi_client_node, self.heketi_server_url,
            node_id, json=True)
        node_info_list.append(node_info)

    # Disable 4th and other nodes
    if len(node_id_list) > 3:
        for node_id in node_id_list[3:]:
            heketi_ops.heketi_node_disable(
                self.heketi_client_node, self.heketi_server_url, node_id)
            self.addCleanup(
                heketi_ops.heketi_node_enable, self.heketi_client_node,
                self.heketi_server_url, node_id)

    # Disable second and other devices on the first 3 nodes
    for node_info in node_info_list[0:3]:
        devices = node_info["devices"]
        self.assertTrue(
            devices, "Node '%s' does not have devices." % node_info["id"])
        if devices[0]["state"].strip().lower() != "online":
            self.skipTest("Test expects first device to be enabled.")
        if len(devices) < 2:
            continue
        for device in node_info["devices"][1:]:
            out = heketi_ops.heketi_device_disable(
                self.heketi_client_node, self.heketi_server_url,
                device["id"])
            self.assertTrue(
                out, "Failed to disable the device %s" % device["id"])
            self.addCleanup(
                heketi_ops.heketi_device_enable,
                self.heketi_client_node, self.heketi_server_url,
                device["id"])

    # Create heketi volume
    out = heketi_ops.heketi_volume_create(
        self.heketi_client_node, self.heketi_server_url, 1, json=True)
    self.assertTrue(out, "Failed to create heketi volume of size 1")
    g.log.info("Successfully created heketi volume of size 1")
    device_id = out["bricks"][0]["device"]
    self.addCleanup(
        heketi_ops.heketi_volume_delete, self.heketi_client_node,
        self.heketi_server_url, out["bricks"][0]["volume"])

    # Disable device
    g.log.info("Disabling '%s' device" % device_id)
    out = heketi_ops.heketi_device_disable(
        self.heketi_client_node, self.heketi_server_url, device_id)
    self.assertTrue(out, "Failed to disable the device %s" % device_id)
    g.log.info("Successfully disabled device %s" % device_id)

    try:
        # Get device info
        g.log.info("Retrieving '%s' device info" % device_id)
        out = heketi_ops.heketi_device_info(
            self.heketi_client_node, self.heketi_server_url,
            device_id, json=True)
        self.assertTrue(out, "Failed to get device info %s" % device_id)
        g.log.info("Successfully retrieved device info %s" % device_id)
        name = out["name"]
        self.assertEqual(
            out["state"].lower().strip(), "offline",
            "Device %s is not in offline state." % name)
        g.log.info("Device %s is now offine" % name)

        # Try to create heketi volume
        g.log.info("Creating heketi volume: Expected to fail.")
        try:
            out = heketi_ops.heketi_volume_create(
                self.heketi_client_node, self.heketi_server_url,
                1, json=True)
        except AssertionError:
            g.log.info("Volume was not created as expected.")
        else:
            self.addCleanup(
                heketi_ops.heketi_volume_delete, self.heketi_client_node,
                self.heketi_server_url, out["bricks"][0]["volume"])
            msg = "Volume unexpectedly created. Out: %s" % out
            # BUG FIX: 'assert False, msg' is stripped when Python runs
            # with -O; self.fail() always raises.
            self.fail(msg)
    finally:
        # Enable the device back
        g.log.info("Enable '%s' device back." % device_id)
        out = heketi_ops.heketi_device_enable(
            self.heketi_client_node, self.heketi_server_url, device_id)
        self.assertTrue(out, "Failed to enable the device %s" % device_id)
        g.log.info("Successfully enabled device %s" % device_id)

    # Get device info
    out = heketi_ops.heketi_device_info(
        self.heketi_client_node, self.heketi_server_url,
        device_id, json=True)
    self.assertTrue(out, ("Failed to get device info %s" % device_id))
    g.log.info("Successfully retrieved device info %s" % device_id)
    name = out["name"]
    # CONSISTENCY FIX: normalize the state string exactly as the
    # "offline" check above does before comparing.
    self.assertEqual(
        out["state"].lower().strip(), "online",
        "Device %s is not in online state." % name)

    # Create heketi volume of size
    out = heketi_ops.heketi_volume_create(
        self.heketi_client_node, self.heketi_server_url, 1, json=True)
    self.assertTrue(out, "Failed to create volume of size 1")
    self.addCleanup(
        heketi_ops.heketi_volume_delete, self.heketi_client_node,
        self.heketi_server_url, out["bricks"][0]["volume"])
    g.log.info("Successfully created volume of size 1")
    name = out["name"]

    # Get gluster volume info
    vol_info = get_volume_info('auto_get_gluster_endpoint', volname=name)
    self.assertTrue(vol_info, "Failed to get '%s' volume info." % name)
    g.log.info("Successfully got the '%s' volume info." % name)
def test_check_device_disable_based_on_heketi_zone(
        self, zone_count, is_disable_on_different_zone, is_set_env=False):
    """Validate device disable in different heketi zones"""
    online_device_count, expected_device_count = 0, 4
    expected_node_count, heketi_zone_checking, sc_name = 4, "strict", None

    # Check amount of available online nodes
    online_node_count = len(self._get_online_nodes())
    if online_node_count < expected_node_count:
        self.skipTest(
            'Available node count {} is less than expected node '
            'count {}'.format(online_node_count, expected_node_count))

    # Check amount of available online heketi zones
    self._check_for_available_zones(zone_count)

    # Get the online devices and nodes w.r.t. to zone
    zone_devices_nodes = self._get_online_devices_and_nodes_with_zone()

    # Check amount of available online heketi devices
    for zone in zone_devices_nodes:
        online_device_count += len(zone_devices_nodes[zone]['devices'])
    if online_device_count < expected_device_count:
        self.skipTest(
            "Expected the heketi device count {} is greater than the "
            "available device count {}".format(
                expected_device_count, online_device_count))

    # Create sc or else directly set env to "strict" inside dc
    is_create_sc = not is_set_env
    if is_create_sc:
        sc_name = self.create_storage_class(
            sc_name_prefix=self.prefix, vol_name_prefix=self.prefix,
            heketi_zone_checking=heketi_zone_checking)
    if is_set_env:
        self._set_zone_check_env_in_heketi_dc(heketi_zone_checking)

    # Choose a zone and device_id to disable the device
    for zone, nodes_and_devices in zone_devices_nodes.items():
        if zone_count == 3:
            # Select a device with a zone having multiple nodes in
            # same zone to cover the test cases "disable in same zone"
            if len(nodes_and_devices['devices']) > 1:
                zone_with_disabled_device = zone
                disabled_device = nodes_and_devices['devices'][0]
                break
        else:
            # Select device from any of the zones
            zone_with_disabled_device = zone
            disabled_device = nodes_and_devices['devices'][0]
            break

    # Disable the selected device
    heketi_ops.heketi_device_disable(
        self.h_client, self.h_server, disabled_device)
    self.addCleanup(
        heketi_ops.heketi_device_enable, self.h_client, self.h_server,
        disabled_device)

    # Create some DCs with PVCs and check brick placement in heketi zones
    pod_names = self._create_dcs_and_check_brick_placement(
        self.prefix, sc_name, heketi_zone_checking, zone_count)

    # Enable disabled device
    heketi_ops.heketi_device_enable(
        self.h_client, self.h_server, disabled_device)

    if is_disable_on_different_zone:
        # Select the new device in a different zone
        for zone, nodes_and_devices in zone_devices_nodes.items():
            if zone != zone_with_disabled_device:
                new_device_to_disable = nodes_and_devices['devices'][0]
                break
    else:
        # Select the new device in the same zone
        new_device_to_disable = zone_devices_nodes[
            zone_with_disabled_device]['devices'][1]

    # Disable the newly selected device
    heketi_ops.heketi_device_disable(
        self.h_client, self.h_server, new_device_to_disable)
    self.addCleanup(
        heketi_ops.heketi_device_enable, self.h_client, self.h_server,
        new_device_to_disable)

    # Verify if pods are in ready state
    for pod_name in pod_names:
        openshift_ops.wait_for_pod_be_ready(
            self.node, pod_name, timeout=5, wait_step=2)
def test_create_arbiter_vol_with_more_than_one_brick_set(self):
    """Validate volume creation using heketi for more than six brick set"""

    # Set arbiter:disabled tag to the data devices and get their info
    data_nodes = []
    for node_id in self.node_id_list[0:2]:
        node_info = heketi_ops.heketi_node_info(
            self.heketi_client_node, self.heketi_server_url,
            node_id, json=True)

        if len(node_info['devices']) < 2:
            self.skipTest(
                "Nodes are expected to have at least 2 devices")
        if not all([int(d['storage']['free']) > (3 * 1024**2)
                    for d in node_info['devices'][0:2]]):
            self.skipTest(
                "Devices are expected to have more than 3Gb of free space")
        for device in node_info['devices']:
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url,
                'device', device['id'], 'disabled',
                device.get('tags', {}).get('arbiter'))
        self._set_arbiter_tag_with_further_revert(
            self.heketi_client_node, self.heketi_server_url,
            'node', node_id, 'disabled',
            node_info.get('tags', {}).get('arbiter'))

        data_nodes.append(node_info)

    # Set arbiter:required tag to all other nodes and their devices
    for node_id in self.node_id_list[2:]:
        node_info = heketi_ops.heketi_node_info(
            self.heketi_client_node, self.heketi_server_url,
            node_id, json=True)
        self._set_arbiter_tag_with_further_revert(
            self.heketi_client_node, self.heketi_server_url,
            'node', node_id, 'required',
            node_info.get('tags', {}).get('arbiter'))
        for device in node_info['devices']:
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url,
                'device', device['id'], 'required',
                device.get('tags', {}).get('arbiter'))

    # Get second big volume between 2 data nodes and use it
    # for target vol calculation.
    for i, node_info in enumerate(data_nodes):
        biggest_disk_free_space = 0
        for device in node_info['devices'][0:2]:
            free = int(device['storage']['free'])
            if free > biggest_disk_free_space:
                biggest_disk_free_space = free
        data_nodes[i]['biggest_free_space'] = biggest_disk_free_space
    target_vol_size_kb = 1 + min([
        n['biggest_free_space'] for n in data_nodes])

    # Check that all the data devices have, at least, half of required size
    all_big_enough = True
    for node_info in data_nodes:
        for device in node_info['devices'][0:2]:
            if float(device['storage']['free']) < (target_vol_size_kb / 2):
                all_big_enough = False
                break

    # Create sc with gluster arbiter info
    self.create_storage_class(is_arbiter_vol=True)

    # Create helper arbiter vol if not all the data devices have
    # half of required free space.
    if not all_big_enough:
        helper_vol_size_kb, target_vol_size_kb = 0, 0
        smaller_device_id = None
        for node_info in data_nodes:
            devices = node_info['devices']
            if ((devices[0]['storage']['free']) > (
                    devices[1]['storage']['free'])):
                smaller_device_id = devices[1]['id']
                smaller_device = devices[1]['storage']['free']
                bigger_device = devices[0]['storage']['free']
            else:
                smaller_device_id = devices[0]['id']
                smaller_device = devices[0]['storage']['free']
                bigger_device = devices[1]['storage']['free']
            diff = bigger_device - (2 * smaller_device) + 1
            if diff > helper_vol_size_kb:
                helper_vol_size_kb = diff
                target_vol_size_kb = bigger_device - diff

        # Disable smaller device and create helper vol on bigger one
        # to reduce its size, then enable smaller device back.
        try:
            out = heketi_ops.heketi_device_disable(
                self.heketi_client_node, self.heketi_server_url,
                smaller_device_id)
            self.assertTrue(out)
            self.create_and_wait_for_pvc(
                int(helper_vol_size_kb / 1024.0**2) + 1)
        finally:
            out = heketi_ops.heketi_device_enable(
                self.heketi_client_node, self.heketi_server_url,
                smaller_device_id)
            self.assertTrue(out)

    # Create target arbiter volume
    self.create_and_wait_for_pvc(int(target_vol_size_kb / 1024.0**2))

    # Get gluster volume info
    vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

    self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
        vol_info, arbiter_bricks=2, data_bricks=4)
def test_heketi_device_removal_with_insuff_space(self):
    """Validate heketi with device removal insufficient space"""

    # Disable 4+ nodes and 3+ devices on the first 3 nodes
    min_free_space_gb = 5
    min_free_space = min_free_space_gb * 1024**2
    heketi_url = self.heketi_server_url
    heketi_node = self.heketi_client_node
    nodes = {}

    node_ids = heketi_node_list(heketi_node, heketi_url)
    self.assertTrue(node_ids)
    for node_id in node_ids:
        node_info = heketi_node_info(
            heketi_node, heketi_url, node_id, json=True)
        if (node_info["state"].lower() != "online"
                or not node_info["devices"]):
            continue
        if len(nodes) > 2:
            heketi_node_disable(heketi_node, heketi_url, node_id)
            self.addCleanup(
                heketi_node_enable, heketi_node, heketi_url, node_id)
            continue
        for device in node_info["devices"]:
            if device["state"].lower() != "online":
                continue
            free_space = device["storage"]["free"]
            if node_id not in nodes:
                nodes[node_id] = []
            # Keep at most 2 big-enough devices per node
            if (free_space < min_free_space or len(nodes[node_id]) > 1):
                heketi_device_disable(
                    heketi_node, heketi_url, device["id"])
                self.addCleanup(
                    heketi_device_enable, heketi_node, heketi_url,
                    device["id"])
                continue
            nodes[node_id].append({
                "device_id": device["id"], "free": free_space})

    # Skip test if nodes requirements are not met
    if (len(nodes) < 3
            or not all(map((lambda _l: len(_l) > 1), nodes.values()))):
        raise self.skipTest(
            "Could not find 3 online nodes with 2 online devices "
            "having free space bigger than %dGb." % min_free_space_gb)

    # Calculate size of a potential distributed vol based on the bigger
    # of the first node's two devices
    if nodes[node_ids[0]][0]["free"] > nodes[node_ids[0]][1]["free"]:
        index = 0
    else:
        index = 1
    vol_size_gb = int(nodes[node_ids[0]][index]["free"] / (1024**2)) + 1
    device_id = nodes[node_ids[0]][index]["device_id"]

    # Create volume with such size that we consume space more than
    # size of smaller disks
    h_volume_name = "autotests-heketi-volume-%s" % utils.get_random_str()
    try:
        self.create_heketi_volume_with_name_and_wait(
            h_volume_name, vol_size_gb, json=True)
    except Exception as e:
        # NOTE: rare situation when we need to decrease size of a volume.
        # BUG FIX: the format string contains a '%s' placeholder but the
        # original call never supplied an argument for it, so the literal
        # '%s' was logged.
        g.log.info("Failed to create '%s'Gb volume. "
                   "Trying to create another one, smaller for 1Gb."
                   % vol_size_gb)

        if not ('more required' in str(e)
                and ('Insufficient suitable allocatable extents for '
                     'logical volume' in str(e))):
            raise

        vol_size_gb -= 1
        self.create_heketi_volume_with_name_and_wait(
            h_volume_name, vol_size_gb, json=True)

    # Try to 'remove' bigger Heketi disk expecting error,
    # because there is no space on smaller disk to relocate bricks to
    heketi_device_disable(heketi_node, heketi_url, device_id)
    self.addCleanup(
        heketi_device_enable, heketi_node, heketi_url, device_id)
    try:
        self.assertRaises(
            AssertionError, heketi_device_remove, heketi_node,
            heketi_url, device_id)
    except Exception:
        self.addCleanup(
            heketi_device_disable, heketi_node, heketi_url, device_id)
        raise
def test_heketi_with_device_removal_insuff_space(self):
    """Validate heketi with device removal insufficient space"""

    # Disable 4+ nodes and 3+ devices on the first 3 nodes
    min_free_space_gb = 5
    min_free_space = min_free_space_gb * 1024**2
    heketi_url = self.heketi_server_url
    heketi_node = self.heketi_client_node
    nodes = {}

    node_ids = heketi_node_list(heketi_node, heketi_url)
    self.assertTrue(node_ids)
    for node_id in node_ids:
        node_info = heketi_node_info(
            heketi_node, heketi_url, node_id, json=True)
        if (node_info["state"].lower() != "online"
                or not node_info["devices"]):
            continue
        if len(nodes) > 2:
            heketi_node_disable(heketi_node, heketi_url, node_id)
            self.addCleanup(
                heketi_node_enable, heketi_node, heketi_url, node_id)
            continue
        for device in node_info["devices"]:
            if device["state"].lower() != "online":
                continue
            free_space = device["storage"]["free"]
            if node_id not in nodes:
                nodes[node_id] = []
            # Keep at most 2 big-enough devices per node
            if (free_space < min_free_space
                    or len(nodes[node_id]) > 1):
                heketi_device_disable(
                    heketi_node, heketi_url, device["id"])
                self.addCleanup(
                    heketi_device_enable, heketi_node, heketi_url,
                    device["id"])
                continue
            nodes[node_id].append(
                {"device_id": device["id"], "free": free_space})

    # Skip test if nodes requirements are not met
    if (len(nodes) < 3 or not all(
            map((lambda _list: len(_list) > 1), nodes.values()))):
        raise self.skipTest(
            "Could not find 3 online nodes with 2 online devices "
            "having free space bigger than %dGb." % min_free_space_gb)

    # Calculate size of a potential distributed vol based on the bigger
    # of the first node's two devices
    if nodes[node_ids[0]][0]["free"] > nodes[node_ids[0]][1]["free"]:
        index = 0
    else:
        index = 1
    vol_size_gb = int(nodes[node_ids[0]][index]["free"] / (1024**2)) + 1
    device_id = nodes[node_ids[0]][index]["device_id"]

    # Create volume with such size that we consume space more than
    # size of smaller disks
    try:
        heketi_vol = heketi_volume_create(
            heketi_node, heketi_url, vol_size_gb, json=True)
    except Exception as e:
        g.log.warning(
            "Got following error trying to create '%s'Gb vol: %s"
            % (vol_size_gb, e))
        vol_size_gb -= 1
        heketi_vol = heketi_volume_create(
            heketi_node, heketi_url, vol_size_gb, json=True)
    self.addCleanup(
        heketi_volume_delete, self.heketi_client_node,
        self.heketi_server_url, heketi_vol["bricks"][0]["volume"])

    # Try to 'remove' bigger Heketi disk expecting error,
    # because there is no space on smaller disk to relocate bricks to
    heketi_device_disable(heketi_node, heketi_url, device_id)
    self.addCleanup(
        heketi_device_enable, heketi_node, heketi_url, device_id)
    try:
        self.assertRaises(
            ExecutionError, heketi_device_remove, heketi_node,
            heketi_url, device_id)
    except Exception:
        self.addCleanup(
            heketi_device_disable, heketi_node, heketi_url, device_id)
        raise
def _pv_resize(self, exceed_free_space):
    """Resize a PVC either within or beyond the available free space.

    When exceed_free_space is True the expansion is expected to fail
    and the app pod must stay writable; otherwise the expansion must
    succeed and the expanded space must be writable.
    """
    dir_path = "/mnt"
    pvc_size_gb, min_free_space_gb = 1, 3

    # Get available free space disabling redundant devices and nodes
    heketi_url = self.heketi_server_url
    node_id_list = heketi_ops.heketi_node_list(
        self.heketi_client_node, heketi_url)
    self.assertTrue(node_id_list)
    nodes = {}
    min_free_space = min_free_space_gb * 1024**2
    for node_id in node_id_list:
        node_info = heketi_ops.heketi_node_info(
            self.heketi_client_node, heketi_url, node_id, json=True)
        if (node_info['state'].lower() != 'online'
                or not node_info['devices']):
            continue
        # Keep only 3 nodes active, disable the rest
        if len(nodes) > 2:
            out = heketi_ops.heketi_node_disable(
                self.heketi_client_node, heketi_url, node_id)
            self.assertTrue(out)
            self.addCleanup(
                heketi_ops.heketi_node_enable, self.heketi_client_node,
                heketi_url, node_id)
        for device in node_info['devices']:
            if device['state'].lower() != 'online':
                continue
            free_space = device['storage']['free']
            # Disable extra devices on a node and too-small devices
            if (node_id in nodes.keys() or free_space < min_free_space):
                out = heketi_ops.heketi_device_disable(
                    self.heketi_client_node, heketi_url, device['id'])
                self.assertTrue(out)
                self.addCleanup(
                    heketi_ops.heketi_device_enable,
                    self.heketi_client_node, heketi_url, device['id'])
                continue
            nodes[node_id] = free_space
    if len(nodes) < 3:
        raise self.skipTest(
            "Could not find 3 online nodes with, "
            "at least, 1 online device having free space "
            "bigger than %dGb." % min_free_space_gb)

    # Calculate maximum available size for PVC
    available_size_gb = int(min(nodes.values()) / (1024**2))

    # Create PVC
    self.create_storage_class(allow_volume_expansion=True)
    pvc_name = self.create_and_wait_for_pvc(pvc_size=pvc_size_gb)

    # Create DC with POD and attached PVC to it
    dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
    self.addCleanup(oc_delete, self.node, 'dc', dc_name)
    self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
    pod_name = get_pod_name_from_dc(self.node, dc_name)
    wait_for_pod_be_ready(self.node, pod_name)

    if exceed_free_space:
        # Try to expand existing PVC exceeding free space
        resize_pvc(self.node, pvc_name, available_size_gb)
        wait_for_events(
            self.node, obj_name=pvc_name,
            event_reason='VolumeResizeFailed')

        # Check that app POD is up and runnig then try to write data
        wait_for_pod_be_ready(self.node, pod_name)
        cmd = (
            "dd if=/dev/urandom of=%s/autotest bs=100K count=1" % dir_path)
        ret, out, err = oc_rsh(self.node, pod_name, cmd)
        self.assertEqual(
            ret, 0,
            "Failed to write data after failed attempt to expand PVC.")
    else:
        # Expand existing PVC using all the available free space
        expand_size_gb = available_size_gb - pvc_size_gb
        resize_pvc(self.node, pvc_name, expand_size_gb)
        verify_pvc_size(self.node, pvc_name, expand_size_gb)
        pv_name = get_pv_name_from_pvc(self.node, pvc_name)
        verify_pv_size(self.node, pv_name, expand_size_gb)
        wait_for_events(
            self.node, obj_name=pvc_name,
            event_reason='VolumeResizeSuccessful')

        # Recreate app POD
        oc_delete(self.node, 'pod', pod_name)
        wait_for_resource_absence(self.node, 'pod', pod_name)
        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        # Write data on the expanded PVC
        cmd = ("dd if=/dev/urandom of=%s/autotest "
               "bs=1M count=1025" % dir_path)
        ret, out, err = oc_rsh(self.node, pod_name, cmd)
        self.assertEqual(
            ret, 0, "Failed to write data on the expanded PVC")
def test_volume_expansion_no_free_space(self):
    """Validate volume expansion when there is no free space.

    Flow: keep 3 nodes with only their first device online, attach one
    additional device per node (initially disabled), create a volume that
    nearly fills the smallest first device, verify expansion fails while
    the extra devices are disabled, then enable them and verify expansion
    succeeds and space accounting behaves as expected.
    """
    vol_size, expand_size, additional_devices_attached = None, 10, {}
    h_node, h_server_url = self.heketi_client_node, self.heketi_server_url

    # Get nodes info
    heketi_node_id_list = heketi_ops.heketi_node_list(h_node, h_server_url)
    if len(heketi_node_id_list) < 3:
        self.skipTest("3 Heketi nodes are required.")

    # Disable 4th and other nodes
    for node_id in heketi_node_id_list[3:]:
        heketi_ops.heketi_node_disable(h_node, h_server_url, node_id)
        self.addCleanup(
            heketi_ops.heketi_node_enable, h_node, h_server_url, node_id)

    # Prepare first 3 nodes
    smallest_size = None
    err_msg = ''
    for node_id in heketi_node_id_list[0:3]:
        node_info = heketi_ops.heketi_node_info(
            h_node, h_server_url, node_id, json=True)

        # Disable second and other devices
        devices = node_info["devices"]
        self.assertTrue(
            devices, "Node '%s' does not have devices." % node_id)
        if devices[0]["state"].strip().lower() != "online":
            self.skipTest("Test expects first device to be enabled.")
        # Track the smallest first-device free space across the 3 nodes;
        # it bounds the volume size that can be created.
        if (smallest_size is None
                or devices[0]["storage"]["free"] < smallest_size):
            smallest_size = devices[0]["storage"]["free"]
        for device in node_info["devices"][1:]:
            heketi_ops.heketi_device_disable(
                h_node, h_server_url, device["id"])
            self.addCleanup(
                heketi_ops.heketi_device_enable,
                h_node, h_server_url, device["id"])

        # Gather info about additional devices
        additional_device_name = None
        for gluster_server in self.gluster_servers:
            gluster_server_data = self.gluster_servers_info[gluster_server]
            g_manage = gluster_server_data["manage"]
            g_storage = gluster_server_data["storage"]
            # Match the config entry to this heketi node by hostname/IP.
            if not (g_manage in node_info["hostnames"]["manage"]
                    or g_storage in node_info["hostnames"]["storage"]):
                continue
            additional_device_name = ((
                gluster_server_data.get("additional_devices") or [''])[0])
            break

        if not additional_device_name:
            # Accumulate errors; skipping happens after cleanups below
            # are scheduled for devices that were attached successfully.
            err_msg += ("No 'additional_devices' are configured for "
                        "'%s' node, which has following hostnames and "
                        "IP addresses: %s.\n" % (
                            node_id,
                            ', '.join(
                                node_info["hostnames"]["manage"]
                                + node_info["hostnames"]["storage"])))
            continue

        heketi_ops.heketi_device_add(
            h_node, h_server_url, additional_device_name, node_id)
        additional_devices_attached.update(
            {node_id: additional_device_name})

    # Schedule cleanup of the added devices
    for node_id in additional_devices_attached.keys():
        node_info = heketi_ops.heketi_node_info(
            h_node, h_server_url, node_id, json=True)
        for device in node_info["devices"]:
            if device["name"] != additional_devices_attached[node_id]:
                continue
            self.addCleanup(self.detach_devices_attached, device["id"])
            break
        else:
            self.fail("Could not find ID for added device on "
                      "'%s' node." % node_id)

    if err_msg:
        self.skipTest(err_msg)

    # Temporary disable new devices
    self.disable_devices(additional_devices_attached)

    # Create volume and save info about it
    vol_size = int(smallest_size / (1024**2)) - 1
    creation_info = heketi_ops.heketi_volume_create(
        h_node, h_server_url, vol_size, json=True)
    volume_name, volume_id = creation_info["name"], creation_info["id"]
    self.addCleanup(
        heketi_ops.heketi_volume_delete,
        h_node, h_server_url, volume_id, raise_on_error=False)

    volume_info_before_expansion = heketi_ops.heketi_volume_info(
        h_node, h_server_url, volume_id, json=True)
    num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name)
    self.get_brick_and_volume_status(volume_name)
    free_space_before_expansion = self.get_devices_summary_free_space()

    # Try to expand volume with not enough device space
    self.assertRaises(
        ExecutionError, heketi_ops.heketi_volume_expand,
        h_node, h_server_url, volume_id, expand_size)

    # Enable new devices to be able to expand our volume
    self.enable_devices(additional_devices_attached)

    # Expand volume and validate results
    heketi_ops.heketi_volume_expand(
        h_node, h_server_url, volume_id, expand_size, json=True)
    free_space_after_expansion = self.get_devices_summary_free_space()
    self.assertGreater(
        free_space_before_expansion, free_space_after_expansion,
        "Free space not consumed after expansion of %s" % volume_id)
    num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)
    self.get_brick_and_volume_status(volume_name)
    volume_info_after_expansion = heketi_ops.heketi_volume_info(
        h_node, h_server_url, volume_id, json=True)
    self.assertGreater(
        volume_info_after_expansion["size"],
        volume_info_before_expansion["size"],
        "Size of %s not increased" % volume_id)
    self.assertGreater(
        num_of_bricks_after_expansion, num_of_bricks_before_expansion)
    # Expansion adds whole replica sets, so the new brick count must be
    # a multiple of the original count.
    self.assertEqual(
        num_of_bricks_after_expansion % num_of_bricks_before_expansion, 0)

    # Delete volume and validate release of the used space
    heketi_ops.heketi_volume_delete(h_node, h_server_url, volume_id)
    free_space_after_deletion = self.get_devices_summary_free_space()
    self.assertGreater(
        free_space_after_deletion, free_space_after_expansion,
        "Free space not reclaimed after deletion of volume %s" % volume_id)
def test_volume_creation_no_free_devices(self):
    """Validate heketi error is returned when no free devices available.

    Leaves a single online device per node, creates a small volume, then
    requests a second volume sized to the remaining common free space and
    expects heketi to refuse it.
    """
    node, server_url = self.heketi_client_node, self.heketi_server_url

    # Get nodes info
    node_id_list = heketi_ops.heketi_node_list(node, server_url)
    node_info_list = []
    for node_id in node_id_list[0:3]:
        node_info = heketi_ops.heketi_node_info(
            node, server_url, node_id, json=True)
        node_info_list.append(node_info)

    # Disable 4th and other nodes
    for node_id in node_id_list[3:]:
        heketi_ops.heketi_node_disable(node, server_url, node_id)
        self.addCleanup(
            heketi_ops.heketi_node_enable, node, server_url, node_id)

    # Disable second and other devices on the first 3 nodes
    for node_info in node_info_list[0:3]:
        devices = node_info["devices"]
        self.assertTrue(
            devices, "Node '%s' does not have devices." % node_info["id"])
        if devices[0]["state"].strip().lower() != "online":
            self.skipTest("Test expects first device to be enabled.")
        if len(devices) < 2:
            continue
        for device in node_info["devices"][1:]:
            out = heketi_ops.heketi_device_disable(
                node, server_url, device["id"])
            self.assertTrue(
                out, "Failed to disable the device %s" % device["id"])
            self.addCleanup(
                heketi_ops.heketi_device_enable,
                node, server_url, device["id"])

    # Calculate common available space
    # BUG FIX: the comprehension previously read the stale 'node_info'
    # loop variable instead of 'n', so every entry was the last node's
    # free space instead of each node's own.
    available_spaces = [
        int(n["devices"][0]["storage"]["free"])
        for n in node_info_list[0:3]]
    min_space_gb = int(min(available_spaces) / 1024**2)
    self.assertGreater(min_space_gb, 3, "Not enough available free space.")

    # Create first small volume
    vol = heketi_ops.heketi_volume_create(node, server_url, 1, json=True)
    self.addCleanup(
        heketi_ops.heketi_volume_delete, self.heketi_client_node,
        self.heketi_server_url, vol["id"])

    # Try to create second volume getting "no free space" error
    try:
        vol_fail = heketi_ops.heketi_volume_create(
            node, server_url, min_space_gb, json=True)
    except AssertionError:
        g.log.info("Volume was not created as expected.")
    else:
        # Unexpectedly succeeded: schedule deletion and fail the test.
        self.addCleanup(
            heketi_ops.heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, vol_fail["bricks"][0]["volume"])
        self.assertFalse(
            vol_fail,
            "Volume should have not been created. Out: %s" % vol_fail)
def test_volume_creation_of_size_greater_than_the_device_size(self):
    """Validate creation of a volume of size greater than the size of a
    device.

    Trims the cluster to 3 online nodes with 2 online devices each, then
    creates a volume one GiB larger than the biggest single device on
    the most-constrained node, forcing heketi to distribute bricks
    across devices. Verifies server IPs, replica count, distCount and
    brick count of the resulting gluster volume.
    """
    h_node, h_url = self.heketi_client_node, self.heketi_server_url

    # Remove existing BHV to calculate freespace
    bhv_list = heketi_ops.get_block_hosting_volume_list(h_node, h_url)
    if bhv_list:
        for bhv in bhv_list:
            bhv_info = heketi_ops.heketi_volume_info(
                h_node, h_url, bhv, json=True)
            # Only delete block-hosting volumes with no block volumes.
            if bhv_info['blockinfo'].get('blockvolume') is None:
                heketi_ops.heketi_volume_delete(h_node, h_url, bhv)

    topology = heketi_ops.heketi_topology_info(h_node, h_url, json=True)
    nodes_free_space, nodes_ips = [], []
    selected_nodes, selected_devices = [], []
    cluster = topology['clusters'][0]
    node_count = len(cluster['nodes'])
    msg = ("At least 3 Nodes are required in cluster. "
           "But only %s Nodes are present." % node_count)
    if node_count < 3:
        self.skipTest(msg)

    online_nodes_count = 0
    for node in cluster['nodes']:
        nodes_ips.append(node['hostnames']['storage'][0])
        if node['state'] != 'online':
            continue

        online_nodes_count += 1

        # Disable nodes after 3rd online nodes
        if online_nodes_count > 3:
            heketi_ops.heketi_node_disable(h_node, h_url, node['id'])
            self.addCleanup(
                heketi_ops.heketi_node_enable, h_node, h_url, node['id'])
            continue

        selected_nodes.append(node['id'])

        device_count = len(node['devices'])
        msg = ("At least 2 Devices are required on each Node."
               "But only %s Devices are present." % device_count)
        if device_count < 2:
            self.skipTest(msg)

        sel_devices, online_devices_count, free_space = [], 0, 0
        for device in node['devices']:
            if device['state'] != 'online':
                continue

            online_devices_count += 1

            # Disable devices after 2nd online devices
            if online_devices_count > 2:
                heketi_ops.heketi_device_disable(
                    h_node, h_url, device['id'])
                self.addCleanup(
                    heketi_ops.heketi_device_enable,
                    h_node, h_url, device['id'])
                continue

            sel_devices.append(device['id'])
            free_space += int(device['storage']['free'] / (1024**2))

        selected_devices.append(sel_devices)
        nodes_free_space.append(free_space)

        msg = ("At least 2 online Devices are required on each Node. "
               "But only %s Devices are online on Node: %s."
               % (online_devices_count, node['id']))
        if online_devices_count < 2:
            self.skipTest(msg)

    msg = ("At least 3 online Nodes are required in cluster. "
           "But only %s Nodes are online in Cluster: %s."
           % (online_nodes_count, cluster['id']))
    if online_nodes_count < 3:
        self.skipTest(msg)

    # Select node with minimum free space
    min_free_size = min(nodes_free_space)
    index = nodes_free_space.index(min_free_size)

    # Get max device size from selected node
    device_size = 0
    for device in selected_devices[index]:
        device_info = heketi_ops.heketi_device_info(
            h_node, h_url, device, json=True)
        device_size = max(
            device_size,
            (int(device_info['storage']['total'] / (1024**2))))

    vol_size = device_size + 1

    if vol_size >= min_free_size:
        self.skipTest('Required free space %s is not available' % vol_size)

    # Create heketi volume with device size + 1
    vol_info = self.create_heketi_volume_with_name_and_wait(
        name="volume_size_greater_than_device_size", size=vol_size,
        json=True)

    # Get gluster server IP's from heketi volume info
    glusterfs_servers = heketi_ops.get_vol_file_servers_and_hosts(
        h_node, h_url, vol_info['id'])

    # Verify gluster server IP's in heketi volume info
    msg = ("gluster IP's '%s' does not match with IP's '%s' found in "
           "heketi volume info" % (
               nodes_ips, glusterfs_servers['vol_servers']))
    self.assertEqual(
        set(glusterfs_servers['vol_servers']), set(nodes_ips), msg)

    vol_name = vol_info['name']
    gluster_v_info = self.get_gluster_vol_info(vol_name)

    # Verify replica count in gluster v info
    msg = "Volume %s is replica %s instead of replica 3" % (
        vol_name, gluster_v_info['replicaCount'])
    # BUG FIX: 'msg' was built but never passed to the assertion, losing
    # the diagnostic on failure.
    self.assertEqual('3', gluster_v_info['replicaCount'], msg)

    # Verify distCount in gluster v info
    msg = "Volume %s distCount is %s instead of distCount as 3" % (
        vol_name, int(gluster_v_info['distCount']))
    self.assertEqual(
        int(gluster_v_info['brickCount']) // 3,
        int(gluster_v_info['distCount']), msg)

    # Verify bricks count in gluster v info
    msg = (
        "Volume %s does not have bricks count multiple of 3. It has %s"
        % (vol_name, gluster_v_info['brickCount']))
    self.assertFalse(int(gluster_v_info['brickCount']) % 3, msg)
def test_dev_path_mapping_heketi_device_delete(self):
    """Validate dev path mapping for heketi device delete lifecycle.

    Full lifecycle: create a file volume with an app pod and verify IO,
    then for the selected node delete all devices (disable -> remove ->
    delete), re-add them, and finally create more PVCs to confirm bricks
    land on the re-added devices while the app pod IO keeps running.
    """
    h_client, h_url = self.heketi_client_node, self.heketi_server_url
    node_ids = heketi_ops.heketi_node_list(h_client, h_url)
    self.assertTrue(node_ids, "Failed to get heketi node list")

    # Fetch #4th node for the operations
    h_disable_node = node_ids[3]

    # Fetch bricks on the devices before volume create
    h_node_details_before, h_node = self._get_bricks_and_device_details()

    # Bricks count on the node before pvc creation
    brick_count_before = [count[1] for count in h_node_details_before]

    # Create file volume with app pod and verify IO's
    # and compare path, UUID, vg_name
    pod_name, dc_name, use_percent = self._create_app_pod_and_verify_pvs()

    # Check if IO's are running
    use_percent_after = self._get_space_use_percent_in_app_pod(pod_name)
    self.assertNotEqual(
        use_percent, use_percent_after,
        "Failed to execute IO's in the app pod {} after respin".format(
            pod_name))

    # Fetch bricks on the devices after volume create
    h_node_details_after, h_node = self._get_bricks_and_device_details()

    # Bricks count on the node after pvc creation
    brick_count_after = [count[1] for count in h_node_details_after]

    self.assertGreater(
        sum(brick_count_after), sum(brick_count_before),
        "Failed to add bricks on the node {}".format(h_node))

    # Enable the #4th node
    heketi_ops.heketi_node_enable(h_client, h_url, h_disable_node)
    node_info = heketi_ops.heketi_node_info(
        h_client, h_url, h_disable_node, json=True)
    h_node_id = node_info['id']
    self.assertEqual(
        node_info['state'], "online",
        "Failed to enable node {}".format(h_disable_node))

    # Fetch device list i.e to be deleted
    h_node_info = heketi_ops.heketi_node_info(
        h_client, h_url, h_node, json=True)
    devices_list = [
        [device['id'], device['name']]
        for device in h_node_info['devices']]

    # Device deletion operation
    for device in devices_list:
        device_id, device_name = device[0], device[1]
        # Register re-enable first so it runs last during cleanup.
        self.addCleanup(
            heketi_ops.heketi_device_enable, h_client, h_url,
            device_id, raise_on_error=False)

        # Disable device from heketi
        device_disable = heketi_ops.heketi_device_disable(
            h_client, h_url, device_id)
        self.assertTrue(
            device_disable,
            "Device {} could not be disabled".format(device_id))

        device_info = heketi_ops.heketi_device_info(
            h_client, h_url, device_id, json=True)
        self.assertEqual(
            device_info['state'], "offline",
            "Failed to disable device {}".format(device_id))

        # Remove device from heketi
        device_remove = heketi_ops.heketi_device_remove(
            h_client, h_url, device_id)
        self.assertTrue(
            device_remove,
            "Device {} could not be removed".format(device_id))

        # Bricks after device removal
        device_info = heketi_ops.heketi_device_info(
            h_client, h_url, device_id, json=True)
        bricks_count_after = len(device_info['bricks'])
        self.assertFalse(
            bricks_count_after,
            "Failed to remove the bricks from the device {}".format(
                device_id))

        # Delete device from heketi
        self.addCleanup(
            heketi_ops.heketi_device_add, h_client, h_url,
            device_name, h_node, raise_on_error=False)
        device_delete = heketi_ops.heketi_device_delete(
            h_client, h_url, device_id)
        self.assertTrue(
            device_delete,
            "Device {} could not be deleted".format(device_id))

    # Check if IO's are running after device is deleted
    use_percent_after = self._get_space_use_percent_in_app_pod(pod_name)
    self.assertNotEqual(
        use_percent, use_percent_after,
        "Failed to execute IO's in the app pod {} after respin".format(
            pod_name))

    # Add device operations
    for device in devices_list:
        device_name = device[1]

        # Add device back to the node
        heketi_ops.heketi_device_add(h_client, h_url, device_name, h_node)

        # Fetch device info after device add
        node_info = heketi_ops.heketi_node_info(
            h_client, h_url, h_node, json=True)
        device_id = None
        for device in node_info["devices"]:
            if device["name"] == device_name:
                device_id = device["id"]
                break
        self.assertTrue(
            device_id,
            "Failed to add device {} on node"
            " {}".format(device_name, h_node))

    # Disable the #4th node
    heketi_ops.heketi_node_disable(h_client, h_url, h_node_id)
    node_info = heketi_ops.heketi_node_info(
        h_client, h_url, h_node_id, json=True)
    self.assertEqual(
        node_info['state'], "offline",
        "Failed to disable node {}".format(h_node_id))

    pvc_amount, pvc_size = 5, 1

    # Fetch bricks on the devices before volume create
    h_node_details_before, h_node = self._get_bricks_and_device_details()

    # Bricks count on the node before pvc creation
    brick_count_before = [count[1] for count in h_node_details_before]

    # Create file volumes
    pvc_name = self.create_and_wait_for_pvcs(
        pvc_size=pvc_size, pvc_amount=pvc_amount)
    self.assertEqual(
        len(pvc_name), pvc_amount,
        "Failed to create {} pvc".format(pvc_amount))

    # Fetch bricks on the devices after volume create
    h_node_details_after, h_node = self._get_bricks_and_device_details()

    # Bricks count on the node after pvc creation
    brick_count_after = [count[1] for count in h_node_details_after]

    self.assertGreater(
        sum(brick_count_after), sum(brick_count_before),
        "Failed to add bricks on the node {}".format(h_node))

    # Check if IO's are running after new device is added
    use_percent_after = self._get_space_use_percent_in_app_pod(pod_name)
    self.assertNotEqual(
        use_percent, use_percent_after,
        "Failed to execute IO's in the app pod {} after respin".format(
            pod_name))
def test_create_arbiter_vol_with_more_than_one_brick_set(self):
    """Validate volume creation using heketi for more than six brick set.

    Tags the first two nodes (and their devices) as arbiter:disabled
    (data) and the remaining nodes as arbiter:required, then sizes the
    target volume so that it cannot fit in a single brick set, forcing
    a 2x arbiter layout (2 arbiter bricks + 4 data bricks).
    """
    # Set arbiter:disabled tag to the data devices and get their info
    data_nodes = []
    for node_id in self.node_id_list[0:2]:
        node_info = heketi_ops.heketi_node_info(
            self.heketi_client_node, self.heketi_server_url,
            node_id, json=True)

        if len(node_info['devices']) < 2:
            self.skipTest("Nodes are expected to have at least 2 devices")

        if not all([int(d['storage']['free']) > (3 * 1024**2)
                    for d in node_info['devices'][0:2]]):
            self.skipTest(
                "Devices are expected to have more than 3Gb of free space")
        for device in node_info['devices']:
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url,
                'device', device['id'], 'disabled',
                device.get('tags', {}).get('arbiter'))
        self._set_arbiter_tag_with_further_revert(
            self.heketi_client_node, self.heketi_server_url,
            'node', node_id, 'disabled',
            node_info.get('tags', {}).get('arbiter'))

        data_nodes.append(node_info)

    # Set arbiter:required tag to all other nodes and their devices
    for node_id in self.node_id_list[2:]:
        node_info = heketi_ops.heketi_node_info(
            self.heketi_client_node, self.heketi_server_url,
            node_id, json=True)
        self._set_arbiter_tag_with_further_revert(
            self.heketi_client_node, self.heketi_server_url,
            'node', node_id, 'required',
            node_info.get('tags', {}).get('arbiter'))
        for device in node_info['devices']:
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url,
                'device', device['id'], 'required',
                device.get('tags', {}).get('arbiter'))

    # Get second big volume between 2 data nodes and use it
    # for target vol calculation.
    for i, node_info in enumerate(data_nodes):
        biggest_disk_free_space = 0
        for device in node_info['devices'][0:2]:
            free = int(device['storage']['free'])
            if free > biggest_disk_free_space:
                biggest_disk_free_space = free
        data_nodes[i]['biggest_free_space'] = biggest_disk_free_space
    # One KB more than the smaller of the two nodes' biggest devices,
    # so the volume cannot fit on a single device.
    target_vol_size_kb = 1 + min(
        [n['biggest_free_space'] for n in data_nodes])

    # Check that all the data devices have, at least, half of required size
    all_big_enough = True
    for node_info in data_nodes:
        for device in node_info['devices'][0:2]:
            if float(device['storage']['free']) < (target_vol_size_kb / 2):
                all_big_enough = False
                # NOTE: breaks only the inner loop; outer loop continues,
                # but the flag is never reset so the result is correct.
                break

    # Create sc with gluster arbiter info
    self.create_storage_class(is_arbiter_vol=True)

    # Create helper arbiter vol if not all the data devices have
    # half of required free space.
    if not all_big_enough:
        helper_vol_size_kb, target_vol_size_kb = 0, 0
        smaller_device_id = None
        for node_info in data_nodes:
            devices = node_info['devices']
            if ((devices[0]['storage']['free']) > (
                    devices[1]['storage']['free'])):
                smaller_device_id = devices[1]['id']
                smaller_device = devices[1]['storage']['free']
                bigger_device = devices[0]['storage']['free']
            else:
                smaller_device_id = devices[0]['id']
                smaller_device = devices[0]['storage']['free']
                bigger_device = devices[1]['storage']['free']
            # Size a helper volume that shrinks the bigger device until
            # a 2-brick-set split becomes necessary.
            diff = bigger_device - (2 * smaller_device) + 1
            if diff > helper_vol_size_kb:
                helper_vol_size_kb = diff
                target_vol_size_kb = bigger_device - diff

        # Disable smaller device and create helper vol on bigger one
        # to reduce its size, then enable smaller device back.
        try:
            out = heketi_ops.heketi_device_disable(
                self.heketi_client_node, self.heketi_server_url,
                smaller_device_id)
            self.assertTrue(out)
            self.create_and_wait_for_pvc(
                int(helper_vol_size_kb / 1024.0**2) + 1)
        finally:
            out = heketi_ops.heketi_device_enable(
                self.heketi_client_node, self.heketi_server_url,
                smaller_device_id)
            self.assertTrue(out)

    # Create target arbiter volume
    self.create_and_wait_for_pvc(int(target_vol_size_kb / 1024.0**2))

    # Get gluster volume info
    vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
        self.node, self.pvc_name)

    self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
        vol_info, arbiter_bricks=2, data_bricks=4)
def test_heketi_device_replacement_in_node(self):
    """Validate device replacement operation on single node.

    Creates block volumes on a 3-node cluster, adds an additional device
    to the chosen node, then replaces (disable -> remove -> delete) an
    existing device of the same size, relying on heketi to migrate the
    bricks to the new device.
    """
    h_client, h_server = self.heketi_client_node, self.heketi_server_url

    try:
        gluster_server_0 = list(g.config["gluster_servers"].values())[0]
        manage_hostname = gluster_server_0["manage"]
        add_device_name = gluster_server_0["additional_devices"][0]
    except (KeyError, IndexError):
        self.skipTest(
            "Additional disk is not specified for node with following "
            "hostnames and IP addresses: {}, {}".format(
                gluster_server_0.get('manage', '?'),
                gluster_server_0.get('storage', '?')))

    # Get existing heketi volume list
    existing_volumes = heketi_volume_list(h_client, h_server, json=True)

    # Add cleanup function to clean stale volumes created during test
    self.addCleanup(
        self._cleanup_heketi_volumes, existing_volumes.get("volumes"))

    # Get nodes info
    node_id_list = heketi_node_list(h_client, h_server)

    # Disable 4th and other nodes
    if len(node_id_list) > 3:
        for node_id in node_id_list[3:]:
            heketi_node_disable(h_client, h_server, node_id)
            self.addCleanup(
                heketi_node_enable, h_client, h_server, node_id)

    # Create volume when 3 nodes are online
    vol_size, vol_count = 2, 4
    for _ in range(vol_count):
        vol_info = heketi_blockvolume_create(
            h_client, h_server, vol_size, json=True)
        self.addCleanup(
            heketi_blockvolume_delete, h_client, h_server, vol_info['id'])

    # Get node ID of the Gluster hostname
    topology_info = heketi_topology_info(h_client, h_server, json=True)
    self.assertIsNotNone(topology_info, "Failed to get topology info")
    self.assertIn(
        "clusters", topology_info.keys(), "Failed to get cluster "
        "details from topology info")
    node_list = topology_info["clusters"][0]["nodes"]
    self.assertTrue(
        node_list, "Cluster info command returned empty list of nodes")

    node_id = None
    for node in node_list:
        if manage_hostname == node['hostnames']["manage"][0]:
            node_id = node["id"]
            break
    self.assertTrue(
        node_id,
        "Failed to get node info for node id '{}'".format(manage_hostname))

    # Add extra device, then remember it's ID and size
    device_id_new, device_size_new = self._add_heketi_device(
        add_device_name, node_id)

    # Remove one of the existing devices on node except new device
    device_name, device_id = None, None
    node_info_after_addition = heketi_node_info(
        h_client, h_server, node_id, json=True)
    for device in node_info_after_addition["devices"]:
        # Pick an old device of the same total size as the new one so
        # the replacement has guaranteed capacity for the bricks.
        if (device["name"] != add_device_name
                and device["storage"]["total"] == device_size_new):
            device_name = device["name"]
            device_id = device["id"]
            break

    self.assertIsNotNone(device_name, "Failed to get device name")
    self.assertIsNotNone(device_id, "Failed to get device id")
    # Cleanups run in reverse order: re-add the device, then enable it.
    self.addCleanup(
        heketi_device_enable, h_client, h_server, device_id,
        raise_on_error=False)
    self.addCleanup(
        heketi_device_add, h_client, h_server, device_name, node_id,
        raise_on_error=False)
    heketi_device_disable(h_client, h_server, device_id)
    heketi_device_remove(h_client, h_server, device_id)
    heketi_device_delete(h_client, h_server, device_id)
def test_heketi_device_remove(self, delete_device):
    """Validate remove/delete device using heketi-cli.

    Args:
        delete_device (bool): when True the device is also deleted after
            removal; when False the device is only removed and the test
            additionally verifies no new bricks land on it.
    """
    gluster_server_0 = list(g.config["gluster_servers"].values())[0]
    try:
        device_name = gluster_server_0["additional_devices"][0]
    except (KeyError, IndexError):
        self.skipTest(
            "Additional disk is not specified for node with following "
            "hostnames and IP addresses: %s, %s." % (
                gluster_server_0.get('manage', '?'),
                gluster_server_0.get('storage', '?')))
    manage_hostname = gluster_server_0["manage"]

    # Get node ID of the Gluster hostname
    topo_info = heketi_topology_info(
        self.heketi_client_node, self.heketi_server_url, json=True)
    self.assertTrue(
        topo_info["clusters"][0]["nodes"],
        "Cluster info command returned empty list of nodes.")
    node_id = None
    for node in topo_info["clusters"][0]["nodes"]:
        if manage_hostname == node['hostnames']["manage"][0]:
            node_id = node["id"]
            break
    self.assertNotEqual(
        node_id, None,
        "No information about node_id for %s" % manage_hostname)

    # Iterate chosen node devices and pick the smallest online one.
    lowest_device_size = lowest_device_id = None
    online_hosts = self.get_online_nodes_disable_redundant()
    for host in online_hosts[0:3]:
        if node_id != host["id"]:
            continue
        for device in host["devices"]:
            if device["state"].strip().lower() != "online":
                continue
            if (lowest_device_size is None
                    or device["storage"]["total"] < lowest_device_size):
                lowest_device_size = device["storage"]["total"]
                lowest_device_id = device["id"]
                lowest_device_name = device["name"]
    if lowest_device_id is None:
        self.skipTest(
            "Didn't find suitable device for disablement on '%s' node."
            % (node_id))

    # Create volume
    vol_size = 1
    vol_info = heketi_volume_create(
        self.heketi_client_node, self.heketi_server_url, vol_size,
        json=True)
    self.addCleanup(
        heketi_volume_delete, self.heketi_client_node,
        self.heketi_server_url, vol_info['id'])

    # Add extra device, then remember it's ID and size
    heketi_device_add(
        self.heketi_client_node, self.heketi_server_url, device_name,
        node_id)
    node_info_after_addition = heketi_node_info(
        self.heketi_client_node, self.heketi_server_url, node_id,
        json=True)
    for device in node_info_after_addition["devices"]:
        if device["name"] != device_name:
            continue
        device_id_new = device["id"]
        device_size_new = device["storage"]["total"]
    # Cleanups run in reverse: disable -> remove -> delete the new device.
    self.addCleanup(
        heketi_device_delete, self.heketi_client_node,
        self.heketi_server_url, device_id_new)
    self.addCleanup(
        heketi_device_remove, self.heketi_client_node,
        self.heketi_server_url, device_id_new)
    self.addCleanup(
        heketi_device_disable, self.heketi_client_node,
        self.heketi_server_url, device_id_new)

    if lowest_device_size > device_size_new:
        skip_msg = ("Skip test case, because newly added disk %s is "
                    "smaller than device which we want to remove %s."
                    % (device_size_new, lowest_device_size))
        self.skipTest(skip_msg)

    g.log.info("Removing device id %s" % lowest_device_id)
    # Removal of an enabled device must fail; the statements after the
    # failing call are only reached (and register cleanups) if it does
    # not raise.
    with self.assertRaises(AssertionError):
        out = heketi_device_remove(
            self.heketi_client_node, self.heketi_server_url,
            lowest_device_id)
        self.addCleanup(
            heketi_device_enable, self.heketi_client_node,
            self.heketi_server_url, lowest_device_id)
        self.addCleanup(
            heketi_device_disable, self.heketi_client_node,
            self.heketi_server_url, lowest_device_id)
        self.assertFalse(True, "Device removal didn't fail: %s" % out)
    g.log.info("Device removal failed as expected")

    # Need to disable device before removing
    heketi_device_disable(
        self.heketi_client_node, self.heketi_server_url,
        lowest_device_id)
    if not delete_device:
        self.addCleanup(
            heketi_device_enable, self.heketi_client_node,
            self.heketi_server_url, lowest_device_id)

    # Remove device from Heketi
    try:
        heketi_device_remove(
            self.heketi_client_node, self.heketi_server_url,
            lowest_device_id)
    except Exception:
        if delete_device:
            self.addCleanup(
                heketi_device_enable, self.heketi_client_node,
                self.heketi_server_url, lowest_device_id)
        raise
    if not delete_device:
        self.addCleanup(
            heketi_device_disable, self.heketi_client_node,
            self.heketi_server_url, lowest_device_id)

    if delete_device:
        try:
            heketi_device_delete(
                self.heketi_client_node, self.heketi_server_url,
                lowest_device_id)
        except Exception:
            self.addCleanup(
                heketi_device_enable, self.heketi_client_node,
                self.heketi_server_url, lowest_device_id)
            self.addCleanup(
                heketi_device_disable, self.heketi_client_node,
                self.heketi_server_url, lowest_device_id)
            raise
        self.addCleanup(
            heketi_device_add, self.heketi_client_node,
            self.heketi_server_url, lowest_device_name, node_id)

    # Create volume
    vol_info = heketi_volume_create(
        self.heketi_client_node, self.heketi_server_url, vol_size,
        json=True)
    self.addCleanup(
        heketi_volume_delete, self.heketi_client_node,
        self.heketi_server_url, vol_info['id'])

    if delete_device:
        return

    # Check that none of volume's bricks is present on the device
    present = self.check_any_of_bricks_present_in_device(
        vol_info['bricks'], lowest_device_id)
    self.assertFalse(
        present,
        "Some of the '%s' volume bricks is present of the removed "
        "'%s' device." % (vol_info['id'], lowest_device_id))
def test_heketi_prometheus_usedbytes_brickcount_on_device_delete(
        self, operation):
    """Validate used bytes,device count on heketi and prometheus.

    Args:
        operation (str): either "usedbytes" or "brickcount" — selects
            which heketi metric is cross-checked against the value
            scraped from the prometheus pod after a device is deleted.
    """
    h_node, h_server = self.heketi_client_node, self.heketi_server_url

    # Get list of additional devices for one of the Gluster nodes
    gluster_server_0 = list(self.gluster_servers_info.values())[0]
    manage_hostname = gluster_server_0.get("manage")
    self.assertTrue(
        manage_hostname, "IP Address is not specified for "
        "node {}".format(gluster_server_0))
    device_name = gluster_server_0.get("additional_devices")[0]
    self.assertTrue(
        device_name, "Additional devices are not specified for "
        "node {}".format(gluster_server_0))

    # Get node ID of the Gluster hostname
    node_list = heketi_ops.heketi_topology_info(
        h_node, h_server, json=True).get("clusters")[0].get("nodes")
    self.assertTrue(
        node_list, "Cluster info command returned empty list of nodes")
    node_id = [
        node.get("id")
        for node in node_list
        if manage_hostname == node.get("hostnames").get("manage")[0]]
    self.assertTrue(
        node_id, "Failed to get node_id for {}".format(manage_hostname))
    node_id = node_id[0]

    # Adding heketi device
    heketi_ops.heketi_device_add(h_node, h_server, device_name, node_id)
    node_info_after_addition = heketi_ops.heketi_node_info(
        h_node, h_server, node_id, json=True)
    device_id, bricks = None, None
    for device in node_info_after_addition.get("devices"):
        if device.get("name") == device_name:
            device_id, bricks = (
                device.get("id"), len(device.get("bricks")))
            break

    # Verify zero bricks on the device
    msg = (
        "Number of bricks on the device {} of the nodes should be"
        "zero".format(device_name))
    self.assertFalse(bricks, msg)
    # Cleanups run in reverse order: disable -> remove -> delete; they
    # are best-effort because the test deletes the device itself below.
    self.addCleanup(
        heketi_ops.heketi_device_delete, h_node, h_server, device_id,
        raise_on_error=False)
    self.addCleanup(
        heketi_ops.heketi_device_remove, h_node, h_server, device_id,
        raise_on_error=False)
    self.addCleanup(
        heketi_ops.heketi_device_disable, h_node, h_server, device_id,
        raise_on_error=False)

    # Disable,Remove and Delete heketi device
    heketi_ops.heketi_device_disable(h_node, h_server, device_id)
    heketi_ops.heketi_device_remove(h_node, h_server, device_id)
    heketi_ops.heketi_device_delete(h_node, h_server, device_id)

    # Verify device deletion
    node_info_after_deletion = (
        heketi_ops.heketi_node_info(h_node, h_server, node_id))
    msg = ("Device {} should not be shown in node info of the node {}"
           "after the device deletion".format(device_id, node_id))
    self.assertNotIn(device_id, node_info_after_deletion, msg)

    if operation == "usedbytes":
        # Validate heketi and prometheus device used bytes
        for w in waiter.Waiter(timeout=60, interval=10):
            device_used_bytes_prometheus = 0
            device_used_bytes_metrics = 0
            openshift_ops.switch_oc_project(
                self.ocp_master_node[0], 'openshift-monitoring')
            metric_result = self._fetch_metric_from_promtheus_pod(
                metric='heketi_device_used_bytes')
            for result in metric_result:
                if (node_id == result.get('cluster')
                        and device_name == result.get('device')):
                    device_used_bytes_prometheus += (
                        int(result.get('value')[1]))
            openshift_ops.switch_oc_project(
                self.ocp_master_node[0], 'glusterfs')
            metrics = heketi_ops.get_heketi_metrics(h_node, h_server)
            heketi_device_count_metric = (
                metrics.get('heketi_device_used_bytes'))
            for result in heketi_device_count_metric:
                if (node_id == result.get('cluster')
                        and device_name == result.get('device')):
                    device_used_bytes_metrics = int(result.get('value'))
            # Poll until both sources agree or the waiter expires.
            if device_used_bytes_prometheus == device_used_bytes_metrics:
                break
        if w.expired:
            raise exceptions.ExecutionError(
                "Failed to update device details in prometheus")

    elif operation == "brickcount":
        # Validate heketi and prometheus device brick count
        for w in waiter.Waiter(timeout=60, interval=10):
            device_brick_count_prometheus = 0
            device_brick_count_metrics = 0
            metrics = heketi_ops.get_heketi_metrics(h_node, h_server)
            heketi_device_count_metric = metrics.get(
                'heketi_device_brick_count')
            for result in heketi_device_count_metric:
                device_brick_count_metrics += int(result.get('value'))
            openshift_ops.switch_oc_project(
                self.ocp_master_node[0], 'openshift-monitoring')
            metric_result = self._fetch_metric_from_promtheus_pod(
                metric='heketi_device_brick_count')
            for result in metric_result:
                device_brick_count_prometheus += (
                    int(result.get('value')[1]))
            # Poll until both sources agree or the waiter expires.
            if device_brick_count_prometheus == device_brick_count_metrics:
                break
        if w.expired:
            raise exceptions.ExecutionError(
                "Failed to update device details in prometheus")
def test_volume_expansion_no_free_space(self):
    """Validate volume expansion when there is no free space.

    Flow:
      1. Keep only the first 3 heketi nodes enabled, with only their first
         device online, and attach one additional (disabled) device per node.
      2. Create a volume that consumes almost all of the smallest first
         device, so a 10 GB expansion cannot fit.
      3. Verify expansion fails, then enable the additional devices and
         verify expansion succeeds (size grows, brick count grows by an
         integer multiple).
      4. Delete the volume and verify the space is reclaimed.
    """
    vol_size, expand_size, additional_devices_attached = None, 10, {}
    h_node, h_server_url = self.heketi_client_node, self.heketi_server_url

    # Get nodes info
    heketi_node_id_list = heketi_ops.heketi_node_list(h_node, h_server_url)
    if len(heketi_node_id_list) < 3:
        self.skipTest("3 Heketi nodes are required.")

    # Disable 4th and other nodes so placement is confined to the first 3
    for node_id in heketi_node_id_list[3:]:
        heketi_ops.heketi_node_disable(h_node, h_server_url, node_id)
        self.addCleanup(
            heketi_ops.heketi_node_enable, h_node, h_server_url, node_id)

    # Prepare first 3 nodes
    smallest_size = None
    err_msg = ''
    for node_id in heketi_node_id_list[0:3]:
        node_info = heketi_ops.heketi_node_info(
            h_node, h_server_url, node_id, json=True)

        # Disable second and other devices; only devices[0] stays usable
        devices = node_info["devices"]
        self.assertTrue(
            devices, "Node '%s' does not have devices." % node_id)
        if devices[0]["state"].strip().lower() != "online":
            self.skipTest("Test expects first device to be enabled.")
        # Track the smallest free space among the first devices — it caps
        # the volume size that can be created on a 3-way replica.
        if (smallest_size is None
                or devices[0]["storage"]["free"] < smallest_size):
            smallest_size = devices[0]["storage"]["free"]
        for device in node_info["devices"][1:]:
            heketi_ops.heketi_device_disable(
                h_node, h_server_url, device["id"])
            self.addCleanup(
                heketi_ops.heketi_device_enable,
                h_node, h_server_url, device["id"])

        # Gather info about additional devices for the gluster server
        # whose hostnames/IPs match this heketi node
        additional_device_name = None
        for gluster_server in self.gluster_servers:
            gluster_server_data = self.gluster_servers_info[gluster_server]
            g_manage = gluster_server_data["manage"]
            g_storage = gluster_server_data["storage"]
            if not (g_manage in node_info["hostnames"]["manage"]
                    or g_storage in node_info["hostnames"]["storage"]):
                continue
            additional_device_name = ((
                gluster_server_data.get("additional_devices") or [''])[0])
            break

        if not additional_device_name:
            # Collect the reasons and skip later, after cleanups for the
            # devices already attached have been scheduled.
            err_msg += ("No 'additional_devices' are configured for "
                        "'%s' node, which has following hostnames and "
                        "IP addresses: %s.\n" % (
                            node_id,
                            ', '.join(
                                node_info["hostnames"]["manage"]
                                + node_info["hostnames"]["storage"])))
            continue

        heketi_ops.heketi_device_add(
            h_node, h_server_url, additional_device_name, node_id)
        additional_devices_attached.update(
            {node_id: additional_device_name})

    # Schedule cleanup of the added devices (look up their heketi IDs)
    for node_id in additional_devices_attached.keys():
        node_info = heketi_ops.heketi_node_info(
            h_node, h_server_url, node_id, json=True)
        for device in node_info["devices"]:
            if device["name"] != additional_devices_attached[node_id]:
                continue
            self.addCleanup(self.detach_devices_attached, device["id"])
            break
        else:
            # for/else: no device matched the added device's name
            self.fail("Could not find ID for added device on "
                      "'%s' node." % node_id)

    if err_msg:
        self.skipTest(err_msg)

    # Temporary disable new devices so the first expansion attempt
    # has nowhere to place new bricks
    self.disable_devices(additional_devices_attached)

    # Create volume and save info about it; size is just under the
    # smallest first-device free space (KiB -> GiB conversion)
    vol_size = int(smallest_size / (1024**2)) - 1
    creation_info = heketi_ops.heketi_volume_create(
        h_node, h_server_url, vol_size, json=True)
    volume_name, volume_id = creation_info["name"], creation_info["id"]
    self.addCleanup(
        heketi_ops.heketi_volume_delete,
        h_node, h_server_url, volume_id, raise_on_error=False)

    volume_info_before_expansion = heketi_ops.heketi_volume_info(
        h_node, h_server_url, volume_id, json=True)
    num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name)
    self.get_brick_and_volume_status(volume_name)
    free_space_before_expansion = self.get_devices_summary_free_space()

    # Try to expand volume with not enough device space — must fail
    self.assertRaises(
        AssertionError, heketi_ops.heketi_volume_expand,
        h_node, h_server_url, volume_id, expand_size)

    # Enable new devices to be able to expand our volume
    self.enable_devices(additional_devices_attached)

    # Expand volume and validate results
    heketi_ops.heketi_volume_expand(
        h_node, h_server_url, volume_id, expand_size, json=True)
    free_space_after_expansion = self.get_devices_summary_free_space()
    self.assertGreater(
        free_space_before_expansion, free_space_after_expansion,
        "Free space not consumed after expansion of %s" % volume_id)
    num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)
    self.get_brick_and_volume_status(volume_name)
    volume_info_after_expansion = heketi_ops.heketi_volume_info(
        h_node, h_server_url, volume_id, json=True)
    self.assertGreater(
        volume_info_after_expansion["size"],
        volume_info_before_expansion["size"],
        "Size of %s not increased" % volume_id)
    self.assertGreater(
        num_of_bricks_after_expansion, num_of_bricks_before_expansion)
    # Expansion adds whole replica sets, so the new brick count must be
    # an exact multiple of the old one
    self.assertEqual(
        num_of_bricks_after_expansion % num_of_bricks_before_expansion, 0)

    # Delete volume and validate release of the used space
    heketi_ops.heketi_volume_delete(h_node, h_server_url, volume_id)
    free_space_after_deletion = self.get_devices_summary_free_space()
    self.assertGreater(
        free_space_after_deletion, free_space_after_expansion,
        "Free space not reclaimed after deletion of volume %s" % volume_id)
def _pv_resize(self, exceed_free_space):
    """Shared driver for PVC resize tests.

    Keeps 3 online heketi nodes, each with exactly one usable online
    device having at least ``min_free_space_gb`` free, creates a 1 GB PVC
    attached to an app DC pod, then:

    - ``exceed_free_space=True``: requests a resize larger than available
      space, expects a 'VolumeResizeFailed' event, and verifies the pod
      can still write data.
    - ``exceed_free_space=False``: resizes up to the available space,
      verifies PVC/PV sizes and the 'VolumeResizeSuccessful' event,
      recreates the pod and writes ~1 GB of data.
    """
    dir_path = "/mnt"
    pvc_size_gb, min_free_space_gb = 1, 3

    # Get available free space disabling redundant devices and nodes
    heketi_url = self.heketi_server_url
    node_id_list = heketi_ops.heketi_node_list(
        self.heketi_client_node, heketi_url)
    self.assertTrue(node_id_list)
    nodes = {}
    min_free_space = min_free_space_gb * 1024**2  # GiB -> KiB
    for node_id in node_id_list:
        node_info = heketi_ops.heketi_node_info(
            self.heketi_client_node, heketi_url, node_id, json=True)
        if (node_info['state'].lower() != 'online'
                or not node_info['devices']):
            continue
        if len(nodes) > 2:
            # Already have 3 candidate nodes — disable the surplus node.
            # NOTE(review): control still falls through to the device
            # loop below, so a surplus node's eligible device can end up
            # in `nodes` as well — confirm this is intended.
            out = heketi_ops.heketi_node_disable(
                self.heketi_client_node, heketi_url, node_id)
            self.assertTrue(out)
            self.addCleanup(
                heketi_ops.heketi_node_enable,
                self.heketi_client_node, heketi_url, node_id)
        for device in node_info['devices']:
            if device['state'].lower() != 'online':
                continue
            free_space = device['storage']['free']
            # Keep only the first sufficiently-large device per node;
            # disable every other online device.
            if (node_id in nodes.keys() or free_space < min_free_space):
                out = heketi_ops.heketi_device_disable(
                    self.heketi_client_node, heketi_url, device['id'])
                self.assertTrue(out)
                self.addCleanup(
                    heketi_ops.heketi_device_enable,
                    self.heketi_client_node, heketi_url, device['id'])
                continue
            nodes[node_id] = free_space
    if len(nodes) < 3:
        # skipTest() itself raises SkipTest; the `raise` never executes
        # but is harmless.
        raise self.skipTest(
            "Could not find 3 online nodes with, "
            "at least, 1 online device having free space "
            "bigger than %dGb." % min_free_space_gb)

    # Calculate maximum available size for PVC (bounded by the smallest
    # kept device; KiB -> GiB)
    available_size_gb = int(min(nodes.values()) / (1024**2))

    # Create PVC
    self.create_storage_class(allow_volume_expansion=True)
    pvc_name = self.create_and_wait_for_pvc(pvc_size=pvc_size_gb)

    # Create DC with POD and attached PVC to it
    dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
    self.addCleanup(oc_delete, self.node, 'dc', dc_name)
    self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
    pod_name = get_pod_name_from_dc(self.node, dc_name)
    wait_for_pod_be_ready(self.node, pod_name)

    if exceed_free_space:
        # Try to expand existing PVC exceeding free space
        resize_pvc(self.node, pvc_name, available_size_gb)
        wait_for_events(self.node, obj_name=pvc_name,
                        event_reason='VolumeResizeFailed')

        # Check that app POD is up and runnig then try to write data
        wait_for_pod_be_ready(self.node, pod_name)
        cmd = (
            "dd if=/dev/urandom of=%s/autotest bs=100K count=1" % dir_path)
        ret, out, err = oc_rsh(self.node, pod_name, cmd)
        self.assertEqual(
            ret, 0,
            "Failed to write data after failed attempt to expand PVC.")
    else:
        # Expand existing PVC using all the available free space
        expand_size_gb = available_size_gb - pvc_size_gb
        resize_pvc(self.node, pvc_name, expand_size_gb)
        verify_pvc_size(self.node, pvc_name, expand_size_gb)
        pv_name = get_pv_name_from_pvc(self.node, pvc_name)
        verify_pv_size(self.node, pv_name, expand_size_gb)
        wait_for_events(
            self.node, obj_name=pvc_name,
            event_reason='VolumeResizeSuccessful')

        # Recreate app POD so it remounts the resized volume
        oc_delete(self.node, 'pod', pod_name)
        wait_for_resource_absence(self.node, 'pod', pod_name)
        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        # Write data on the expanded PVC (1025 MB — beyond the original
        # 1 GB size, proving the new capacity is usable)
        cmd = ("dd if=/dev/urandom of=%s/autotest "
               "bs=1M count=1025" % dir_path)
        ret, out, err = oc_rsh(self.node, pod_name, cmd)
        self.assertEqual(
            ret, 0, "Failed to write data on the expanded PVC")
def test_heketi_device_enable_disable(self):
    """Validate device enable and disable functionality.

    Leaves exactly one device online on the first online node, verifies a
    volume can be created on it, then verifies volume creation fails while
    that device is disabled and succeeds again once it is re-enabled.
    """
    # Disable all but one device on the first online node
    online_hosts = self.get_online_nodes_disable_redundant()
    online_device_id = ""
    for device in online_hosts[0]["devices"]:
        if device["state"].strip().lower() != "online":
            continue
        device_id = device["id"]
        if online_device_id == "":
            # Keep the first online device enabled
            online_device_id = device_id
        else:
            g.log.info("going to disable device %s", device_id)
            heketi_device_disable(
                self.heketi_client_node, self.heketi_server_url, device_id)
            self.addCleanup(
                heketi_device_enable,
                self.heketi_client_node, self.heketi_server_url, device_id)
    if online_device_id == "":
        self.skipTest(
            "No device online on node %s" % online_hosts[0]["id"])

    # Create volume when only 1 device is online
    vol_size = 1
    vol_info = heketi_volume_create(
        self.heketi_client_node, self.heketi_server_url, vol_size,
        json=True)
    self.assertTrue(
        vol_info, ("Failed to create heketi volume of size %d" % vol_size))
    self.addCleanup(
        heketi_volume_delete, self.heketi_client_node,
        self.heketi_server_url, vol_info['id'])

    # Check that one of volume's bricks is present on the device
    present = self.check_any_of_bricks_present_in_device(
        vol_info['bricks'], online_device_id)
    self.assertTrue(
        present,
        "None of '%s' volume bricks is present on the '%s' device."
        % (vol_info['id'], online_device_id))

    g.log.info("Going to disable device id %s", online_device_id)
    heketi_device_disable(
        self.heketi_client_node, self.heketi_server_url, online_device_id)
    self.addCleanup(
        heketi_device_enable, self.heketi_client_node,
        self.heketi_server_url, online_device_id)

    # Volume creation must fail while the only device is disabled.
    # BUGFIX: the previous 'with self.assertRaises(AssertionError)' block
    # contained 'self.assertFalse(True, ...)', whose own AssertionError
    # was swallowed by the context manager — so the test passed even when
    # the volume was unexpectedly created. Use try/except/else so an
    # unexpected success is reported as a failure.
    try:
        out = heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url, vol_size,
            json=True)
    except AssertionError:
        g.log.info("Volume creation failed as expected")
    else:
        # Make sure the unexpected volume gets removed before failing
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, out["id"])
        self.fail("Volume creation didn't fail: %s" % out)

    # Enable back the device which was previously disabled
    g.log.info("Going to enable device id %s", online_device_id)
    heketi_device_enable(
        self.heketi_client_node, self.heketi_server_url, online_device_id)

    # Create volume when device is enabled
    vol_info = heketi_volume_create(
        self.heketi_client_node, self.heketi_server_url, vol_size,
        json=True)
    self.addCleanup(
        heketi_volume_delete, self.heketi_client_node,
        self.heketi_server_url, vol_info['id'])

    # Check that one of volume's bricks is present on the device
    present = self.check_any_of_bricks_present_in_device(
        vol_info['bricks'], online_device_id)
    self.assertTrue(
        present,
        "None of '%s' volume bricks is present on the '%s' device."
        % (vol_info['id'], online_device_id))
def test_create_volumes_enabling_and_disabling_heketi_devices(self):
    """Validate enable/disable of heketi device.

    Confines brick placement to the first devices of the first 3 nodes,
    creates a volume, disables the device that got one of its bricks,
    verifies volume creation then fails, re-enables the device and
    verifies creation succeeds again (cross-checked with gluster).
    """
    # Get nodes info
    node_id_list = heketi_ops.heketi_node_list(
        self.heketi_client_node, self.heketi_server_url)
    node_info_list = []
    for node_id in node_id_list[0:3]:
        node_info = heketi_ops.heketi_node_info(
            self.heketi_client_node, self.heketi_server_url,
            node_id, json=True)
        node_info_list.append(node_info)

    # Disable 4th and other nodes.
    # BUGFIX: the loop previously iterated 'for node in node_id_list[3:]'
    # but passed the stale 'node_id' (left over from the loop above) to
    # the disable/enable calls, so the surplus nodes were never actually
    # disabled — the third node was re-disabled instead.
    if len(node_id_list) > 3:
        for node_id in node_id_list[3:]:
            heketi_ops.heketi_node_disable(
                self.heketi_client_node, self.heketi_server_url, node_id)
            self.addCleanup(
                heketi_ops.heketi_node_enable, self.heketi_client_node,
                self.heketi_server_url, node_id)

    # Disable second and other devices on the first 3 nodes
    for node_info in node_info_list[0:3]:
        devices = node_info["devices"]
        self.assertTrue(
            devices, "Node '%s' does not have devices." % node_info["id"])
        if devices[0]["state"].strip().lower() != "online":
            self.skipTest("Test expects first device to be enabled.")
        if len(devices) < 2:
            continue
        for device in node_info["devices"][1:]:
            out = heketi_ops.heketi_device_disable(
                self.heketi_client_node, self.heketi_server_url,
                device["id"])
            self.assertTrue(
                out, "Failed to disable the device %s" % device["id"])
            self.addCleanup(
                heketi_ops.heketi_device_enable,
                self.heketi_client_node, self.heketi_server_url,
                device["id"])

    # Create heketi volume
    out = heketi_ops.heketi_volume_create(
        self.heketi_client_node, self.heketi_server_url, 1, json=True)
    self.assertTrue(out, "Failed to create heketi volume of size 1")
    g.log.info("Successfully created heketi volume of size 1")
    device_id = out["bricks"][0]["device"]
    self.addCleanup(
        heketi_ops.heketi_volume_delete, self.heketi_client_node,
        self.heketi_server_url, out["bricks"][0]["volume"])

    # Disable device
    g.log.info("Disabling '%s' device" % device_id)
    out = heketi_ops.heketi_device_disable(
        self.heketi_client_node, self.heketi_server_url, device_id)
    self.assertTrue(out, "Failed to disable the device %s" % device_id)
    g.log.info("Successfully disabled device %s" % device_id)

    try:
        # Get device info
        g.log.info("Retrieving '%s' device info" % device_id)
        out = heketi_ops.heketi_device_info(
            self.heketi_client_node, self.heketi_server_url,
            device_id, json=True)
        self.assertTrue(out, "Failed to get device info %s" % device_id)
        g.log.info("Successfully retrieved device info %s" % device_id)
        name = out["name"]
        if out["state"].lower().strip() != "offline":
            raise exceptions.ExecutionError(
                "Device %s is not in offline state." % name)
        g.log.info("Device %s is now offine" % name)

        # Try to create heketi volume — expected to fail while the
        # device holding the first volume's brick is offline
        g.log.info("Creating heketi volume: Expected to fail.")
        try:
            out = heketi_ops.heketi_volume_create(
                self.heketi_client_node, self.heketi_server_url,
                1, json=True)
        except exceptions.ExecutionError:
            g.log.info("Volume was not created as expected.")
        else:
            # Clean the unexpected volume up before failing.
            self.addCleanup(
                heketi_ops.heketi_volume_delete, self.heketi_client_node,
                self.heketi_server_url, out["bricks"][0]["volume"])
            msg = "Volume unexpectedly created. Out: %s" % out
            # BUGFIX: was 'assert False, msg' — 'assert' is stripped
            # under 'python -O'; self.fail() always reports.
            self.fail(msg)
    finally:
        # Enable the device back even if the checks above blew up
        g.log.info("Enable '%s' device back." % device_id)
        out = heketi_ops.heketi_device_enable(
            self.heketi_client_node, self.heketi_server_url, device_id)
        self.assertTrue(out, "Failed to enable the device %s" % device_id)
        g.log.info("Successfully enabled device %s" % device_id)

    # Get device info
    out = heketi_ops.heketi_device_info(
        self.heketi_client_node, self.heketi_server_url, device_id,
        json=True)
    self.assertTrue(out, ("Failed to get device info %s" % device_id))
    g.log.info("Successfully retrieved device info %s" % device_id)
    name = out["name"]
    if out["state"] != "online":
        raise exceptions.ExecutionError(
            "Device %s is not in online state." % name)

    # Create heketi volume of size
    out = heketi_ops.heketi_volume_create(
        self.heketi_client_node, self.heketi_server_url, 1, json=True)
    self.assertTrue(out, "Failed to create volume of size 1")
    self.addCleanup(
        heketi_ops.heketi_volume_delete, self.heketi_client_node,
        self.heketi_server_url, out["bricks"][0]["volume"])
    g.log.info("Successfully created volume of size 1")
    name = out["name"]

    # Get gluster volume info to confirm the volume really exists
    vol_info = get_volume_info('auto_get_gluster_endpoint', volname=name)
    self.assertTrue(vol_info, "Failed to get '%s' volume info." % name)
    g.log.info("Successfully got the '%s' volume info." % name)