def _get_vol_size(self):
    """Calculate a distributed-volume size exceeding any single device.

    Keeps only the first 3 suitable (online, with devices) heketi nodes
    enabled, disabling extra nodes and any device with less than 5Gb of
    free space, then returns a size in Gb that is 1Gb bigger than the
    smallest per-node maximum device free space - so a volume of that
    size cannot be placed without distribution.

    Returns:
        int: calculated volume size in Gb.

    Raises:
        SkipTest: if fewer than 3 nodes with 2+ suitable devices exist.
    """
    # Get available free space disabling redundant nodes
    min_free_space_gb = 5
    heketi_url = self.heketi_server_url
    node_ids = heketi_node_list(self.heketi_client_node, heketi_url)
    self.assertTrue(node_ids)
    nodes = {}
    min_free_space = min_free_space_gb * 1024**2
    for node_id in node_ids:
        node_info = heketi_node_info(
            self.heketi_client_node, heketi_url, node_id, json=True)
        if (node_info['state'].lower() != 'online'
                or not node_info['devices']):
            continue
        if len(nodes) > 2:
            # We already collected 3 nodes - disable this redundant one
            # and skip its devices so they don't affect the size
            # calculation below. (Fix: the original fell through and
            # still counted devices of the just-disabled node, unlike
            # the same pattern in
            # test_heketi_with_device_removal_insuff_space.)
            out = heketi_node_disable(
                self.heketi_client_node, heketi_url, node_id)
            self.assertTrue(out)
            self.addCleanup(
                heketi_node_enable, self.heketi_client_node, heketi_url,
                node_id)
            continue
        for device in node_info['devices']:
            if device['state'].lower() != 'online':
                continue
            free_space = device['storage']['free']
            if free_space < min_free_space:
                # Too small to participate - disable for test duration.
                out = heketi_device_disable(
                    self.heketi_client_node, heketi_url, device['id'])
                self.assertTrue(out)
                self.addCleanup(
                    heketi_device_enable, self.heketi_client_node,
                    heketi_url, device['id'])
                continue
            if node_id not in nodes:
                nodes[node_id] = []
            nodes[node_id].append(device['storage']['free'])

    # Skip test if nodes requirements are not met
    if (len(nodes) < 3 or
            not all(map((lambda _list: len(_list) > 1), nodes.values()))):
        raise self.skipTest(
            "Could not find 3 online nodes with, "
            "at least, 2 online devices having free space "
            "bigger than %dGb." % min_free_space_gb)

    # Calculate size of a potential distributed vol:
    # 1Gb more than the smallest of the per-node biggest devices.
    vol_size_gb = int(min(map(max, nodes.values())) / (1024 ** 2)) + 1
    return vol_size_gb
def detach_devices_attached(self, device_id_list):
    """
    All the devices attached are gracefully detached in this function
    """
    # Accept a single device ID as well as any tuple/set/list of IDs.
    if not isinstance(device_id_list, (tuple, set, list)):
        device_id_list = [device_id_list]
    node, url = self.heketi_client_node, self.heketi_server_url
    for dev_id in device_id_list:
        # Heketi requires disable -> remove -> delete ordering;
        # verify each step reported success before the next one.
        disabled = heketi_ops.heketi_device_disable(node, url, dev_id)
        self.assertNotEqual(
            disabled, False, "Device %s could not be disabled" % dev_id)
        removed = heketi_ops.heketi_device_remove(node, url, dev_id)
        self.assertNotEqual(
            removed, False, "Device %s could not be removed" % dev_id)
        deleted = heketi_ops.heketi_device_delete(node, url, dev_id)
        self.assertNotEqual(
            deleted, False, "Device %s could not be deleted" % dev_id)
def test_device_enable_disable(self):
    """Validate device enable and disable functionality"""

    # Disable all but one device on the first online node
    online_hosts = self.get_online_nodes_disable_redundant()
    online_device_id = ""
    for device in online_hosts[0]["devices"]:
        if device["state"].strip().lower() != "online":
            continue
        device_id = device["id"]
        # Keep the first online device, disable every other one.
        if online_device_id == "":
            online_device_id = device_id
        else:
            g.log.info("going to disable device %s", device_id)
            heketi_device_disable(
                self.heketi_client_node, self.heketi_server_url, device_id)
            self.addCleanup(
                heketi_device_enable, self.heketi_client_node,
                self.heketi_server_url, device_id)
    if online_device_id == "":
        self.skipTest(
            "No device online on node %s" % online_hosts[0]["id"])

    # Create volume when only 1 device is online
    vol_size = 1
    vol_info = heketi_volume_create(self.heketi_client_node,
                                    self.heketi_server_url, vol_size,
                                    json=True)
    self.assertTrue(vol_info, (
        "Failed to create heketi volume of size %d" % vol_size))
    self.addCleanup(self.delete_volumes, vol_info['id'])

    # Check that one of volume's bricks is present on the device
    present = self.check_any_of_bricks_present_in_device(
        vol_info['bricks'], online_device_id)
    self.assertTrue(
        present,
        "None of '%s' volume bricks is present on the '%s' device." % (
            vol_info['id'], online_device_id))

    # Disable the only online device - volume creation must now fail.
    g.log.info("Going to disable device id %s", online_device_id)
    heketi_device_disable(
        self.heketi_client_node, self.heketi_server_url, online_device_id)
    self.addCleanup(heketi_device_enable, self.heketi_client_node,
                    self.heketi_server_url, online_device_id)

    ret, out, err = heketi_volume_create(
        self.heketi_client_node, self.heketi_server_url,
        vol_size, json=True, raw_cli_output=True)
    if ret == 0:
        # Unexpected success - still schedule removal of the volume.
        self.addCleanup(self.delete_volumes, json.loads(out)["id"])
    self.assertNotEqual(ret, 0,
                        ("Volume creation did not fail. ret- %s "
                         "out- %s err- %s" % (ret, out, err)))
    g.log.info("Volume creation failed as expected, err- %s", err)

    # Enable back the device which was previously disabled
    g.log.info("Going to enable device id %s", online_device_id)
    heketi_device_enable(
        self.heketi_client_node, self.heketi_server_url, online_device_id)

    # Create volume when device is enabled
    vol_info = heketi_volume_create(self.heketi_client_node,
                                    self.heketi_server_url, vol_size,
                                    json=True)
    self.assertTrue(vol_info, (
        "Failed to create heketi volume of size %d" % vol_size))
    self.addCleanup(self.delete_volumes, vol_info['id'])

    # Check that one of volume's bricks is present on the device
    present = self.check_any_of_bricks_present_in_device(
        vol_info['bricks'], online_device_id)
    self.assertTrue(
        present,
        "None of '%s' volume bricks is present on the '%s' device." % (
            vol_info['id'], online_device_id))
def test_heketi_with_device_removal_insuff_space(self):
    """Validate heketi with device removal insufficient space"""

    # Disable 4+ nodes and 3+ devices on the first 3 nodes
    min_free_space_gb = 5
    min_free_space = min_free_space_gb * 1024**2
    heketi_url = self.heketi_server_url
    heketi_node = self.heketi_client_node
    nodes = {}

    node_ids = heketi_node_list(heketi_node, heketi_url)
    self.assertTrue(node_ids)
    for node_id in node_ids:
        node_info = heketi_node_info(
            heketi_node, heketi_url, node_id, json=True)
        if (node_info["state"].lower() != "online"
                or not node_info["devices"]):
            continue
        if len(nodes) > 2:
            # Already have 3 nodes - disable the redundant ones.
            heketi_node_disable(heketi_node, heketi_url, node_id)
            self.addCleanup(
                heketi_node_enable, heketi_node, heketi_url, node_id)
            continue
        for device in node_info["devices"]:
            if device["state"].lower() != "online":
                continue
            free_space = device["storage"]["free"]
            if node_id not in nodes:
                nodes[node_id] = []
            # Keep at most 2 suitable devices per node; disable devices
            # that are too small or redundant.
            if (free_space < min_free_space or len(nodes[node_id]) > 1):
                heketi_device_disable(
                    heketi_node, heketi_url, device["id"])
                self.addCleanup(
                    heketi_device_enable,
                    heketi_node, heketi_url, device["id"])
                continue
            nodes[node_id].append({
                "device_id": device["id"], "free": free_space})

    # Skip test if nodes requirements are not met
    if (len(nodes) < 3 or
            not all(map((lambda _list: len(_list) > 1), nodes.values()))):
        raise self.skipTest(
            "Could not find 3 online nodes with 2 online devices "
            "having free space bigger than %dGb." % min_free_space_gb)

    # Calculate size of a potential distributed vol: pick the bigger of
    # the first node's two devices, size is 1Gb more than its free space.
    if nodes[node_ids[0]][0]["free"] > nodes[node_ids[0]][1]["free"]:
        index = 0
    else:
        index = 1
    vol_size_gb = int(nodes[node_ids[0]][index]["free"] / (1024 ** 2)) + 1
    device_id = nodes[node_ids[0]][index]["device_id"]

    # Create volume with such size that we consume space more than
    # size of smaller disks
    try:
        heketi_vol = heketi_volume_create(
            heketi_node, heketi_url, vol_size_gb, json=True)
    except Exception as e:
        # NOTE: broad catch is deliberate best-effort - retry once with
        # 1Gb less if the first attempt was too big.
        g.log.warning(
            "Got following error trying to create '%s'Gb vol: %s" % (
                vol_size_gb, e))
        vol_size_gb -= 1
        heketi_vol = heketi_volume_create(
            heketi_node, heketi_url, vol_size_gb, json=True)
    self.addCleanup(
        self.delete_volumes, heketi_vol["bricks"][0]["volume"])

    # Try to 'remove' bigger Heketi disk expecting error,
    # because there is no space on smaller disk to relocate bricks to
    heketi_device_disable(heketi_node, heketi_url, device_id)
    self.addCleanup(
        heketi_device_enable, heketi_node, heketi_url, device_id)
    try:
        self.assertRaises(
            ExecutionError, heketi_device_remove,
            heketi_node, heketi_url, device_id)
    except Exception:
        # If removal unexpectedly succeeded, re-disable on cleanup so
        # the scheduled enable leaves the device in a sane state.
        self.addCleanup(
            heketi_device_disable, heketi_node, heketi_url, device_id)
        raise
def test_device_remove_operation(self, delete_device):
    """Validate remove/delete device using heketi-cli

    Args:
        delete_device (bool): when True the device is also deleted
            (and re-added on cleanup) after being removed; when False
            it is only removed and re-enabled on cleanup.
    """
    # Fix: dict.values() is a non-subscriptable view on Python 3;
    # wrap in list() to stay compatible with both Python 2 and 3.
    gluster_server_0 = list(g.config["gluster_servers"].values())[0]
    try:
        device_name = gluster_server_0["additional_devices"][0]
    except (KeyError, IndexError):
        self.skipTest(
            "Additional disk is not specified for node with following "
            "hostnames and IP addresses: %s, %s." % (
                gluster_server_0.get('manage', '?'),
                gluster_server_0.get('storage', '?')))
    manage_hostname = gluster_server_0["manage"]

    # Get node ID of the Gluster hostname
    topo_info = heketi_topology_info(self.heketi_client_node,
                                     self.heketi_server_url, json=True)
    self.assertTrue(
        topo_info["clusters"][0]["nodes"],
        "Cluster info command returned empty list of nodes.")

    node_id = None
    for node in topo_info["clusters"][0]["nodes"]:
        if manage_hostname == node['hostnames']["manage"][0]:
            node_id = node["id"]
            break
    self.assertNotEqual(
        node_id, None,
        "No information about node_id for %s" % manage_hostname)

    # Iterate chosen node devices and pick the smallest online one.
    lowest_device_size = lowest_device_id = None
    online_hosts = self.get_online_nodes_disable_redundant()
    for host in online_hosts[0:3]:
        if node_id != host["id"]:
            continue
        for device in host["devices"]:
            if device["state"].strip().lower() != "online":
                continue
            if (lowest_device_size is None
                    or device["storage"]["total"] < lowest_device_size):
                lowest_device_size = device["storage"]["total"]
                lowest_device_id = device["id"]
                lowest_device_name = device["name"]
    if lowest_device_id is None:
        self.skipTest(
            "Didn't find suitable device for disablement on '%s' node." % (
                node_id))

    # Create volume
    vol_size = 1
    vol_info = heketi_volume_create(
        self.heketi_client_node, self.heketi_server_url, vol_size,
        json=True)
    self.assertTrue(vol_info, (
        "Failed to create heketi volume of size %d" % vol_size))
    self.addCleanup(self.delete_volumes, vol_info['id'])

    # Add extra device, then remember it's ID and size
    heketi_device_add(self.heketi_client_node, self.heketi_server_url,
                      device_name, node_id)
    node_info_after_addition = heketi_node_info(
        self.heketi_client_node, self.heketi_server_url, node_id,
        json=True)
    for device in node_info_after_addition["devices"]:
        if device["name"] != device_name:
            continue
        device_id_new = device["id"]
        device_size_new = device["storage"]["total"]
    # Cleanups run LIFO: disable, then remove, then delete the new device.
    self.addCleanup(heketi_device_delete, self.heketi_client_node,
                    self.heketi_server_url, device_id_new)
    self.addCleanup(heketi_device_remove, self.heketi_client_node,
                    self.heketi_server_url, device_id_new)
    self.addCleanup(heketi_device_disable, self.heketi_client_node,
                    self.heketi_server_url, device_id_new)

    if lowest_device_size > device_size_new:
        skip_msg = ("Skip test case, because newly added disk %s is "
                    "smaller than device which we want to remove %s." % (
                        device_size_new, lowest_device_size))
        self.skipTest(skip_msg)

    # Removing an enabled device must fail.
    g.log.info("Removing device id %s" % lowest_device_id)
    ret, out, err = heketi_device_remove(
        self.heketi_client_node, self.heketi_server_url,
        lowest_device_id, raw_cli_output=True)
    if ret == 0:
        self.addCleanup(heketi_device_enable, self.heketi_client_node,
                        self.heketi_server_url, lowest_device_id)
        self.addCleanup(heketi_device_disable, self.heketi_client_node,
                        self.heketi_server_url, lowest_device_id)
    self.assertNotEqual(ret, 0, (
        "Device removal did not fail. ret: %s, out: %s, err: %s." % (
            ret, out, err)))
    g.log.info("Device removal failed as expected, err- %s", err)

    # Need to disable device before removing
    heketi_device_disable(
        self.heketi_client_node, self.heketi_server_url,
        lowest_device_id)
    if not delete_device:
        self.addCleanup(heketi_device_enable, self.heketi_client_node,
                        self.heketi_server_url, lowest_device_id)

    # Remove device from Heketi
    try:
        heketi_device_remove(
            self.heketi_client_node, self.heketi_server_url,
            lowest_device_id)
    except Exception:
        if delete_device:
            self.addCleanup(heketi_device_enable, self.heketi_client_node,
                            self.heketi_server_url, lowest_device_id)
        raise
    if not delete_device:
        self.addCleanup(heketi_device_disable, self.heketi_client_node,
                        self.heketi_server_url, lowest_device_id)

    if delete_device:
        try:
            heketi_device_delete(
                self.heketi_client_node, self.heketi_server_url,
                lowest_device_id)
        except Exception:
            self.addCleanup(heketi_device_enable, self.heketi_client_node,
                            self.heketi_server_url, lowest_device_id)
            self.addCleanup(heketi_device_disable,
                            self.heketi_client_node,
                            self.heketi_server_url, lowest_device_id)
            raise
        # Re-attach the deleted device to the node on cleanup.
        self.addCleanup(
            heketi_device_add, self.heketi_client_node,
            self.heketi_server_url, lowest_device_name, node_id)

    # Create volume
    vol_info = heketi_volume_create(self.heketi_client_node,
                                    self.heketi_server_url, vol_size,
                                    json=True)
    self.assertTrue(vol_info, (
        "Failed to create heketi volume of size %d" % vol_size))
    self.addCleanup(self.delete_volumes, vol_info['id'])

    if delete_device:
        return

    # Check that none of volume's bricks is present on the device
    present = self.check_any_of_bricks_present_in_device(
        vol_info['bricks'], lowest_device_id)
    self.assertFalse(
        present,
        "Some of the '%s' volume bricks is present of the removed "
        "'%s' device." % (vol_info['id'], lowest_device_id))
def _pv_resize(self, exceed_free_space):
    """Exercise PVC expansion either within or beyond the free space.

    Args:
        exceed_free_space (bool): when True, requests an expansion
            bigger than the available free space and verifies the
            resize fails without breaking the app POD; when False,
            expands to the maximum available size and verifies success.
    """
    dir_path = "/mnt"
    pvc_size_gb, min_free_space_gb = 1, 3

    # Get available free space disabling redundant devices and nodes
    heketi_url = self.heketi_server_url
    node_id_list = heketi_ops.heketi_node_list(
        self.heketi_client_node, heketi_url)
    self.assertTrue(node_id_list)
    nodes = {}
    min_free_space = min_free_space_gb * 1024**2
    for node_id in node_id_list:
        node_info = heketi_ops.heketi_node_info(
            self.heketi_client_node, heketi_url, node_id, json=True)
        if (node_info['state'].lower() != 'online'
                or not node_info['devices']):
            continue
        if len(nodes) > 2:
            # Already have 3 nodes - disable the redundant ones.
            out = heketi_ops.heketi_node_disable(
                self.heketi_client_node, heketi_url, node_id)
            self.assertTrue(out)
            self.addCleanup(
                heketi_ops.heketi_node_enable, self.heketi_client_node,
                heketi_url, node_id)
        for device in node_info['devices']:
            if device['state'].lower() != 'online':
                continue
            free_space = device['storage']['free']
            # Keep exactly one big-enough device per node; disable the
            # rest so free-space math maps 1:1 to nodes.
            if (node_id in nodes.keys() or free_space < min_free_space):
                out = heketi_ops.heketi_device_disable(
                    self.heketi_client_node, heketi_url, device['id'])
                self.assertTrue(out)
                self.addCleanup(
                    heketi_ops.heketi_device_enable,
                    self.heketi_client_node, heketi_url, device['id'])
                continue
            nodes[node_id] = free_space
    if len(nodes) < 3:
        raise self.skipTest(
            "Could not find 3 online nodes with, "
            "at least, 1 online device having free space "
            "bigger than %dGb." % min_free_space_gb)

    # Calculate maximum available size for PVC
    available_size_gb = int(min(nodes.values()) / (1024**2))

    # Create PVC
    self.create_storage_class(allow_volume_expansion=True)
    pvc_name = self.create_and_wait_for_pvc(pvc_size=pvc_size_gb)

    # Create DC with POD and attached PVC to it
    dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
    self.addCleanup(oc_delete, self.node, 'dc', dc_name)
    self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
    pod_name = get_pod_name_from_dc(self.node, dc_name)
    wait_for_pod_be_ready(self.node, pod_name)

    if exceed_free_space:
        # Try to expand existing PVC exceeding free space
        resize_pvc(self.node, pvc_name, available_size_gb)
        wait_for_events(self.node, obj_name=pvc_name,
                        event_reason='VolumeResizeFailed')

        # Check that app POD is up and running then try to write data
        wait_for_pod_be_ready(self.node, pod_name)
        cmd = ("dd if=/dev/urandom of=%s/autotest bs=100K count=1" %
               dir_path)
        ret, out, err = oc_rsh(self.node, pod_name, cmd)
        self.assertEqual(
            ret, 0,
            "Failed to write data after failed attempt to expand PVC.")
    else:
        # Expand existing PVC using all the available free space
        expand_size_gb = available_size_gb - pvc_size_gb
        resize_pvc(self.node, pvc_name, expand_size_gb)
        verify_pvc_size(self.node, pvc_name, expand_size_gb)
        pv_name = get_pv_name_from_pvc(self.node, pvc_name)
        verify_pv_size(self.node, pv_name, expand_size_gb)
        wait_for_events(
            self.node, obj_name=pvc_name,
            event_reason='VolumeResizeSuccessful')

        # Recreate app POD
        oc_delete(self.node, 'pod', pod_name)
        wait_for_resource_absence(self.node, 'pod', pod_name)
        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        # Write data on the expanded PVC
        cmd = ("dd if=/dev/urandom of=%s/autotest "
               "bs=1M count=1025" % dir_path)
        ret, out, err = oc_rsh(self.node, pod_name, cmd)
        self.assertEqual(
            ret, 0, "Failed to write data on the expanded PVC")
def test_volume_creation_no_free_devices(self):
    """Validate heketi error is returned when no free devices available"""
    node, server_url = self.heketi_client_node, self.heketi_server_url

    # Get nodes info
    node_id_list = heketi_ops.heketi_node_list(node, server_url)
    node_info_list = []
    for node_id in node_id_list[0:3]:
        node_info = heketi_ops.heketi_node_info(
            node, server_url, node_id, json=True)
        node_info_list.append(node_info)

    # Disable 4th and other nodes
    for node_id in node_id_list[3:]:
        heketi_ops.heketi_node_disable(node, server_url, node_id)
        self.addCleanup(
            heketi_ops.heketi_node_enable, node, server_url, node_id)

    # Disable second and other devices on the first 3 nodes
    for node_info in node_info_list[0:3]:
        devices = node_info["devices"]
        self.assertTrue(
            devices, "Node '%s' does not have devices." % node_info["id"])
        if devices[0]["state"].strip().lower() != "online":
            self.skipTest("Test expects first device to be enabled.")
        if len(devices) < 2:
            continue
        for device in node_info["devices"][1:]:
            out = heketi_ops.heketi_device_disable(
                node, server_url, device["id"])
            self.assertTrue(
                out, "Failed to disable the device %s" % device["id"])
            self.addCleanup(
                heketi_ops.heketi_device_enable,
                node, server_url, device["id"])

    # Calculate common available space.
    # Fix: the comprehension previously read the leaked 'node_info'
    # variable instead of the iteration variable, so all 3 entries were
    # the same node's free space. Use 'n' so each node contributes its
    # own first-device free space.
    available_spaces = [
        int(n["devices"][0]["storage"]["free"])
        for n in node_info_list[0:3]]
    min_space_gb = int(min(available_spaces) / 1024**2)
    self.assertGreater(min_space_gb, 3, "Not enough available free space.")

    # Create first small volume
    vol = heketi_ops.heketi_volume_create(node, server_url, 1, json=True)
    self.addCleanup(
        heketi_ops.heketi_volume_delete, self.heketi_client_node,
        self.heketi_server_url, vol["id"])

    # Try to create second volume getting "no free space" error
    try:
        vol_fail = heketi_ops.heketi_volume_create(
            node, server_url, min_space_gb, json=True)
    except exceptions.ExecutionError:
        g.log.info("Volume was not created as expected.")
    else:
        # Unexpected success - schedule deletion and fail the test.
        self.addCleanup(
            heketi_ops.heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, vol_fail["bricks"][0]["volume"])
        self.assertFalse(
            vol_fail,
            "Volume should have not been created. Out: %s" % vol_fail)
def test_create_volumes_enabling_and_disabling_heketi_devices(self):
    """Validate enable/disable of heketi device"""

    # Get nodes info
    node_id_list = heketi_ops.heketi_node_list(
        self.heketi_client_node, self.heketi_server_url)
    node_info_list = []
    for node_id in node_id_list[0:3]:
        node_info = heketi_ops.heketi_node_info(
            self.heketi_client_node, self.heketi_server_url,
            node_id, json=True)
        node_info_list.append(node_info)

    # Disable 4th and other nodes.
    # Fix: the loop previously disabled the leaked 'node_id' (the last
    # of the first 3 nodes) repeatedly instead of the extra nodes being
    # iterated over as 'node'.
    if len(node_id_list) > 3:
        for node in node_id_list[3:]:
            heketi_ops.heketi_node_disable(
                self.heketi_client_node, self.heketi_server_url, node)
            self.addCleanup(
                heketi_ops.heketi_node_enable, self.heketi_client_node,
                self.heketi_server_url, node)

    # Disable second and other devices on the first 3 nodes
    for node_info in node_info_list[0:3]:
        devices = node_info["devices"]
        self.assertTrue(
            devices, "Node '%s' does not have devices." % node_info["id"])
        if devices[0]["state"].strip().lower() != "online":
            self.skipTest("Test expects first device to be enabled.")
        if len(devices) < 2:
            continue
        for device in node_info["devices"][1:]:
            out = heketi_ops.heketi_device_disable(
                self.heketi_client_node, self.heketi_server_url,
                device["id"])
            self.assertTrue(
                out, "Failed to disable the device %s" % device["id"])
            self.addCleanup(
                heketi_ops.heketi_device_enable, self.heketi_client_node,
                self.heketi_server_url, device["id"])

    # Create heketi volume
    out = heketi_ops.heketi_volume_create(
        self.heketi_client_node, self.heketi_server_url, 1, json=True)
    self.assertTrue(out, "Failed to create heketi volume of size 1")
    g.log.info("Successfully created heketi volume of size 1")
    device_id = out["bricks"][0]["device"]
    self.addCleanup(
        heketi_ops.heketi_volume_delete, self.heketi_client_node,
        self.heketi_server_url, out["bricks"][0]["volume"])

    # Disable device
    g.log.info("Disabling '%s' device" % device_id)
    out = heketi_ops.heketi_device_disable(
        self.heketi_client_node, self.heketi_server_url, device_id)
    self.assertTrue(out, "Failed to disable the device %s" % device_id)
    g.log.info("Successfully disabled device %s" % device_id)

    try:
        # Get device info
        g.log.info("Retrieving '%s' device info" % device_id)
        out = heketi_ops.heketi_device_info(
            self.heketi_client_node, self.heketi_server_url,
            device_id, json=True)
        self.assertTrue(out, "Failed to get device info %s" % device_id)
        g.log.info("Successfully retrieved device info %s" % device_id)
        name = out["name"]
        if out["state"].lower().strip() != "offline":
            raise exceptions.ExecutionError(
                "Device %s is not in offline state." % name)
        g.log.info("Device %s is now offine" % name)

        # Try to create heketi volume
        g.log.info("Creating heketi volume: Expected to fail.")
        try:
            out = heketi_ops.heketi_volume_create(
                self.heketi_client_node, self.heketi_server_url,
                1, json=True)
        except exceptions.ExecutionError:
            g.log.info("Volume was not created as expected.")
        else:
            self.addCleanup(
                heketi_ops.heketi_volume_delete, self.heketi_client_node,
                self.heketi_server_url, out["bricks"][0]["volume"])
            msg = "Volume unexpectedly created. Out: %s" % out
            # Fix: use self.fail instead of 'assert False' which is
            # silently stripped when Python runs with -O.
            self.fail(msg)
    finally:
        # Enable the device back
        g.log.info("Enable '%s' device back." % device_id)
        out = heketi_ops.heketi_device_enable(
            self.heketi_client_node, self.heketi_server_url, device_id)
        self.assertTrue(out, "Failed to enable the device %s" % device_id)
        g.log.info("Successfully enabled device %s" % device_id)

    # Get device info
    out = heketi_ops.heketi_device_info(
        self.heketi_client_node, self.heketi_server_url, device_id,
        json=True)
    self.assertTrue(out, ("Failed to get device info %s" % device_id))
    g.log.info("Successfully retrieved device info %s" % device_id)
    name = out["name"]
    if out["state"] != "online":
        raise exceptions.ExecutionError(
            "Device %s is not in online state." % name)

    # Create heketi volume of size
    out = heketi_ops.heketi_volume_create(
        self.heketi_client_node, self.heketi_server_url, 1, json=True)
    self.assertTrue(out, "Failed to create volume of size 1")
    self.addCleanup(
        heketi_ops.heketi_volume_delete, self.heketi_client_node,
        self.heketi_server_url, out["bricks"][0]["volume"])
    g.log.info("Successfully created volume of size 1")
    name = out["name"]

    # Get gluster volume info
    vol_info = get_volume_info('auto_get_gluster_endpoint', volname=name)
    self.assertTrue(vol_info, "Failed to get '%s' volume info." % name)
    g.log.info("Successfully got the '%s' volume info." % name)
def test_create_arbiter_vol_with_more_than_one_brick_set(self):
    """Validate volume creation using heketi for more than six brick set"""

    # Set arbiter:disabled tag to the data devices and get their info
    data_nodes = []
    for node_id in self.node_id_list[0:2]:
        node_info = heketi_ops.heketi_node_info(
            self.heketi_client_node, self.heketi_server_url,
            node_id, json=True)

        if len(node_info['devices']) < 2:
            self.skipTest(
                "Nodes are expected to have at least 2 devices")

        if not all([int(d['storage']['free']) > (3 * 1024**2)
                    for d in node_info['devices'][0:2]]):
            self.skipTest(
                "Devices are expected to have more than 3Gb of free space")
        for device in node_info['devices']:
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url,
                'device', device['id'], 'disabled',
                device.get('tags', {}).get('arbiter'))
        self._set_arbiter_tag_with_further_revert(
            self.heketi_client_node, self.heketi_server_url,
            'node', node_id, 'disabled',
            node_info.get('tags', {}).get('arbiter'))

        data_nodes.append(node_info)

    # Set arbiter:required tag to all other nodes and their devices
    for node_id in self.node_id_list[2:]:
        node_info = heketi_ops.heketi_node_info(
            self.heketi_client_node, self.heketi_server_url,
            node_id, json=True)
        self._set_arbiter_tag_with_further_revert(
            self.heketi_client_node, self.heketi_server_url,
            'node', node_id, 'required',
            node_info.get('tags', {}).get('arbiter'))
        for device in node_info['devices']:
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url,
                'device', device['id'], 'required',
                device.get('tags', {}).get('arbiter'))

    # Get second big volume between 2 data nodes and use it
    # for target vol calculation.
    for i, node_info in enumerate(data_nodes):
        biggest_disk_free_space = 0
        for device in node_info['devices'][0:2]:
            free = int(device['storage']['free'])
            if free > biggest_disk_free_space:
                biggest_disk_free_space = free
        data_nodes[i]['biggest_free_space'] = biggest_disk_free_space
    target_vol_size_kb = 1 + min(
        [n['biggest_free_space'] for n in data_nodes])

    # Check that all the data devices have, at least, half of required size
    all_big_enough = True
    for node_info in data_nodes:
        for device in node_info['devices'][0:2]:
            if float(device['storage']['free']) < (target_vol_size_kb / 2):
                all_big_enough = False
                break

    # Create sc with gluster arbiter info
    self.create_storage_class(is_arbiter_vol=True)

    # Create helper arbiter vol if not all the data devices have
    # half of required free space.
    if not all_big_enough:
        helper_vol_size_kb, target_vol_size_kb = 0, 0
        smaller_device_id = None
        # NOTE(review): 'smaller_device_id' keeps only the last node's
        # smaller device unless the 'diff > helper_vol_size_kb' branch
        # fires - presumably intentional since sizes are picked from
        # the node with the biggest diff; verify against test intent.
        for node_info in data_nodes:
            devices = node_info['devices']
            if ((devices[0]['storage']['free']) > (
                    devices[1]['storage']['free'])):
                smaller_device_id = devices[1]['id']
                smaller_device = devices[1]['storage']['free']
                bigger_device = devices[0]['storage']['free']
            else:
                smaller_device_id = devices[0]['id']
                smaller_device = devices[0]['storage']['free']
                bigger_device = devices[1]['storage']['free']
            diff = bigger_device - (2 * smaller_device) + 1
            if diff > helper_vol_size_kb:
                helper_vol_size_kb = diff
                target_vol_size_kb = bigger_device - diff

        # Disable smaller device and create helper vol on bigger one
        # to reduce its size, then enable smaller device back.
        try:
            out = heketi_ops.heketi_device_disable(
                self.heketi_client_node, self.heketi_server_url,
                smaller_device_id)
            self.assertTrue(out)
            self.create_and_wait_for_pvc(
                int(helper_vol_size_kb / 1024.0**2) + 1)
        finally:
            out = heketi_ops.heketi_device_enable(
                self.heketi_client_node, self.heketi_server_url,
                smaller_device_id)
            self.assertTrue(out)

    # Create target arbiter volume
    self.create_and_wait_for_pvc(int(target_vol_size_kb / 1024.0**2))

    # Get gluster volume info
    vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

    self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
        vol_info, arbiter_bricks=2, data_bricks=4)
def test_volume_expansion_no_free_space(self):
    """Validate volume expansion when there is no free space"""

    vol_size, expand_size, additional_devices_attached = None, 10, {}
    h_node, h_server_url = self.heketi_client_node, self.heketi_server_url

    # Get nodes info
    heketi_node_id_list = heketi_ops.heketi_node_list(h_node, h_server_url)
    if len(heketi_node_id_list) < 3:
        self.skipTest("3 Heketi nodes are required.")

    # Disable 4th and other nodes
    for node_id in heketi_node_id_list[3:]:
        heketi_ops.heketi_node_disable(h_node, h_server_url, node_id)
        self.addCleanup(
            heketi_ops.heketi_node_enable, h_node, h_server_url, node_id)

    # Prepare first 3 nodes
    smallest_size = None
    err_msg = ''
    for node_id in heketi_node_id_list[0:3]:
        node_info = heketi_ops.heketi_node_info(
            h_node, h_server_url, node_id, json=True)

        # Disable second and other devices
        devices = node_info["devices"]
        self.assertTrue(
            devices, "Node '%s' does not have devices." % node_id)
        if devices[0]["state"].strip().lower() != "online":
            self.skipTest("Test expects first device to be enabled.")
        # Track the smallest first-device free space across the 3 nodes.
        if (smallest_size is None
                or devices[0]["storage"]["free"] < smallest_size):
            smallest_size = devices[0]["storage"]["free"]
        for device in node_info["devices"][1:]:
            heketi_ops.heketi_device_disable(
                h_node, h_server_url, device["id"])
            self.addCleanup(
                heketi_ops.heketi_device_enable,
                h_node, h_server_url, device["id"])

        # Gather info about additional devices
        additional_device_name = None
        for gluster_server in self.gluster_servers:
            gluster_server_data = self.gluster_servers_info[gluster_server]
            g_manage = gluster_server_data["manage"]
            g_storage = gluster_server_data["storage"]
            if not (g_manage in node_info["hostnames"]["manage"]
                    or g_storage in node_info["hostnames"]["storage"]):
                continue
            additional_device_name = ((
                gluster_server_data.get("additional_devices") or [''])[0])
            break

        if not additional_device_name:
            # Accumulate the message; skipping happens later so all
            # misconfigured nodes are reported at once.
            err_msg += ("No 'additional_devices' are configured for "
                        "'%s' node, which has following hostnames and "
                        "IP addresses: %s.\n" % (
                            node_id,
                            ', '.join(
                                node_info["hostnames"]["manage"]
                                + node_info["hostnames"]["storage"])))
            continue

        heketi_ops.heketi_device_add(
            h_node, h_server_url, additional_device_name, node_id)
        additional_devices_attached.update(
            {node_id: additional_device_name})

    # Schedule cleanup of the added devices
    for node_id in additional_devices_attached.keys():
        node_info = heketi_ops.heketi_node_info(
            h_node, h_server_url, node_id, json=True)
        for device in node_info["devices"]:
            if device["name"] != additional_devices_attached[node_id]:
                continue
            self.addCleanup(self.detach_devices_attached, device["id"])
            break
        else:
            # for-else: no matching device found in heketi's view.
            self.fail("Could not find ID for added device on "
                      "'%s' node." % node_id)

    if err_msg:
        self.skipTest(err_msg)

    # Temporary disable new devices
    self.disable_devices(additional_devices_attached)

    # Create volume and save info about it
    vol_size = int(smallest_size / (1024**2)) - 1
    creation_info = heketi_ops.heketi_volume_create(
        h_node, h_server_url, vol_size, json=True)
    volume_name, volume_id = creation_info["name"], creation_info["id"]
    self.addCleanup(
        heketi_ops.heketi_volume_delete,
        h_node, h_server_url, volume_id, raise_on_error=False)

    volume_info_before_expansion = heketi_ops.heketi_volume_info(
        h_node, h_server_url, volume_id, json=True)
    num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name)
    self.get_brick_and_volume_status(volume_name)
    free_space_before_expansion = self.get_devices_summary_free_space()

    # Try to expand volume with not enough device space
    self.assertRaises(
        ExecutionError, heketi_ops.heketi_volume_expand,
        h_node, h_server_url, volume_id, expand_size)

    # Enable new devices to be able to expand our volume
    self.enable_devices(additional_devices_attached)

    # Expand volume and validate results
    heketi_ops.heketi_volume_expand(
        h_node, h_server_url, volume_id, expand_size, json=True)
    free_space_after_expansion = self.get_devices_summary_free_space()
    self.assertGreater(
        free_space_before_expansion, free_space_after_expansion,
        "Free space not consumed after expansion of %s" % volume_id)
    num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)
    self.get_brick_and_volume_status(volume_name)
    volume_info_after_expansion = heketi_ops.heketi_volume_info(
        h_node, h_server_url, volume_id, json=True)
    self.assertGreater(
        volume_info_after_expansion["size"],
        volume_info_before_expansion["size"],
        "Size of %s not increased" % volume_id)
    self.assertGreater(
        num_of_bricks_after_expansion, num_of_bricks_before_expansion)
    # Brick count must grow by whole replica sets.
    self.assertEqual(
        num_of_bricks_after_expansion % num_of_bricks_before_expansion, 0)

    # Delete volume and validate release of the used space
    heketi_ops.heketi_volume_delete(h_node, h_server_url, volume_id)
    free_space_after_deletion = self.get_devices_summary_free_space()
    self.assertGreater(
        free_space_after_deletion, free_space_after_expansion,
        "Free space not reclaimed after deletion of volume %s" % volume_id)