def test_create_arbiter_vol_with_more_than_one_brick_set(self):
    """Validate volume creation using heketi for more than six brick set

    Tags the first two heketi nodes (and their devices) as
    arbiter:disabled (data-only) and all remaining nodes/devices as
    arbiter:required, then sizes a PVC so that the resulting arbiter
    volume needs more than one brick set (2 arbiter + 4 data bricks).
    """
    # Set arbiter:disabled tag to the data devices and get their info
    data_nodes = []
    for node_id in self.node_id_list[0:2]:
        node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                self.heketi_server_url,
                                                node_id, json=True)
        if len(node_info['devices']) < 2:
            self.skipTest("Nodes are expected to have at least 2 devices")
        # Heketi reports device free space in KiB, so
        # 3 * 1024**2 KiB == 3 GiB minimum per data device.
        if not all([
                int(d['storage']['free']) > (3 * 1024**2)
                for d in node_info['devices'][0:2]]):
            self.skipTest(
                "Devices are expected to have more than 3Gb of free space")
        # Tag every device on the data node, then the node itself;
        # the helper registers cleanups that restore the original tag.
        for device in node_info['devices']:
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url, 'device',
                device['id'], 'disabled',
                device.get('tags', {}).get('arbiter'))
        self._set_arbiter_tag_with_further_revert(
            self.heketi_client_node, self.heketi_server_url, 'node',
            node_id, 'disabled',
            node_info.get('tags', {}).get('arbiter'))
        data_nodes.append(node_info)

    # Set arbiter:required tag to all other nodes and their devices
    for node_id in self.node_id_list[2:]:
        node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                self.heketi_server_url,
                                                node_id, json=True)
        self._set_arbiter_tag_with_further_revert(
            self.heketi_client_node, self.heketi_server_url, 'node',
            node_id, 'required',
            node_info.get('tags', {}).get('arbiter'))
        for device in node_info['devices']:
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url, 'device',
                device['id'], 'required',
                device.get('tags', {}).get('arbiter'))

    # Get second big volume between 2 data nodes and use it
    # for target vol calculation.
    for i, node_info in enumerate(data_nodes):
        biggest_disk_free_space = 0
        for device in node_info['devices'][0:2]:
            free = int(device['storage']['free'])
            if free > biggest_disk_free_space:
                biggest_disk_free_space = free
        data_nodes[i]['biggest_free_space'] = biggest_disk_free_space
    # 1 KiB more than the smaller node's biggest device, forcing the
    # allocator to split bricks across devices.
    target_vol_size_kb = 1 + min(
        [n['biggest_free_space'] for n in data_nodes])

    # Check that all the data devices have, at least, half of required size
    all_big_enough = True
    for node_info in data_nodes:
        for device in node_info['devices'][0:2]:
            if float(device['storage']['free']) < (target_vol_size_kb / 2):
                all_big_enough = False
                # NOTE: only the inner loop is broken here; the outer
                # loop keeps running, which is harmless since the flag
                # never flips back to True.
                break

    # Create sc with gluster arbiter info
    self.create_storage_class(is_arbiter_vol=True)

    # Create helper arbiter vol if not all the data devices have
    # half of required free space.
    if not all_big_enough:
        helper_vol_size_kb, target_vol_size_kb = 0, 0
        smaller_device_id = None
        for node_info in data_nodes:
            devices = node_info['devices']
            if ((devices[0]['storage']['free']) > (
                    devices[1]['storage']['free'])):
                smaller_device_id = devices[1]['id']
                smaller_device = devices[1]['storage']['free']
                bigger_device = devices[0]['storage']['free']
            else:
                smaller_device_id = devices[0]['id']
                smaller_device = devices[0]['storage']['free']
                bigger_device = devices[1]['storage']['free']
            # Helper volume must be big enough that after it is carved
            # out of the bigger device, the remainder fits twice on the
            # smaller one.
            diff = bigger_device - (2 * smaller_device) + 1
            if diff > helper_vol_size_kb:
                helper_vol_size_kb = diff
                target_vol_size_kb = bigger_device - diff
            # NOTE(review): smaller_device_id is overwritten on every
            # iteration, so it ends up referring to the LAST node's
            # smaller device even when helper_vol_size_kb was taken
            # from an earlier node — confirm this is intended.

        # Disable smaller device and create helper vol on bigger one
        # to reduce its size, then enable smaller device back.
        try:
            out = heketi_ops.heketi_device_disable(self.heketi_client_node,
                                                   self.heketi_server_url,
                                                   smaller_device_id)
            self.assertTrue(out)
            # Sizes are in KiB; PVC sizes are in GiB, hence / 1024**2.
            self.create_and_wait_for_pvc(
                int(helper_vol_size_kb / 1024.0**2) + 1)
        finally:
            out = heketi_ops.heketi_device_enable(self.heketi_client_node,
                                                  self.heketi_server_url,
                                                  smaller_device_id)
            self.assertTrue(out)

    # Create target arbiter volume
    self.create_and_wait_for_pvc(int(target_vol_size_kb / 1024.0**2))

    # Get gluster volume info
    vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
        self.node, self.pvc_name)

    # Expect two brick sets: 2 arbiter bricks + 4 data bricks.
    self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
        vol_info, arbiter_bricks=2, data_bricks=4)
def test_heketi_device_enable_disable(self):
    """Validate device enable and disable functionality"""
    h_node, h_url = self.heketi_client_node, self.heketi_server_url

    # On the first online node, keep exactly one online device and
    # disable every other one (re-enabling them on cleanup).
    online_hosts = self.get_online_nodes_disable_redundant()
    kept_device_id = ""
    for dev in online_hosts[0]["devices"]:
        if dev["state"].strip().lower() != "online":
            continue
        if kept_device_id == "":
            kept_device_id = dev["id"]
            continue
        g.log.info("going to disable device %s", dev["id"])
        heketi_device_disable(h_node, h_url, dev["id"])
        self.addCleanup(heketi_device_enable, h_node, h_url, dev["id"])
    if kept_device_id == "":
        self.skipTest(
            "No device online on node %s" % online_hosts[0]["id"])

    # With only one device online, volume creation must still succeed.
    vol_size = 1
    vol_info = heketi_volume_create(h_node, h_url, vol_size, json=True)
    self.assertTrue(
        vol_info,
        ("Failed to create heketi volume of size %d" % vol_size))
    self.addCleanup(heketi_volume_delete, h_node, h_url, vol_info['id'])

    # One of the new volume's bricks must land on the kept device.
    present = self.check_any_of_bricks_present_in_device(
        vol_info['bricks'], kept_device_id)
    self.assertTrue(
        present,
        "None of '%s' volume bricks is present on the '%s' device."
        % (vol_info['id'], kept_device_id))

    # Disable the last online device; creation must now fail.
    g.log.info("Going to disable device id %s", kept_device_id)
    heketi_device_disable(h_node, h_url, kept_device_id)
    self.addCleanup(heketi_device_enable, h_node, h_url, kept_device_id)

    with self.assertRaises(AssertionError):
        out = heketi_volume_create(h_node, h_url, vol_size, json=True)
        # Only reached if creation unexpectedly succeeded.
        self.addCleanup(heketi_volume_delete, h_node, h_url, out["id"])
        self.assertFalse(True, "Volume creation didn't fail: %s" % out)
    g.log.info("Volume creation failed as expected")

    # Enable back the device which was previously disabled
    g.log.info("Going to enable device id %s", kept_device_id)
    heketi_device_enable(h_node, h_url, kept_device_id)

    # Volume creation works again once the device is back online.
    vol_info = heketi_volume_create(h_node, h_url, vol_size, json=True)
    self.addCleanup(heketi_volume_delete, h_node, h_url, vol_info['id'])

    present = self.check_any_of_bricks_present_in_device(
        vol_info['bricks'], kept_device_id)
    self.assertTrue(
        present,
        "None of '%s' volume bricks is present on the '%s' device."
        % (vol_info['id'], kept_device_id))
def test_create_volumes_enabling_and_disabling_heketi_devices(self):
    """Validate enable/disable of heketi device

    Restricts the cluster to 3 nodes with one device each, creates a
    volume, disables the device that hosts one of its bricks, verifies
    volume creation then fails, re-enables the device and verifies
    creation succeeds again.
    """
    # Get nodes info
    node_id_list = heketi_ops.heketi_node_list(self.heketi_client_node,
                                               self.heketi_server_url)
    node_info_list = []
    for node_id in node_id_list[0:3]:
        node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                self.heketi_server_url,
                                                node_id, json=True)
        node_info_list.append(node_info)

    # Disable 4th and other nodes so bricks can only land on the
    # first three (re-enabled on cleanup).
    if len(node_id_list) > 3:
        for node_id in node_id_list[3:]:
            heketi_ops.heketi_node_disable(self.heketi_client_node,
                                           self.heketi_server_url, node_id)
            self.addCleanup(heketi_ops.heketi_node_enable,
                            self.heketi_client_node,
                            self.heketi_server_url, node_id)

    # Disable second and other devices on the first 3 nodes, leaving
    # exactly one online device per node.
    for node_info in node_info_list[0:3]:
        devices = node_info["devices"]
        self.assertTrue(
            devices, "Node '%s' does not have devices." % node_info["id"])
        if devices[0]["state"].strip().lower() != "online":
            self.skipTest("Test expects first device to be enabled.")
        if len(devices) < 2:
            continue
        for device in node_info["devices"][1:]:
            out = heketi_ops.heketi_device_disable(self.heketi_client_node,
                                                   self.heketi_server_url,
                                                   device["id"])
            self.assertTrue(
                out, "Failed to disable the device %s" % device["id"])
            self.addCleanup(heketi_ops.heketi_device_enable,
                            self.heketi_client_node,
                            self.heketi_server_url, device["id"])

    # Create heketi volume
    out = heketi_ops.heketi_volume_create(self.heketi_client_node,
                                          self.heketi_server_url,
                                          1, json=True)
    self.assertTrue(out, "Failed to create heketi volume of size 1")
    g.log.info("Successfully created heketi volume of size 1")
    # Device hosting the volume's first brick — the one we will toggle.
    device_id = out["bricks"][0]["device"]
    self.addCleanup(heketi_ops.heketi_volume_delete,
                    self.heketi_client_node, self.heketi_server_url,
                    out["bricks"][0]["volume"])

    # Disable device
    g.log.info("Disabling '%s' device" % device_id)
    out = heketi_ops.heketi_device_disable(self.heketi_client_node,
                                           self.heketi_server_url,
                                           device_id)
    self.assertTrue(out, "Failed to disable the device %s" % device_id)
    g.log.info("Successfully disabled device %s" % device_id)

    try:
        # Get device info and confirm the state transition took effect.
        g.log.info("Retrieving '%s' device info" % device_id)
        out = heketi_ops.heketi_device_info(self.heketi_client_node,
                                            self.heketi_server_url,
                                            device_id, json=True)
        self.assertTrue(out, "Failed to get device info %s" % device_id)
        g.log.info("Successfully retrieved device info %s" % device_id)
        name = out["name"]
        self.assertEqual(out["state"].lower().strip(), "offline",
                         "Device %s is not in offline state." % name)
        # NOTE: "offine" below is a pre-existing typo in the log text.
        g.log.info("Device %s is now offine" % name)

        # Try to create heketi volume — with the device offline there is
        # no room for a brick, so the create is expected to raise.
        g.log.info("Creating heketi volume: Expected to fail.")
        try:
            out = heketi_ops.heketi_volume_create(self.heketi_client_node,
                                                  self.heketi_server_url,
                                                  1, json=True)
        except AssertionError:
            g.log.info("Volume was not created as expected.")
        else:
            # Creation unexpectedly succeeded: register cleanup for the
            # stray volume and fail the test.
            self.addCleanup(heketi_ops.heketi_volume_delete,
                            self.heketi_client_node,
                            self.heketi_server_url,
                            out["bricks"][0]["volume"])
            msg = "Volume unexpectedly created. Out: %s" % out
            assert False, msg
    finally:
        # Enable the device back even if the checks above failed.
        g.log.info("Enable '%s' device back." % device_id)
        out = heketi_ops.heketi_device_enable(self.heketi_client_node,
                                              self.heketi_server_url,
                                              device_id)
        self.assertTrue(out, "Failed to enable the device %s" % device_id)
        g.log.info("Successfully enabled device %s" % device_id)

    # Get device info and confirm it is back online.
    out = heketi_ops.heketi_device_info(self.heketi_client_node,
                                        self.heketi_server_url,
                                        device_id, json=True)
    self.assertTrue(out, ("Failed to get device info %s" % device_id))
    g.log.info("Successfully retrieved device info %s" % device_id)
    name = out["name"]
    self.assertEqual(out["state"], "online",
                     "Device %s is not in online state." % name)

    # Create heketi volume of size
    out = heketi_ops.heketi_volume_create(self.heketi_client_node,
                                          self.heketi_server_url,
                                          1, json=True)
    self.assertTrue(out, "Failed to create volume of size 1")
    self.addCleanup(heketi_ops.heketi_volume_delete,
                    self.heketi_client_node, self.heketi_server_url,
                    out["bricks"][0]["volume"])
    g.log.info("Successfully created volume of size 1")
    name = out["name"]

    # Get gluster volume info to cross-check the volume exists on the
    # gluster side as well.
    vol_info = get_volume_info('auto_get_gluster_endpoint', volname=name)
    self.assertTrue(vol_info, "Failed to get '%s' volume info." % name)
    g.log.info("Successfully got the '%s' volume info." % name)
def test_pv_resize_device_disabled(self):
    """Validate resize after disabling all devices except one.

    Creates an expandable 2Gi PVC, disables every device but the first
    one on the first three heketi nodes, expands the PVC to 7Gi and
    verifies that a write which previously overflowed the mount now
    succeeds and that the volume ends up as dist-rep with 6 bricks.
    """
    h_node, h_url = self.heketi_client_node, self.heketi_server_url

    # expand volume size and path volume is mounted
    expand_size, dir_path = 7, "/mnt"

    # Get nodes info
    heketi_node_id_list = heketi_ops.heketi_node_list(h_node, h_url)
    if len(heketi_node_id_list) < 3:
        self.skipTest(
            "At-least 3 gluster nodes are required to execute test case")

    self.create_storage_class(allow_volume_expansion=True)
    pvc_name = self.create_and_wait_for_pvc(pvc_size=2)
    vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
    dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

    # A 1G file fits in the 2Gi PVC, a 3G file must not.
    self._write_file(pod_name, "file1", "1G", dir_path)
    with self.assertRaises(AssertionError):
        self._write_file(pod_name, "file2", "3G", dir_path)

    # Prepare first 3 nodes and then disable other devices.
    # Track the disabled device ids explicitly: the previous version
    # re-used the stale 'devices' variable from the last loop
    # iteration when re-enabling, so only the last node's devices were
    # actually enabled back (three times each).
    disabled_device_ids = []
    for node_id in heketi_node_id_list[:3]:
        node_info = heketi_ops.heketi_node_info(
            h_node, h_url, node_id, json=True)
        self.assertTrue(node_info, "Failed to get node info")
        devices = node_info.get("devices", None)
        self.assertTrue(
            devices, "Node {} does not have devices".format(node_id))
        if devices[0]["state"].strip().lower() != "online":
            self.skipTest("Skipping test as it expects to first device to"
                          " be enabled")
        for device in devices[1:]:
            heketi_ops.heketi_device_disable(h_node, h_url, device["id"])
            self.addCleanup(
                heketi_ops.heketi_device_enable, h_node, h_url,
                device["id"])
            disabled_device_ids.append(device["id"])

    usedsize_before_resize = self._get_mount_size(pod_name, dir_path)

    # Resize pvc
    resize_pvc(self.node, pvc_name, expand_size)
    verify_pvc_size(self.node, pvc_name, expand_size)
    vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
    # Replica-3 volume: the brick count must be a multiple of 3.
    self.assertFalse(len(vol_info['bricks']['brick']) % 3)

    # Re-enable exactly the devices disabled above, each once.
    for device_id in disabled_device_ids:
        heketi_ops.heketi_device_enable(h_node, h_url, device_id)

    # After expansion the 3G write must succeed.
    self._write_file(pod_name, "file3", "3G", dir_path)

    usedsize_after_resize = self._get_mount_size(pod_name, dir_path)
    # Usage is reported as a percentage string, e.g. "55%".
    self.assertGreater(
        int(usedsize_before_resize.strip('%')),
        int(usedsize_after_resize.strip('%')),
        "Mount size {} should be greater than {}".format(
            usedsize_before_resize, usedsize_after_resize))

    self._write_file(pod_name, "file4", "1024", dir_path)

    # Validate dist-rep volume with 6 bricks after pv resize
    vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
    self.assertEqual(
        6, len(vol_info['bricks']['brick']),
        "Expected bricks count is 6, but actual brick count is {}".format(
            len(vol_info['bricks']['brick'])))
def test_create_arbiter_vol_with_more_than_one_brick_set(self):
    """Validate volume creation using heketi for more than six brick set"""
    h_client, h_url = self.heketi_client_node, self.heketi_server_url

    # Mark the first two nodes and all their devices as data-only
    # (arbiter:disabled); the helper reverts the tags on cleanup.
    data_nodes = []
    for node_id in self.node_id_list[0:2]:
        node_info = heketi_ops.heketi_node_info(
            h_client, h_url, node_id, json=True)
        if len(node_info['devices']) < 2:
            self.skipTest(
                "Nodes are expected to have at least 2 devices")
        # Free space is in KiB: require > 3 GiB on both devices.
        if not all(int(d['storage']['free']) > (3 * 1024**2)
                   for d in node_info['devices'][0:2]):
            self.skipTest(
                "Devices are expected to have more than 3Gb of free space")
        for device in node_info['devices']:
            self._set_arbiter_tag_with_further_revert(
                h_client, h_url, 'device', device['id'], 'disabled',
                device.get('tags', {}).get('arbiter'))
        self._set_arbiter_tag_with_further_revert(
            h_client, h_url, 'node', node_id, 'disabled',
            node_info.get('tags', {}).get('arbiter'))
        data_nodes.append(node_info)

    # Every remaining node (and its devices) hosts arbiter bricks only.
    for node_id in self.node_id_list[2:]:
        node_info = heketi_ops.heketi_node_info(
            h_client, h_url, node_id, json=True)
        self._set_arbiter_tag_with_further_revert(
            h_client, h_url, 'node', node_id, 'required',
            node_info.get('tags', {}).get('arbiter'))
        for device in node_info['devices']:
            self._set_arbiter_tag_with_further_revert(
                h_client, h_url, 'device', device['id'], 'required',
                device.get('tags', {}).get('arbiter'))

    # Target size: 1 KiB beyond the smaller node's biggest device so
    # a single device cannot hold a whole data brick.
    for node_info in data_nodes:
        node_info['biggest_free_space'] = max(
            int(d['storage']['free']) for d in node_info['devices'][0:2])
    target_vol_size_kb = 1 + min(
        n['biggest_free_space'] for n in data_nodes)

    # Do all data devices hold at least half of the required size?
    all_big_enough = all(
        float(device['storage']['free']) >= (target_vol_size_kb / 2)
        for node_info in data_nodes
        for device in node_info['devices'][0:2])

    # Create sc with gluster arbiter info
    self.create_storage_class(is_arbiter_vol=True)

    if not all_big_enough:
        # Shrink the bigger device with a helper volume so the target
        # volume's bricks can be spread over both devices.
        helper_vol_size_kb, target_vol_size_kb = 0, 0
        smaller_device_id = None
        for node_info in data_nodes:
            dev_a, dev_b = node_info['devices'][0], node_info['devices'][1]
            if dev_a['storage']['free'] > dev_b['storage']['free']:
                bigger, smaller = dev_a, dev_b
            else:
                bigger, smaller = dev_b, dev_a
            smaller_device_id = smaller['id']
            smaller_free = smaller['storage']['free']
            bigger_free = bigger['storage']['free']
            diff = bigger_free - (2 * smaller_free) + 1
            if diff > helper_vol_size_kb:
                helper_vol_size_kb = diff
                target_vol_size_kb = bigger_free - diff

        # Disable smaller device and create helper vol on bigger one
        # to reduce its size, then enable smaller device back.
        try:
            self.assertTrue(heketi_ops.heketi_device_disable(
                h_client, h_url, smaller_device_id))
            self.create_and_wait_for_pvc(
                int(helper_vol_size_kb / 1024.0**2) + 1)
        finally:
            self.assertTrue(heketi_ops.heketi_device_enable(
                h_client, h_url, smaller_device_id))

    # Create target arbiter volume (sizes are KiB, PVC wants GiB).
    self.create_and_wait_for_pvc(int(target_vol_size_kb / 1024.0**2))

    # Fetch the gluster-side volume layout for the new PVC.
    vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

    # Two brick sets expected: 2 arbiter + 4 data bricks.
    self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
        vol_info, arbiter_bricks=2, data_bricks=4)
def test_check_device_disable_based_on_heketi_zone(
        self, zone_count, is_disable_on_different_zone, is_set_env=False):
    """Validate device disable in different heketi zones

    Parameterized (presumably via ddt — confirm against the class
    decorators): `zone_count` is the number of heketi zones to check,
    `is_disable_on_different_zone` picks whether the second disabled
    device lives in a different zone than the first, and `is_set_env`
    switches strict zone checking from the storage class to the heketi
    DC environment variable.
    """
    online_device_count, expected_device_count = 0, 4
    expected_node_count, heketi_zone_checking, sc_name = 4, "strict", None

    # Check amount of available online nodes
    online_node_count = len(self._get_online_nodes())
    if online_node_count < expected_node_count:
        self.skipTest('Available node count {} is less than expected node '
                      'count {}'.format(online_node_count,
                                        expected_node_count))

    # Check amount of available online heketi zones
    self._check_for_available_zones(zone_count)

    # Get the online devices and nodes w.r.t. to zone
    zone_devices_nodes = self._get_online_devices_and_nodes_with_zone()

    # Check amount of available online heketi devices
    for zone in zone_devices_nodes:
        online_device_count += len(zone_devices_nodes[zone]['devices'])
    if online_device_count < expected_device_count:
        self.skipTest(
            "Expected the heketi device count {} is greater than the "
            "available device count {}".format(expected_device_count,
                                               online_device_count))

    # Create sc or else directly set env to "strict" inside dc
    is_create_sc = not is_set_env
    if is_create_sc:
        sc_name = self.create_storage_class(
            sc_name_prefix=self.prefix, vol_name_prefix=self.prefix,
            heketi_zone_checking=heketi_zone_checking)
    if is_set_env:
        self._set_zone_check_env_in_heketi_dc(heketi_zone_checking)

    # Choose a zone and device_id to disable the device
    # NOTE(review): with zone_count == 3, if no zone has more than one
    # device the loop completes without a break and 'disabled_device'
    # is never bound (NameError below) — confirm the device-count
    # skip above guarantees this cannot happen.
    for zone, nodes_and_devices in zone_devices_nodes.items():
        if zone_count == 3:
            # Select a device with a zone having multiple nodes in
            # same zone to cover the test cases "disable in same zone"
            if len(nodes_and_devices['devices']) > 1:
                zone_with_disabled_device = zone
                disabled_device = nodes_and_devices['devices'][0]
                break
        else:
            # Select device from any of the zones
            zone_with_disabled_device = zone
            disabled_device = nodes_and_devices['devices'][0]
            break

    # Disable the selected device
    heketi_ops.heketi_device_disable(
        self.h_client, self.h_server, disabled_device)
    self.addCleanup(heketi_ops.heketi_device_enable,
                    self.h_client, self.h_server, disabled_device)

    # Create some DCs with PVCs and check brick placement in heketi zones
    pod_names = self._create_dcs_and_check_brick_placement(
        self.prefix, sc_name, heketi_zone_checking, zone_count)

    # Enable disabled device
    heketi_ops.heketi_device_enable(
        self.h_client, self.h_server, disabled_device)

    if is_disable_on_different_zone:
        # Select the new device in a different zone
        for zone, nodes_and_devices in zone_devices_nodes.items():
            if zone != zone_with_disabled_device:
                new_device_to_disable = nodes_and_devices['devices'][0]
                break
    else:
        # Select the new device in the same zone
        # (assumes that zone has at least 2 devices — TODO confirm)
        new_device_to_disable = zone_devices_nodes[
            zone_with_disabled_device]['devices'][1]

    # Disable the newly selected device
    heketi_ops.heketi_device_disable(
        self.h_client, self.h_server, new_device_to_disable)
    self.addCleanup(heketi_ops.heketi_device_enable,
                    self.h_client, self.h_server, new_device_to_disable)

    # Verify if pods are in ready state
    for pod_name in pod_names:
        openshift_ops.wait_for_pod_be_ready(self.node, pod_name,
                                            timeout=5, wait_step=2)
def test_create_volumes_enabling_and_disabling_heketi_devices(self):
    """Validate enable/disable of heketi device

    Restricts the cluster to 3 nodes with one device each, creates a
    volume, disables the device hosting one of its bricks, verifies
    volume creation then fails, re-enables the device and verifies
    creation succeeds again.
    """
    # Get nodes info
    node_id_list = heketi_ops.heketi_node_list(
        self.heketi_client_node, self.heketi_server_url)
    node_info_list = []
    for node_id in node_id_list[0:3]:
        node_info = heketi_ops.heketi_node_info(
            self.heketi_client_node, self.heketi_server_url,
            node_id, json=True)
        node_info_list.append(node_info)

    # Disable 4th and other nodes.
    # FIX: the loop previously iterated over 'node' but passed the
    # stale 'node_id' left over from the loop above, so the extra
    # nodes were never actually disabled.
    if len(node_id_list) > 3:
        for node_id in node_id_list[3:]:
            heketi_ops.heketi_node_disable(
                self.heketi_client_node, self.heketi_server_url, node_id)
            self.addCleanup(
                heketi_ops.heketi_node_enable, self.heketi_client_node,
                self.heketi_server_url, node_id)

    # Disable second and other devices on the first 3 nodes, leaving
    # one online device per node.
    for node_info in node_info_list[0:3]:
        devices = node_info["devices"]
        self.assertTrue(
            devices, "Node '%s' does not have devices." % node_info["id"])
        if devices[0]["state"].strip().lower() != "online":
            self.skipTest("Test expects first device to be enabled.")
        if len(devices) < 2:
            continue
        for device in node_info["devices"][1:]:
            out = heketi_ops.heketi_device_disable(
                self.heketi_client_node, self.heketi_server_url,
                device["id"])
            self.assertTrue(
                out, "Failed to disable the device %s" % device["id"])
            self.addCleanup(
                heketi_ops.heketi_device_enable,
                self.heketi_client_node, self.heketi_server_url,
                device["id"])

    # Create heketi volume
    out = heketi_ops.heketi_volume_create(
        self.heketi_client_node, self.heketi_server_url, 1, json=True)
    self.assertTrue(out, "Failed to create heketi volume of size 1")
    g.log.info("Successfully created heketi volume of size 1")
    # Device hosting the volume's first brick — the one we will toggle.
    device_id = out["bricks"][0]["device"]
    self.addCleanup(
        heketi_ops.heketi_volume_delete, self.heketi_client_node,
        self.heketi_server_url, out["bricks"][0]["volume"])

    # Disable device
    g.log.info("Disabling '%s' device" % device_id)
    out = heketi_ops.heketi_device_disable(
        self.heketi_client_node, self.heketi_server_url, device_id)
    self.assertTrue(out, "Failed to disable the device %s" % device_id)
    g.log.info("Successfully disabled device %s" % device_id)

    try:
        # Get device info and confirm the state transition took effect.
        g.log.info("Retrieving '%s' device info" % device_id)
        out = heketi_ops.heketi_device_info(
            self.heketi_client_node, self.heketi_server_url,
            device_id, json=True)
        self.assertTrue(out, "Failed to get device info %s" % device_id)
        g.log.info("Successfully retrieved device info %s" % device_id)
        name = out["name"]
        if out["state"].lower().strip() != "offline":
            raise exceptions.ExecutionError(
                "Device %s is not in offline state." % name)
        # FIX: corrected "offine" typo in the log message.
        g.log.info("Device %s is now offline" % name)

        # Try to create heketi volume — with the only device on that
        # node offline, the create is expected to raise.
        g.log.info("Creating heketi volume: Expected to fail.")
        try:
            out = heketi_ops.heketi_volume_create(
                self.heketi_client_node, self.heketi_server_url,
                1, json=True)
        except exceptions.ExecutionError:
            g.log.info("Volume was not created as expected.")
        else:
            # Creation unexpectedly succeeded: schedule cleanup of the
            # stray volume and fail the test.
            self.addCleanup(
                heketi_ops.heketi_volume_delete, self.heketi_client_node,
                self.heketi_server_url, out["bricks"][0]["volume"])
            msg = "Volume unexpectedly created. Out: %s" % out
            assert False, msg
    finally:
        # Enable the device back even if the checks above failed.
        g.log.info("Enable '%s' device back." % device_id)
        out = heketi_ops.heketi_device_enable(
            self.heketi_client_node, self.heketi_server_url, device_id)
        self.assertTrue(out, "Failed to enable the device %s" % device_id)
        g.log.info("Successfully enabled device %s" % device_id)

    # Get device info and confirm it is back online.
    out = heketi_ops.heketi_device_info(
        self.heketi_client_node, self.heketi_server_url, device_id,
        json=True)
    self.assertTrue(out, ("Failed to get device info %s" % device_id))
    g.log.info("Successfully retrieved device info %s" % device_id)
    name = out["name"]
    if out["state"] != "online":
        raise exceptions.ExecutionError(
            "Device %s is not in online state." % name)

    # Create heketi volume of size
    out = heketi_ops.heketi_volume_create(
        self.heketi_client_node, self.heketi_server_url, 1, json=True)
    self.assertTrue(out, "Failed to create volume of size 1")
    self.addCleanup(
        heketi_ops.heketi_volume_delete, self.heketi_client_node,
        self.heketi_server_url, out["bricks"][0]["volume"])
    g.log.info("Successfully created volume of size 1")
    name = out["name"]

    # Get gluster volume info to cross-check the volume on the
    # gluster side.
    vol_info = get_volume_info('auto_get_gluster_endpoint', volname=name)
    self.assertTrue(vol_info, "Failed to get '%s' volume info." % name)
    g.log.info("Successfully got the '%s' volume info." % name)