def test_brick_multiplex_pids_with_diff_vol_option_values(self):
    """Test that brick PIDs are the same when vol option values differ"""
    h_client, h_url = self.heketi_client_node, self.heketi_server_url

    # Disable heketi nodes except the first three nodes
    h_nodes_list = heketi_node_list(h_client, h_url)
    for node_id in h_nodes_list[3:]:
        heketi_node_disable(h_client, h_url, node_id)
        self.addCleanup(heketi_node_enable, h_client, h_url, node_id)

    # Create storage classes with different volume options
    sc1 = self.create_storage_class(volumeoptions='user.heketi.abc 1')
    sc2 = self.create_storage_class(volumeoptions='user.heketi.abc 2')

    # Create PVCs with the above SCs
    pvc1 = self.create_and_wait_for_pvcs(sc_name=sc1)
    pvc2 = self.create_and_wait_for_pvcs(sc_name=sc2)

    # Get vol info and status
    vol_info1 = get_gluster_vol_info_by_pvc_name(self.node, pvc1[0])
    vol_info2 = get_gluster_vol_info_by_pvc_name(self.node, pvc2[0])
    vol_status1 = get_gluster_vol_status(vol_info1['gluster_vol_id'])
    vol_status2 = get_gluster_vol_status(vol_info2['gluster_vol_id'])

    # Verify vol options
    err_msg = ('Volume option "user.heketi.abc %s" did not match for '
               'volume %s in gluster vol info')
    self.assertEqual(
        vol_info1['options']['user.heketi.abc'], '1',
        err_msg % (1, vol_info1['gluster_vol_id']))
    self.assertEqual(
        vol_info2['options']['user.heketi.abc'], '2',
        err_msg % (2, vol_info2['gluster_vol_id']))

    # Get the PIDs and match them
    pids1 = set()
    for brick in vol_info1['bricks']['brick']:
        host, bname = brick['name'].split(":")
        pids1.add(vol_status1[host][bname]['pid'])
    pids2 = set()
    for brick in vol_info2['bricks']['brick']:
        host, bname = brick['name'].split(":")
        pids2.add(vol_status2[host][bname]['pid'])

    err_msg = ('PIDs of both the volumes %s and %s are expected to be '
               'the same, but got different PIDs "%s" and "%s".' % (
                   vol_info1['gluster_vol_id'],
                   vol_info2['gluster_vol_id'], pids1, pids2))
    self.assertEqual(pids1, pids2, err_msg)
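# Context (assumption, not stated in the test itself): equal brick PIDs
# across separate volumes are only expected when Gluster brick
# multiplexing is enabled, i.e. when the cluster-wide
# "cluster.brick-multiplex" option is "on", so that bricks of multiple
# volumes share one glusterfsd process. A quick manual check from a
# gluster pod/node:
#
#     gluster volume get all cluster.brick-multiplex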
def test_pvc_arbiter_placement_and_expansion_with_zone_check_set_in_sc(
        self, zone_count, heketi_zone_checking):
    # Check amount of available online heketi zones
    self._check_for_available_zones(zone_count)

    # Create storage class with "user.heketi.zone-checking" set
    sc_name = self.create_storage_class(
        sc_name_prefix=self.prefix, vol_name_prefix=self.prefix,
        allow_volume_expansion=True, is_arbiter_vol=True,
        heketi_zone_checking=heketi_zone_checking)

    # Create PVC using the above storage class
    pvc_name = self.create_and_wait_for_pvc(
        pvc_name_prefix=self.prefix, sc_name=sc_name)

    # Validate brick placement and expand PVC
    self._validate_brick_placement_in_correct_zone_or_with_expand_pvc(
        heketi_zone_checking, pvc_name, zone_count, expand=True)

    # Make sure that gluster vol has appropriate options set
    vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
        self.node, pvc_name)
    self.assertIn('user.heketi.zone-checking', vol_info['options'])
    self.assertEqual(
        vol_info['options']['user.heketi.zone-checking'],
        heketi_zone_checking)
    self.assertIn('user.heketi.arbiter', vol_info['options'])
    self.assertEqual(vol_info['options']['user.heketi.arbiter'], 'true')

    # Create app DC with the above PVC
    self.create_dc_with_pvc(pvc_name, timeout=120, wait_step=3)
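# Note: _check_for_available_zones and
# _validate_brick_placement_in_correct_zone_or_with_expand_pvc are shared
# helpers of this test class that are not shown here; the zone counting
# and strict-placement validation they encapsulate can be seen inlined in
# the second test_check_pvc_placement_based_on_the_heketi_zones variant
# near the end of this section.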
def test_check_pvc_placement_with_zone_check_in_sc_and_dc_both(
        self, check_in_dc, check_in_sc):
    zone_count = 3

    # Check amount of available online heketi zones
    self._check_for_available_zones(zone_count)

    # Create storage class with the zone check option set
    sc_name = self.create_storage_class(
        sc_name_prefix=self.prefix, vol_name_prefix=self.prefix,
        heketi_zone_checking=check_in_sc)

    # Set "user.heketi.zone-checking" inside the heketi dc
    self._set_zone_check_env_in_heketi_dc(check_in_dc)

    # Create a PVC
    pvc_name = self.create_and_wait_for_pvc(
        pvc_name_prefix=self.prefix, sc_name=sc_name)

    # Validate brick placement
    self._validate_brick_placement_in_correct_zone_or_with_expand_pvc(
        check_in_dc, pvc_name, zone_count)

    # Make sure that gluster vol has the appropriate option set; the
    # value set in the dc is expected to win over the one in the sc
    vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
        self.node, pvc_name)
    self.assertIn('user.heketi.zone-checking', vol_info['options'])
    self.assertEqual(
        vol_info['options']['user.heketi.zone-checking'], check_in_dc)

    # Create app DC with the above PVC
    self.create_dc_with_pvc(pvc_name, timeout=120, wait_step=3)
def test_expand_arbiter_volume_according_to_avg_file_size(
        self, avg_file_size, expected_brick_size, vol_expand=True):
    """Validate expansion of arbiter volume with diff avg file size"""
    data_hosts = []
    arbiter_hosts = []

    # Set tags arbiter:disabled, arbiter:required
    for i, node_id in enumerate(self.node_id_list):
        self._set_arbiter_tag_with_further_revert(
            self.heketi_client_node, self.heketi_server_url, 'node',
            node_id, 'disabled' if i < 2 else 'required')

        node_info = heketi_ops.heketi_node_info(
            self.heketi_client_node, self.heketi_server_url,
            node_id, json=True)
        (data_hosts.append(node_info['hostnames']['storage'][0])
            if i < 2 else
            arbiter_hosts.append(node_info['hostnames']['storage'][0]))
        self.assertEqual(
            node_info['tags']['arbiter'],
            'disabled' if i < 2 else 'required')

    # Create sc with gluster arbiter info
    self.create_storage_class(
        is_arbiter_vol=True, allow_volume_expansion=True,
        arbiter_avg_file_size=avg_file_size)

    # Create PVC and wait for it to be in 'Bound' state
    self.create_and_wait_for_pvc()

    vol_expanded = False
    for i in range(2):
        vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
            self.node, self.pvc_name)
        bricks = (
            self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
                vol_info,
                arbiter_bricks=(2 if vol_expanded else 1),
                data_bricks=(4 if vol_expanded else 2)))

        # Verify that arbiter bricks lie on arbiter hosts
        for brick in bricks['arbiter_list']:
            ip, brick_name = brick['name'].split(':')
            self.assertIn(ip, arbiter_hosts)

            # Verify the size of the arbiter brick
            cmd = "df -h %s --output=size | tail -1" % brick_name
            out = openshift_ops.cmd_run_on_gluster_pod_or_node(
                self.node, cmd, ip)
            self.assertEqual(out, expected_brick_size)

        # Verify that data bricks lie on data hosts
        for brick in bricks['data_list']:
            self.assertIn(brick['name'].split(':')[0], data_hosts)

        if vol_expanded or not vol_expand:
            break

        # Expand PVC and verify the size
        pvc_size = 2
        openshift_ops.resize_pvc(self.node, self.pvc_name, pvc_size)
        openshift_ops.verify_pvc_size(self.node, self.pvc_name, pvc_size)
        vol_expanded = True
def test_pvc_placement_and_expansion_with_zone_check_set_in_dc(
        self, zone_count, is_arbiter):
    heketi_zone_checking, expand_size = "strict", 2

    # Create storage class with expansion and arbiter options enabled
    sc_name = self.create_storage_class(
        sc_name_prefix=self.prefix, vol_name_prefix=self.prefix,
        allow_volume_expansion=True, is_arbiter_vol=True)

    # Create PVC using the above storage class
    pvc_name = self.create_and_wait_for_pvc(
        pvc_name_prefix=self.prefix, sc_name=sc_name)

    # Check amount of available online heketi zones
    self._check_for_available_zones(zone_count)

    # Set "user.heketi.zone-checking" to strict inside the heketi dc
    self._set_zone_check_env_in_heketi_dc(heketi_zone_checking)

    # Expand PVC
    openshift_storage_libs.enable_pvc_resize(self.node)
    openshift_ops.resize_pvc(self.node, pvc_name, expand_size)
    openshift_ops.verify_pvc_size(self.node, pvc_name, expand_size)

    # Make sure that gluster vol has the appropriate option set
    vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
        self.node, pvc_name)
    if is_arbiter:
        self.assertIn('user.heketi.arbiter', vol_info['options'])
        self.assertEqual(
            vol_info['options']['user.heketi.arbiter'], 'true')

    # Create app DC with the above PVC
    self.create_dc_with_pvc(pvc_name, timeout=120, wait_step=3)
def test_arbiter_volume_delete_using_pvc(self):
    """Test Arbiter volume delete using pvc when volume is not mounted
       on app pod
    """
    prefix = "autotest-%s" % utils.get_random_str()

    # Create sc with gluster arbiter info
    sc_name = self.create_storage_class(
        vol_name_prefix=prefix, is_arbiter_vol=True)

    # Create PVC and wait for it to be in 'Bound' state
    pvc_name = self.create_and_wait_for_pvc(
        pvc_name_prefix=prefix, sc_name=sc_name)

    # Get vol info
    gluster_vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
        self.node, pvc_name)

    # Verify arbiter volume properties
    self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
        gluster_vol_info)

    # Get volume ID
    gluster_vol_id = gluster_vol_info["gluster_vol_id"]

    # Delete the pvc
    openshift_ops.oc_delete(self.node, 'pvc', pvc_name)
    openshift_ops.wait_for_resource_absence(self.node, 'pvc', pvc_name)

    # Check the heketi volume list to make sure the volume is gone
    g.log.info("List heketi volumes")
    heketi_volumes = heketi_ops.heketi_volume_list(
        self.heketi_client_node, self.heketi_server_url)
    err_msg = "Failed to delete heketi volume by prefix %s" % prefix
    self.assertNotIn(prefix, heketi_volumes, err_msg)

    # Check that the gluster volume is absent
    get_gluster_vol_info = volume_ops.get_volume_info(
        "auto_get_gluster_endpoint", gluster_vol_id)
    err_msg = "Failed to delete gluster volume %s" % gluster_vol_id
    self.assertFalse(get_gluster_vol_info, err_msg)

    # Check that the bricks and LVs are absent
    for brick in gluster_vol_info['bricks']['brick']:
        gluster_node_ip, brick_name = brick["name"].split(":")

        with self.assertRaises(exceptions.ExecutionError):
            cmd = "df %s" % brick_name
            openshift_ops.cmd_run_on_gluster_pod_or_node(
                self.node, cmd, gluster_node_ip)

        with self.assertRaises(exceptions.ExecutionError):
            lv_match = re.search(BRICK_REGEX, brick["name"])
            if lv_match:
                cmd = "lvs %s" % lv_match.group(2).strip()
                openshift_ops.cmd_run_on_gluster_pod_or_node(
                    self.node, cmd, gluster_node_ip)
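# Note: BRICK_REGEX is a module-level constant defined outside this
# snippet. Based on how group(2) is fed to "lvs" above, it is assumed to
# capture the "<vg>/<lv>" portion of a brick path, e.g. something like:
#
#     BRICK_REGEX = r"^(.*):/var/lib/heketi/mounts/(.*)/brick$"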
def test_arbiter_volume_expand_using_pvc(self):
    """Validate arbiter volume expansion via PVC resize"""
    # Create sc with gluster arbiter info
    self.create_storage_class(
        is_arbiter_vol=True, allow_volume_expansion=True)

    # Create PVC and wait for it to be in 'Bound' state
    self.create_and_wait_for_pvc()

    # Get vol info
    vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

    self.verify_amount_and_proportion_of_arbiter_and_data_bricks(vol_info)

    pvc_size = 2
    resize_pvc(self.node, self.pvc_name, pvc_size)
    verify_pvc_size(self.node, self.pvc_name, pvc_size)

    vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

    self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
        vol_info, arbiter_bricks=2, data_bricks=4)
def test_arbiter_pvc_create(self):
    """Validate dynamic provision of an arbiter volume"""
    # Create sc with gluster arbiter info
    self.create_storage_class(is_arbiter_vol=True)

    # Create PVC and wait for it to be in 'Bound' state
    self.create_and_wait_for_pvc()

    # Get vol info
    vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

    self.verify_amount_and_proportion_of_arbiter_and_data_bricks(vol_info)
def test_verify_arbiter_brick_able_to_contain_expected_amount_of_files(
        self, pvc_size_gb, avg_file_size):
    """Validate arbiter brick creation with different avg file size"""
    # Create sc with gluster arbiter info
    self.create_storage_class(
        is_arbiter_vol=True, arbiter_avg_file_size=avg_file_size)

    # Create PVC and wait for it to be in 'Bound' state
    self.create_and_wait_for_pvc(pvc_size_gb)

    # Get volume info
    vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
        self.node, self.pvc_name)

    # Verify proportion of data and arbiter bricks
    bricks_info = (
        self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
            vol_info))

    expected_file_amount = pvc_size_gb * 1024**2 // (avg_file_size or 64)
    expected_file_amount = (
        expected_file_amount // bricks_info['arbiter_amount'])

    # Try to create the expected amount of files on the arbiter brick
    # mount
    passed_arbiter_bricks = []
    not_found = "Mount Not Found"
    for brick in bricks_info['arbiter_list']:
        # A brick "name" looks like the following:
        #     ip_addr:/path/to/vg/brick_unique_name/brick
        gluster_ip, brick_path = brick["name"].split(":")
        brick_path = brick_path[0:-6]  # strip the trailing "/brick"

        cmd = "mount | grep %s || echo '%s'" % (brick_path, not_found)
        out = openshift_ops.cmd_run_on_gluster_pod_or_node(
            self.node, cmd, gluster_ip)
        if out != not_found:
            cmd = (
                "python -c \"["
                " open('%s/foo_file{0}'.format(i), 'a').close()"
                " for i in range(%s)"
                "]\"" % (brick_path, expected_file_amount))
            openshift_ops.cmd_run_on_gluster_pod_or_node(
                self.node, cmd, gluster_ip)
            passed_arbiter_bricks.append(brick["name"])

    # Make sure all the arbiter bricks were checked
    for brick in bricks_info['arbiter_list']:
        self.assertIn(
            brick["name"], passed_arbiter_bricks,
            "Arbiter brick '%s' was not verified. Looks like it was "
            "not found on any of the gluster pods/nodes." % brick["name"])
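# Worked example for the file-amount math above (illustrative numbers):
# a 2 GiB PVC with the default 64 KiB average file size gives
#     2 * 1024**2 // 64 == 32768
# expected files, and a fresh (non-expanded) arbiter volume has a single
# arbiter brick, so all 32768 files are expected to fit on that one
# brick's mount.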
def test_arbiter_pvc_placement_with_zone_check_set_in_sc(
        self, zone_count, heketi_zone_checking, node_count=None):
    # Check amount of available online nodes
    if node_count:
        online_node_count = len(self._get_online_nodes())
        if online_node_count < node_count:
            self.skipTest(
                'Available node count {} is less than expected node '
                'count {}'.format(online_node_count, node_count))

    # Check amount of available online heketi zones
    self._check_for_available_zones(zone_count)

    # Create storage class with "user.heketi.zone-checking" set
    sc_name = self.create_storage_class(
        sc_name_prefix=self.prefix, vol_name_prefix=self.prefix,
        is_arbiter_vol=True, heketi_zone_checking=heketi_zone_checking)

    # PVC creation should fail when zones are below 3 and check is strict
    if heketi_zone_checking == "strict" and zone_count < 3:
        self.assertRaises(
            exceptions.ExecutionError, self.create_and_wait_for_pvc,
            pvc_name_prefix=self.prefix, sc_name=sc_name, timeout=30)

    else:
        # Create PVC using the above storage class
        pvc_name = self.create_and_wait_for_pvc(
            pvc_name_prefix=self.prefix, sc_name=sc_name)

        # Validate brick placement
        self._validate_brick_placement_in_correct_zone_or_with_expand_pvc(
            heketi_zone_checking, pvc_name, zone_count)

        # Make sure that gluster vol has appropriate options set
        vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
            self.node, pvc_name)
        self.assertIn('user.heketi.zone-checking', vol_info['options'])
        self.assertEqual(
            vol_info['options']['user.heketi.zone-checking'],
            heketi_zone_checking)
        self.assertIn('user.heketi.arbiter', vol_info['options'])
        self.assertEqual(
            vol_info['options']['user.heketi.arbiter'], 'true')

        # Create app DC with the above PVC
        self.create_dc_with_pvc(pvc_name, timeout=120, wait_step=3)
def test_check_pvc_placement_based_on_the_heketi_zones(
        self, zone_count, heketi_zone_checking, is_arbiter_vol,
        expand=False):
    # TODO(vponomar): implement setting env vars for the Heketi dc.

    # Check amount of available online heketi zones
    self._check_for_available_zones(zone_count)

    # Create storage class with the "user.heketi.zone-checking" option
    prefix = "autotests-heketi-zones"
    sc_name = self.create_storage_class(
        sc_name_prefix=prefix, vol_name_prefix=prefix,
        allow_volume_expansion=expand, is_arbiter_vol=is_arbiter_vol,
        heketi_zone_checking=heketi_zone_checking)

    # Create PVC using the above storage class
    pvc_name = self.create_and_wait_for_pvc(
        pvc_name_prefix=prefix, sc_name=sc_name)

    # Validate brick placement and expand if needed
    self._validate_brick_placement_in_correct_zone_or_with_expand_pvc(
        heketi_zone_checking, pvc_name, zone_count, expand=expand)

    # Make sure that gluster vol has appropriate options set
    vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
        self.node, pvc_name)
    self.assertIn('user.heketi.zone-checking', vol_info['options'])
    self.assertEqual(
        vol_info['options']['user.heketi.zone-checking'],
        heketi_zone_checking)
    if is_arbiter_vol:
        self.assertIn('user.heketi.arbiter', vol_info['options'])
        self.assertEqual(
            vol_info['options']['user.heketi.arbiter'], 'true')

    # Create app DC with the above PVC
    self.create_dc_with_pvc(pvc_name, timeout=120, wait_step=3)
def test_pv_resize_when_heketi_down(self):
    """Create a PVC and try to expand it when heketi is down; it should
       fail. After heketi is back up, PVC expansion should work.
    """
    self.create_storage_class(allow_volume_expansion=True)
    pvc_name = self.create_and_wait_for_pvc()
    dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

    pv_name = get_pv_name_from_pvc(self.node, pvc_name)
    custom = (r':metadata.annotations.'
              r'"gluster\.kubernetes\.io\/heketi-volume-id"')
    vol_id = oc_get_custom_resource(self.node, 'pv', custom, pv_name)[0]
    h_vol_info = heketi_ops.heketi_volume_info(
        self.heketi_client_node, self.heketi_server_url, vol_id,
        json=True)

    # Bring the heketi pod down
    scale_dc_pod_amount_and_wait(
        self.node, self.heketi_dc_name, pod_amount=0)
    self.addCleanup(
        scale_dc_pod_amount_and_wait, self.node,
        self.heketi_dc_name, pod_amount=1)

    cmd = 'dd if=/dev/urandom of=/mnt/%s bs=614400k count=1'
    ret, out, err = oc_rsh(self.node, pod_name, cmd % 'file1')
    self.assertFalse(ret, 'Not able to write file with err: %s' % err)
    wait_for_pod_be_ready(self.node, pod_name, 10, 5)

    resize_pvc(self.node, pvc_name, 2)
    wait_for_events(
        self.node, pvc_name, obj_type='PersistentVolumeClaim',
        event_type='Warning', event_reason='VolumeResizeFailed')

    # Verify volume was not expanded
    vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
    self.assertEqual(vol_info['gluster_vol_id'], h_vol_info['name'])
    self.assertEqual(
        len(vol_info['bricks']['brick']), len(h_vol_info['bricks']))

    # Bring the heketi pod up
    scale_dc_pod_amount_and_wait(
        self.node, self.heketi_dc_name, pod_amount=1)

    # Verify volume expansion
    verify_pvc_size(self.node, pvc_name, 2)
    vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
    self.assertFalse(len(vol_info['bricks']['brick']) % 3)
    self.assertLess(
        len(h_vol_info['bricks']), len(vol_info['bricks']['brick']))

    # Wait for remount after expansion
    for w in waiter.Waiter(timeout=30, interval=5):
        ret, out, err = oc_rsh(
            self.node, pod_name,
            "df -Ph /mnt | awk '{print $2}' | tail -1")
        self.assertFalse(
            ret, 'Failed with err: %s and Output: %s' % (err, out))
        if out.strip() == '2.0G':
            break

    # Write data making sure we have more space than before
    ret, out, err = oc_rsh(self.node, pod_name, cmd % 'file2')
    self.assertFalse(ret, 'Not able to write file with err: %s' % err)

    # Verify pod is running
    wait_for_pod_be_ready(self.node, pod_name, 10, 5)
def test_arbiter_pvc_mount_on_pod(self):
    """Validate new volume creation using app pod"""
    # Create sc with gluster arbiter info
    self.create_storage_class(is_arbiter_vol=True)

    # Create PVC and wait for it to be in 'Bound' state
    self.create_and_wait_for_pvc()

    # Create pod with attached volume
    mount_path = "/mnt"
    pod_name = oc_create_tiny_pod_with_volume(
        self.node, self.pvc_name, "test-arbiter-pvc-mount-on-app-pod",
        mount_path=mount_path)
    self.addCleanup(oc_delete, self.node, 'pod', pod_name)

    # Wait for pod to be up and running
    wait_for_pod_be_ready(self.node, pod_name, timeout=60, wait_step=2)

    # Get volume ID
    vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)
    vol_id = vol_info["gluster_vol_id"]

    # Verify that the pod has the volume mounted on it
    cmd = "oc exec {0} -- df -PT {1} | grep {1}".format(
        pod_name, mount_path)
    out = self.cmd_run(cmd)
    err_msg = ("Failed to get info about mounted '%s' volume. "
               "Output is empty." % vol_id)
    self.assertTrue(out, err_msg)

    # Verify volume data on the pod. Expected output looks like:
    # Filesystem Type           Size    Used  Avail   Cap Mounted on
    # IP:vol_id  fuse.glusterfs 1038336 33408 1004928 3%  /mnt
    data = [s for s in out.strip().split(' ') if s]
    actual_vol_id = data[0].split(':')[-1]
    self.assertEqual(
        vol_id, actual_vol_id,
        "Volume ID does not match: expected is "
        "'%s' and actual is '%s'." % (vol_id, actual_vol_id))
    self.assertIn(
        "gluster", data[1],
        "Filesystem type is expected to be of 'glusterfs' type. "
        "Actual value is '%s'." % data[1])
    self.assertEqual(
        mount_path, data[6],
        "Unexpected mount path. Expected is '%s' and actual is '%s'." % (
            mount_path, data[6]))
    max_size = 1024 ** 2
    total_size = int(data[2])
    self.assertLessEqual(
        total_size, max_size,
        "Volume has bigger size '%s' than expected - '%s'." % (
            total_size, max_size))
    min_available_size = int(max_size * 0.93)
    available_size = int(data[4])
    self.assertLessEqual(
        min_available_size, available_size,
        "Minimum available size (%s) not satisfied. Actual is '%s'." % (
            min_available_size, available_size))

    # Write data on the mounted volume
    write_data_cmd = (
        "dd if=/dev/zero of=%s/file$i bs=%s count=1; " % (
            mount_path, available_size))
    self.cmd_run(write_data_cmd)
def test_create_arbiter_vol_with_more_than_one_brick_set(self):
    """Validate volume creation using heketi for more than one brick set
       (six bricks)
    """
    # Set arbiter:disabled tag on the data devices and get their info
    data_nodes = []
    for node_id in self.node_id_list[0:2]:
        node_info = heketi_ops.heketi_node_info(
            self.heketi_client_node, self.heketi_server_url,
            node_id, json=True)

        if len(node_info['devices']) < 2:
            self.skipTest(
                "Nodes are expected to have at least 2 devices")
        if not all([int(d['storage']['free']) > (3 * 1024**2)
                    for d in node_info['devices'][0:2]]):
            self.skipTest(
                "Devices are expected to have more than 3Gb of free space")
        for device in node_info['devices']:
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url,
                'device', device['id'], 'disabled',
                device.get('tags', {}).get('arbiter'))
        self._set_arbiter_tag_with_further_revert(
            self.heketi_client_node, self.heketi_server_url,
            'node', node_id, 'disabled',
            node_info.get('tags', {}).get('arbiter'))

        data_nodes.append(node_info)

    # Set arbiter:required tag on all other nodes and their devices
    for node_id in self.node_id_list[2:]:
        node_info = heketi_ops.heketi_node_info(
            self.heketi_client_node, self.heketi_server_url,
            node_id, json=True)
        self._set_arbiter_tag_with_further_revert(
            self.heketi_client_node, self.heketi_server_url,
            'node', node_id, 'required',
            node_info.get('tags', {}).get('arbiter'))
        for device in node_info['devices']:
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url,
                'device', device['id'], 'required',
                device.get('tags', {}).get('arbiter'))

    # Take the smaller of the two per-node biggest free-space values of
    # the data nodes and use it for the target vol size calculation.
    for i, node_info in enumerate(data_nodes):
        biggest_disk_free_space = 0
        for device in node_info['devices'][0:2]:
            free = int(device['storage']['free'])
            if free > biggest_disk_free_space:
                biggest_disk_free_space = free
        data_nodes[i]['biggest_free_space'] = biggest_disk_free_space
    target_vol_size_kb = 1 + min([
        n['biggest_free_space'] for n in data_nodes])

    # Check that all the data devices have, at least, half of the
    # required size
    all_big_enough = True
    for node_info in data_nodes:
        for device in node_info['devices'][0:2]:
            if float(device['storage']['free']) < (target_vol_size_kb / 2):
                all_big_enough = False
                break

    # Create sc with gluster arbiter info
    self.create_storage_class(is_arbiter_vol=True)

    # Create a helper arbiter vol if not all the data devices have
    # half of the required free space.
    if not all_big_enough:
        helper_vol_size_kb, target_vol_size_kb = 0, 0
        smaller_device_id = None
        for node_info in data_nodes:
            devices = node_info['devices']
            if ((devices[0]['storage']['free']) > (
                    devices[1]['storage']['free'])):
                smaller_device_id = devices[1]['id']
                smaller_device = devices[1]['storage']['free']
                bigger_device = devices[0]['storage']['free']
            else:
                smaller_device_id = devices[0]['id']
                smaller_device = devices[0]['storage']['free']
                bigger_device = devices[1]['storage']['free']
            diff = bigger_device - (2 * smaller_device) + 1
            if diff > helper_vol_size_kb:
                helper_vol_size_kb = diff
                target_vol_size_kb = bigger_device - diff

        # Disable the smaller device and create the helper vol on the
        # bigger one to reduce its size, then enable the smaller device
        # back.
        try:
            out = heketi_ops.heketi_device_disable(
                self.heketi_client_node, self.heketi_server_url,
                smaller_device_id)
            self.assertTrue(out)
            self.create_and_wait_for_pvc(
                int(helper_vol_size_kb / 1024.0**2) + 1)
        finally:
            out = heketi_ops.heketi_device_enable(
                self.heketi_client_node, self.heketi_server_url,
                smaller_device_id)
            self.assertTrue(out)

    # Create the target arbiter volume
    self.create_and_wait_for_pvc(int(target_vol_size_kb / 1024.0**2))

    # Get gluster volume info
    vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

    self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
        vol_info, arbiter_bricks=2, data_bricks=4)
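# Worked example for the helper-volume sizing above (illustrative
# numbers, in the same KB units as heketi's "free" field): with
# bigger = 300 and smaller = 100,
#     diff = 300 - 2 * 100 + 1 = 101
# so a helper volume of about 101 units shrinks the bigger device to 199
# free units, and the target volume is sized at 300 - 101 = 199 units.
# Each data device then needs to hold one brick of roughly
# target / 2 = 99.5 units, which both the shrunk bigger device (199) and
# the smaller device (100) can satisfy; this restores the "half of the
# required size" condition checked earlier.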
def test_expand_arbiter_volume_setting_tags_on_nodes_or_devices(
        self, node_tags):
    """Validate expansion of arbiter volume with different tags

       This test case is going to run two tests:
            1. If value is True it is going to set tags
               on nodes and run the test
            2. If value is False it is going to set tags
               on devices and run the test
    """
    data_nodes = []
    arbiter_nodes = []

    # Set tags arbiter:disabled, arbiter:required
    for i, node_id in enumerate(self.node_id_list):
        if node_tags:
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url, 'node',
                node_id, 'disabled' if i < 2 else 'required')

        node_info = heketi_ops.heketi_node_info(
            self.heketi_client_node, self.heketi_server_url,
            node_id, json=True)

        if not node_tags:
            for device in node_info['devices']:
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node, self.heketi_server_url,
                    'device', device['id'],
                    'disabled' if i < 2 else 'required')
                device_info = heketi_ops.heketi_device_info(
                    self.heketi_client_node, self.heketi_server_url,
                    device['id'], json=True)
                self.assertEqual(
                    device_info['tags']['arbiter'],
                    'disabled' if i < 2 else 'required')

        node = {
            'id': node_id,
            'host': node_info['hostnames']['storage'][0]}
        if node_tags:
            self.assertEqual(
                node_info['tags']['arbiter'],
                'disabled' if i < 2 else 'required')
        data_nodes.append(node) if i < 2 else arbiter_nodes.append(node)

    # Create sc with gluster arbiter info
    self.create_storage_class(
        is_arbiter_vol=True, allow_volume_expansion=True)

    # Create PVC and wait for it to be in 'Bound' state
    self.create_and_wait_for_pvc()

    vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
        self.node, self.pvc_name)
    bricks = self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
        vol_info)

    arbiter_hosts = [obj['host'] for obj in arbiter_nodes]
    data_hosts = [obj['host'] for obj in data_nodes]

    for brick in bricks['arbiter_list']:
        self.assertIn(brick['name'].split(':')[0], arbiter_hosts)

    for brick in bricks['data_list']:
        self.assertIn(brick['name'].split(':')[0], data_hosts)

    # Expand PVC and verify the size
    pvc_size = 2
    openshift_ops.resize_pvc(self.node, self.pvc_name, pvc_size)
    openshift_ops.verify_pvc_size(self.node, self.pvc_name, pvc_size)

    vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
        self.node, self.pvc_name)
    bricks = self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
        vol_info, arbiter_bricks=2, data_bricks=4)

    for brick in bricks['arbiter_list']:
        self.assertIn(brick['name'].split(':')[0], arbiter_hosts)

    for brick in bricks['data_list']:
        self.assertIn(brick['name'].split(':')[0], data_hosts)
def test_arbiter_required_tag_on_node_or_devices_other_disabled(
        self, r_node_tag, d_node_tag, r_device_tag, d_device_tag):
    """Validate arbiter vol creation with node or device tag"""
    pvc_amount = 3

    # Get Heketi nodes info
    node_id_list = heketi_ops.heketi_node_list(
        self.heketi_client_node, self.heketi_server_url)

    # Disable all but the first 3 nodes
    for node_id in node_id_list[3:]:
        heketi_ops.heketi_node_disable(
            self.heketi_client_node, self.heketi_server_url, node_id)
        self.addCleanup(
            heketi_ops.heketi_node_enable, self.heketi_client_node,
            self.heketi_server_url, node_id)

    # Set arbiter:required tags
    arbiter_node = heketi_ops.heketi_node_info(
        self.heketi_client_node, self.heketi_server_url,
        node_id_list[0], json=True)
    arbiter_nodes_ip_addresses = arbiter_node['hostnames']['storage']
    self._set_arbiter_tag_with_further_revert(
        self.heketi_client_node, self.heketi_server_url, 'node',
        node_id_list[0], ('required' if r_node_tag else None),
        revert_to=arbiter_node.get('tags', {}).get('arbiter'))
    for device in arbiter_node['devices']:
        self._set_arbiter_tag_with_further_revert(
            self.heketi_client_node, self.heketi_server_url, 'device',
            device['id'], ('required' if r_device_tag else None),
            revert_to=device.get('tags', {}).get('arbiter'))

    # Set arbiter:disabled tags
    data_nodes_ip_addresses = []
    for node_id in node_id_list[1:]:
        node_info = heketi_ops.heketi_node_info(
            self.heketi_client_node, self.heketi_server_url,
            node_id, json=True)
        if not any([int(d['storage']['free']) > (pvc_amount * 1024**2)
                    for d in node_info['devices']]):
            self.skipTest(
                "Devices are expected to have more than "
                "%sGb of free space" % pvc_amount)
        data_nodes_ip_addresses.extend(node_info['hostnames']['storage'])
        for device in node_info['devices']:
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url, 'device',
                device['id'], ('disabled' if d_device_tag else None),
                revert_to=device.get('tags', {}).get('arbiter'))
        self._set_arbiter_tag_with_further_revert(
            self.heketi_client_node, self.heketi_server_url, 'node',
            node_id, ('disabled' if d_node_tag else None),
            revert_to=node_info.get('tags', {}).get('arbiter'))

    # Create PVCs and check that their bricks are correctly located
    self.create_storage_class(is_arbiter_vol=True)
    for i in range(pvc_amount):
        self.create_and_wait_for_pvc(1)

        # Get gluster volume info
        vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
            self.node, self.pvc_name)
        arbiter_bricks, data_bricks = [], []
        for brick in vol_info['bricks']['brick']:
            if int(brick["isArbiter"]) == 1:
                arbiter_bricks.append(brick["name"])
            else:
                data_bricks.append(brick["name"])

        # Verify that all the arbiter bricks are located on the
        # arbiter:required node and data bricks on all other nodes only.
        for arbiter_brick in arbiter_bricks:
            self.assertIn(
                arbiter_brick.split(':')[0], arbiter_nodes_ip_addresses)
        for data_brick in data_bricks:
            self.assertIn(
                data_brick.split(':')[0], data_nodes_ip_addresses)
def test_aribiter_required_tag_on_node_or_devices_other_disabled(
        self, node_with_tag):
    """Validate arbiter vol creation with required node or device tag"""
    pvc_amount = 3

    # Get Heketi nodes info
    node_id_list = heketi_ops.heketi_node_list(
        self.heketi_client_node, self.heketi_server_url)

    # Set arbiter:required tags
    arbiter_node = heketi_ops.heketi_node_info(
        self.heketi_client_node, self.heketi_server_url,
        node_id_list[0], json=True)
    arbiter_nodes_ip_addresses = arbiter_node['hostnames']['storage']
    self._set_arbiter_tag_with_further_revert(
        self.heketi_client_node, self.heketi_server_url, 'node',
        node_id_list[0], ('required' if node_with_tag else None),
        revert_to=arbiter_node.get('tags', {}).get('arbiter'))
    for device in arbiter_node['devices']:
        self._set_arbiter_tag_with_further_revert(
            self.heketi_client_node, self.heketi_server_url, 'device',
            device['id'], (None if node_with_tag else 'required'),
            revert_to=device.get('tags', {}).get('arbiter'))

    # Set arbiter:disabled tags
    data_nodes, data_nodes_ip_addresses = [], []
    for node_id in node_id_list[1:]:
        node_info = heketi_ops.heketi_node_info(
            self.heketi_client_node, self.heketi_server_url,
            node_id, json=True)
        if not any([int(d['storage']['free']) > (pvc_amount * 1024**2)
                    for d in node_info['devices']]):
            self.skipTest(
                "Devices are expected to have more than "
                "%sGb of free space" % pvc_amount)
        data_nodes_ip_addresses.extend(node_info['hostnames']['storage'])
        for device in node_info['devices']:
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url, 'device',
                device['id'], (None if node_with_tag else 'disabled'),
                revert_to=device.get('tags', {}).get('arbiter'))
        self._set_arbiter_tag_with_further_revert(
            self.heketi_client_node, self.heketi_server_url, 'node',
            node_id, ('disabled' if node_with_tag else None),
            revert_to=node_info.get('tags', {}).get('arbiter'))
        data_nodes.append(node_info)

    # Create PVCs and check that their bricks are correctly located
    self.create_storage_class(is_arbiter_vol=True)
    for i in range(pvc_amount):
        self.create_and_wait_for_pvc(1)

        # Get gluster volume info
        vol_info = get_gluster_vol_info_by_pvc_name(
            self.node, self.pvc_name)
        arbiter_bricks, data_bricks = [], []
        for brick in vol_info['bricks']['brick']:
            if int(brick["isArbiter"]) == 1:
                arbiter_bricks.append(brick["name"])
            else:
                data_bricks.append(brick["name"])

        # Verify that all the arbiter bricks are located on the
        # arbiter:required node and data bricks on all other nodes only.
        for arbiter_brick in arbiter_bricks:
            self.assertIn(
                arbiter_brick.split(':')[0], arbiter_nodes_ip_addresses)
        for data_brick in data_bricks:
            self.assertIn(
                data_brick.split(':')[0], data_nodes_ip_addresses)
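# Context note (assumption based on Heketi's arbiter support, not stated
# in the tests themselves): Heketi resolves the effective "arbiter" tag
# per device, with a device-level tag, when present, taking precedence
# over the node-level tag. That is why the two tests above can steer
# arbiter brick placement by tagging either nodes or devices.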
def test_pv_resize_device_disabled(self):
    """Validate resize after disabling all devices except one"""
    h_node, h_url = self.heketi_client_node, self.heketi_server_url

    # Size to expand the volume to and the path it is mounted on
    expand_size, dir_path = 7, "/mnt"

    # Get nodes info
    heketi_node_id_list = heketi_ops.heketi_node_list(h_node, h_url)
    if len(heketi_node_id_list) < 3:
        self.skipTest(
            "At least 3 gluster nodes are required to execute test case")

    self.create_storage_class(allow_volume_expansion=True)
    pvc_name = self.create_and_wait_for_pvc(pvc_size=2)
    vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
    dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

    self._write_file(pod_name, "file1", "1G", dir_path)
    with self.assertRaises(AssertionError):
        self._write_file(pod_name, "file2", "3G", dir_path)

    # Prepare the first 3 nodes: keep the first device of each node
    # enabled and disable all the other devices.
    for node_id in heketi_node_id_list[:3]:
        node_info = heketi_ops.heketi_node_info(
            h_node, h_url, node_id, json=True)
        self.assertTrue(node_info, "Failed to get node info")
        devices = node_info.get("devices", None)
        self.assertTrue(
            devices, "Node {} does not have devices".format(node_id))
        if devices[0]["state"].strip().lower() != "online":
            self.skipTest(
                "Skipping test as it expects the first device to"
                " be enabled")
        for device in devices[1:]:
            heketi_ops.heketi_device_disable(h_node, h_url, device["id"])
            self.addCleanup(
                heketi_ops.heketi_device_enable, h_node, h_url,
                device["id"])

    usedsize_before_resize = self._get_mount_size(pod_name, dir_path)

    # Resize pvc
    resize_pvc(self.node, pvc_name, expand_size)
    verify_pvc_size(self.node, pvc_name, expand_size)
    vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
    self.assertFalse(len(vol_info['bricks']['brick']) % 3)

    # Re-enable the disabled devices. Note: node info is re-fetched per
    # node so that each node's own device list is used, instead of the
    # stale "devices" variable left over from the loop above.
    for node_id in heketi_node_id_list[:3]:
        node_info = heketi_ops.heketi_node_info(
            h_node, h_url, node_id, json=True)
        for device in node_info["devices"][1:]:
            heketi_ops.heketi_device_enable(h_node, h_url, device["id"])

    self._write_file(pod_name, "file3", "3G", dir_path)

    usedsize_after_resize = self._get_mount_size(pod_name, dir_path)
    self.assertGreater(
        int(usedsize_before_resize.strip('%')),
        int(usedsize_after_resize.strip('%')),
        "Mount size {} should be greater than {}".format(
            usedsize_before_resize, usedsize_after_resize))

    self._write_file(pod_name, "file4", "1024", dir_path)

    # Validate dist-rep volume with 6 bricks after pv resize
    vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
    self.assertEqual(
        6, len(vol_info['bricks']['brick']),
        "Expected bricks count is 6, but actual brick count is {}".format(
            len(vol_info['bricks']['brick'])))
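# The _write_file and _get_mount_size helpers used above are defined
# elsewhere in this test class. A minimal sketch of what they are assumed
# to do (signatures inferred from the calls above; bodies hypothetical):

def _write_file(self, pod_name, filename, filesize, dir_path):
    """Write a single file of the given size inside the app pod."""
    cmd = "dd if=/dev/urandom of={}/{} bs={} count=1".format(
        dir_path, filename, filesize)
    ret, _, err = oc_rsh(self.node, pod_name, cmd)
    self.assertFalse(ret, "Failed to write file: {}".format(err))

def _get_mount_size(self, pod_name, dir_path):
    """Return the used-capacity column ("Use%") of the mounted path."""
    cmd = "df -h {} | awk '{{print $5}}' | tail -1".format(dir_path)
    ret, out, err = oc_rsh(self.node, pod_name, cmd)
    self.assertFalse(ret, "Failed to get mount size: {}".format(err))
    return out.strip()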
def test_check_pvc_placement_based_on_the_heketi_zones(
        self, zone_count, heketi_zone_checking, is_arbiter_vol,
        expand=False):
    # TODO(vponomar): implement setting env vars for the Heketi dc.

    # Check amount of available online heketi nodes
    online_nodes = self._get_online_nodes()
    node_count = len(online_nodes)

    # Check the current amount of Heketi zones
    actual_heketi_zones_amount = len(set([n[0] for n in online_nodes]))
    if zone_count != actual_heketi_zones_amount:
        if self.allow_heketi_zones_update:
            if zone_count > node_count:
                self.skipTest(
                    "Not enough online nodes '%s' to test '%s' "
                    "unique Heketi zones." % (node_count, zone_count))
            heketi_db_data = self._set_heketi_zones(zone_count)
            online_nodes = [
                (n['Info']['zone'], n['Info']['hostnames']['storage'])
                for n in heketi_db_data['nodeentries'].values()]
        else:
            self.skipTest(
                "Required amount of the Heketi zones (%s < %s) is not "
                "satisfied and 'common.allow_heketi_zones_update' config "
                "option is set to 'False'." % (
                    zone_count, actual_heketi_zones_amount))

    # Create storage class with the "user.heketi.zone-checking" option
    prefix = "autotests-heketi-zones"
    sc_name = self.create_storage_class(
        sc_name_prefix=prefix, vol_name_prefix=prefix,
        allow_volume_expansion=expand, is_arbiter_vol=is_arbiter_vol,
        heketi_zone_checking=heketi_zone_checking)

    # Create PVC using the above storage class
    pvc_name = self.create_and_wait_for_pvc(
        pvc_name_prefix=prefix, sc_name=sc_name)

    for i in range(2):
        # Validate brick placement if heketi zone checking is 'strict'
        if heketi_zone_checking == 'strict':
            brick_hosts_ips = (
                openshift_ops.get_gluster_host_ips_by_pvc_name(
                    self.node, pvc_name))
            placement_zones = {}
            for brick_host_ip in brick_hosts_ips:
                for node_zone, node_ips in online_nodes:
                    if brick_host_ip not in node_ips:
                        continue
                    placement_zones[node_zone] = placement_zones.get(
                        node_zone, 0) + 1
                    break
            actual_zone_count = len(placement_zones)
            # NOTE(vponomar): '3' is the default amount of volume
            # replicas. And it is just impossible to find more actual
            # zones than the amount of replicas/bricks.
            brick_number = len(brick_hosts_ips)
            expected_zone_count = (
                brick_number if brick_number < zone_count else zone_count)
            self.assertEqual(
                expected_zone_count, actual_zone_count,
                "PVC '%s' is incorrectly placed on the Heketi nodes "
                "according to their zones. Expected '%s' unique zones, "
                "got '%s'." % (pvc_name, zone_count, actual_zone_count))

        # Expand PVC if needed
        if expand:
            expand_size, expand = 2, False
            openshift_storage_libs.enable_pvc_resize(self.node)
            openshift_ops.resize_pvc(self.node, pvc_name, expand_size)
            openshift_ops.verify_pvc_size(self.node, pvc_name, expand_size)
        else:
            break

    # Make sure that gluster vol has appropriate options set
    vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
        self.node, pvc_name)
    self.assertIn('user.heketi.zone-checking', vol_info['options'])
    self.assertEqual(
        vol_info['options']['user.heketi.zone-checking'],
        heketi_zone_checking)
    if is_arbiter_vol:
        self.assertIn('user.heketi.arbiter', vol_info['options'])
        self.assertEqual(
            vol_info['options']['user.heketi.arbiter'], 'true')

    # Create app DC with the above PVC
    self.create_dc_with_pvc(pvc_name, timeout=120, wait_step=3)