def test_pv_resize_with_prefix_for_name_and_size(
        self, create_vol_name_prefix=False, valid_size=True):
    """Validate PV resize with and without name prefix.

    :param create_vol_name_prefix: when True, also verify that the
        backing Heketi volume name carries the configured prefix.
    :param valid_size: when True, resize to a valid size (2) and expect
        success; when False, attempt an invalid (non-numeric) size and
        expect the PVC/PV sizes to stay unchanged.
    """
    dir_path = "/mnt/"
    node = self.ocp_client[0]

    # Create an expandable storage class and a PVC from it
    self.create_storage_class(
        allow_volume_expansion=True,
        create_vol_name_prefix=create_vol_name_prefix)
    pvc_name = self.create_and_wait_for_pvc()

    # Create DC with POD and attached PVC to it.
    dc_name = oc_create_app_dc_with_io(node, pvc_name)
    self.addCleanup(oc_delete, node, 'dc', dc_name)
    self.addCleanup(scale_dc_pod_amount_and_wait, node, dc_name, 0)
    pod_name = get_pod_name_from_dc(node, dc_name)
    wait_for_pod_be_ready(node, pod_name)

    if create_vol_name_prefix:
        # Backing Heketi volume name must carry the configured prefix
        ret = heketi_ops.verify_volume_name_prefix(
            node, self.sc['volumenameprefix'],
            self.sc['secretnamespace'],
            pvc_name, self.heketi_server_url)
        self.assertTrue(ret, "verify volnameprefix failed")

    # Partially fill the volume; this write must succeed
    cmd = ("dd if=/dev/urandom of=%sfile "
           "bs=100K count=1000") % dir_path
    ret, out, err = oc_rsh(node, pod_name, cmd)
    self.assertEqual(ret, 0, "Failed to execute command %s on %s" % (
        cmd, node))
    pv_name = get_pv_name_from_pvc(node, pvc_name)

    # If resize size is invalid then size should not change
    if valid_size:
        # Writing beyond current capacity must fail before the resize.
        # NOTE(review): the previous code wrapped this in
        # assertRaises(AssertionError) and raised ExecutionError, which
        # assertRaises cannot catch; use the explicit return-code check
        # (same pattern as the other resize test in this file).
        cmd = ("dd if=/dev/urandom of=%sfile2 "
               "bs=100K count=10000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertNotEqual(
            ret, 0, " This IO did not fail as expected "
            "command %s on %s" % (cmd, node))

        # Grow the claim and verify both PVC and PV report the new size
        pvc_size = 2
        resize_pvc(node, pvc_name, pvc_size)
        verify_pvc_size(node, pvc_name, pvc_size)
        verify_pv_size(node, pv_name, pvc_size)
    else:
        # Invalid size must be rejected and sizes must stay at 1
        invalid_pvc_size = 'ten'
        with self.assertRaises(AssertionError):
            resize_pvc(node, pvc_name, invalid_pvc_size)
        verify_pvc_size(node, pvc_name, 1)
        verify_pv_size(node, pv_name, 1)

    # Recreate the pod so it mounts the (possibly resized) volume
    oc_delete(node, 'pod', pod_name)
    wait_for_resource_absence(node, 'pod', pod_name)
    pod_name = get_pod_name_from_dc(node, dc_name)
    wait_for_pod_be_ready(node, pod_name)

    # Post-resize IO must succeed
    cmd = ("dd if=/dev/urandom of=%sfile_new "
           "bs=50K count=10000") % dir_path
    ret, out, err = oc_rsh(node, pod_name, cmd)
    self.assertEqual(ret, 0, "Failed to execute command %s on %s" % (
        cmd, node))
def dynamic_provisioning_glusterfile(self, create_vol_name_prefix):
    """Validate dynamic provisioning of a glusterfile-backed PVC.

    Creates a storage class and PVC, attaches the PVC to an app DC pod,
    cross-checks the backing volume between the PV, Heketi and Gluster,
    then runs basic file IO on the mount.

    :param create_vol_name_prefix: when True, verify the Heketi volume
        name prefix instead of the PV/Heketi/Gluster name cross-check.
    """
    # Create secret and storage class
    self.create_storage_class(
        create_vol_name_prefix=create_vol_name_prefix)

    # Create PVC
    pvc_name = self.create_and_wait_for_pvc()

    # Create DC with POD and attached PVC to it.
    dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
    self.addCleanup(oc_delete, self.node, 'dc', dc_name)
    self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)

    pod_name = get_pod_name_from_dc(self.node, dc_name)
    wait_for_pod_be_ready(self.node, pod_name)

    # Verify Heketi volume name for prefix presence if provided
    if create_vol_name_prefix:
        ret = verify_volume_name_prefix(self.node,
                                        self.sc['volumenameprefix'],
                                        self.sc['secretnamespace'],
                                        pvc_name, self.sc['resturl'])
        self.assertTrue(ret, "verify volnameprefix failed")
    else:
        # Get the volume name and volume id from PV
        pv_name = get_pv_name_from_pvc(self.ocp_client[0], pvc_name)
        custom = [
            r':spec.glusterfs.path',
            r':metadata.annotations.'
            r'"gluster\.kubernetes\.io\/heketi-volume-id"'
        ]
        pv_vol_name, vol_id = oc_get_custom_resource(
            self.ocp_client[0], 'pv', custom, pv_name)

        # Check that the PV volume name exists in heketi and that the
        # auto-generated name is exactly "vol_" + volume id
        heketi_vol_name = heketi_volume_info(
            self.ocp_client[0], self.heketi_server_url, vol_id,
            json=True)['name']
        # Fix: messages previously concatenated without spaces and
        # misspelled "prefix"
        self.assertEqual(pv_vol_name, heketi_vol_name,
                         'Volume with vol_id = %s not found '
                         'in heketidb' % vol_id)
        self.assertEqual(heketi_vol_name, 'vol_' + vol_id,
                         'Volume with vol_id = %s has a '
                         'custom prefix' % vol_id)
        out = cmd_run_on_gluster_pod_or_node(self.ocp_master_node[0],
                                             "gluster volume list")
        self.assertIn(pv_vol_name, out,
                      "Volume with id %s does not exist" % vol_id)

    # Make sure we are able to work with files on the mounted volume
    filepath = "/mnt/file_for_testing_io.log"
    for cmd in ("dd if=/dev/urandom of=%s bs=1K count=100",
                "ls -lrt %s",
                "rm -rf %s"):
        cmd = cmd % filepath
        ret, out, err = oc_rsh(self.node, pod_name, cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute '%s' command on %s" % (cmd, self.node))
def dynamic_provisioning_glusterfile(self, create_vol_name_prefix):
    """Validate dynamic provisioning of a glusterfile-backed PVC.

    Creates a storage class and PVC, attaches the PVC to an app DC pod,
    cross-checks the backing volume between the PV, Heketi and Gluster,
    then runs basic file IO on the mount.

    :param create_vol_name_prefix: when True, verify the Heketi volume
        name prefix instead of the PV/Heketi/Gluster name cross-check.
    """
    # Create secret and storage class
    self.create_storage_class(
        create_vol_name_prefix=create_vol_name_prefix)

    # Create PVC
    pvc_name = self.create_and_wait_for_pvc()

    # Create DC with POD and attached PVC to it.
    dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
    self.addCleanup(oc_delete, self.node, 'dc', dc_name)
    self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)

    pod_name = get_pod_name_from_dc(self.node, dc_name)
    wait_for_pod_be_ready(self.node, pod_name)

    # Verify Heketi volume name for prefix presence if provided
    if create_vol_name_prefix:
        ret = verify_volume_name_prefix(self.node,
                                        self.sc['volumenameprefix'],
                                        self.sc['secretnamespace'],
                                        pvc_name, self.sc['resturl'])
        self.assertTrue(ret, "verify volnameprefix failed")
    else:
        # Get the volume name and volume id from PV
        pv_name = get_pv_name_from_pvc(self.ocp_client[0], pvc_name)
        custom = [
            r':spec.glusterfs.path',
            r':metadata.annotations.'
            r'"gluster\.kubernetes\.io\/heketi-volume-id"'
        ]
        pv_vol_name, vol_id = oc_get_custom_resource(
            self.ocp_client[0], 'pv', custom, pv_name)

        # Check that the PV volume name exists in heketi and that the
        # auto-generated name is exactly "vol_" + volume id
        heketi_vol_name = heketi_volume_info(
            self.ocp_client[0], self.heketi_server_url, vol_id,
            json=True)['name']
        # Fix: messages previously concatenated without spaces and
        # misspelled "prefix"
        self.assertEqual(pv_vol_name, heketi_vol_name,
                         'Volume with vol_id = %s not found '
                         'in heketidb' % vol_id)
        self.assertEqual(heketi_vol_name, 'vol_' + vol_id,
                         'Volume with vol_id = %s has a '
                         'custom prefix' % vol_id)
        out = cmd_run_on_gluster_pod_or_node(self.ocp_master_node[0],
                                             "gluster volume list")
        self.assertIn(pv_vol_name, out,
                      "Volume with id %s does not exist" % vol_id)

    # Make sure we are able to work with files on the mounted volume
    filepath = "/mnt/file_for_testing_io.log"
    for cmd in ("dd if=/dev/urandom of=%s bs=1K count=100",
                "ls -lrt %s",
                "rm -rf %s"):
        cmd = cmd % filepath
        ret, out, err = oc_rsh(self.node, pod_name, cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute '%s' command on %s" % (cmd, self.node))
def test_create_and_verify_pvc_with_volume_name_prefix(self):
    """create and verify pvc with volname prefix on an app pod"""
    # volumenameprefix requires OCP >= 3.9
    if get_openshift_version() < "3.9":
        self.skipTest(
            "'volumenameprefix' option for Heketi is not supported"
            " in OCP older than 3.9")

    sc_name = self.create_storage_class(create_vol_name_prefix=True)
    pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name)
    # Prefer 'secretnamespace', falling back to 'restsecretnamespace'
    namespace = (self.sc.get(
        'secretnamespace',
        self.sc.get('restsecretnamespace', 'default')))
    # Fix: the helper's boolean result was previously discarded, so a
    # missing prefix never failed the test. Assert it, matching the
    # PV-resize tests in this file.
    ret = verify_volume_name_prefix(
        self.heketi_client_node,
        self.sc.get("volumenameprefix", "autotest"),
        namespace, pvc_name, self.heketi_server_url)
    self.assertTrue(ret, "verify volnameprefix failed")
    self.create_dc_with_pvc(pvc_name)
def test_create_and_verify_pvc_with_volume_name_prefix(self):
    """create and verify pvc with volname prefix on an app pod"""
    # volumenameprefix requires OCP >= 3.9
    if get_openshift_version() < "3.9":
        self.skipTest(
            "'volumenameprefix' option for Heketi is not supported"
            " in OCP older than 3.9")

    sc_name = self.create_storage_class(create_vol_name_prefix=True)
    pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name)
    # Prefer 'secretnamespace', falling back to 'restsecretnamespace'
    namespace = (self.sc.get(
        'secretnamespace',
        self.sc.get('restsecretnamespace', 'default')))
    # Fix: the helper's boolean result was previously discarded, so a
    # missing prefix never failed the test. Assert it, matching the
    # PV-resize tests in this file.
    ret = verify_volume_name_prefix(
        self.heketi_client_node,
        self.sc.get("volumenameprefix", "autotest"),
        namespace, pvc_name, self.heketi_server_url)
    self.assertTrue(ret, "verify volnameprefix failed")
    self.create_dc_with_pvc(pvc_name)
def test_pv_resize_with_prefix_for_name(self,
                                        create_vol_name_prefix=False):
    """Validate PV resize with and without name prefix"""
    mount_dir = "/mnt/"
    ocp_node = self.ocp_client[0]

    # Provision an expandable storage class, then a PVC from it
    self.create_storage_class(
        allow_volume_expansion=True,
        create_vol_name_prefix=create_vol_name_prefix)
    pvc_name = self.create_and_wait_for_pvc()

    # Spin up an app DC whose pod mounts the PVC; clean up in reverse
    dc_name = oc_create_app_dc_with_io(ocp_node, pvc_name)
    self.addCleanup(oc_delete, ocp_node, 'dc', dc_name)
    self.addCleanup(scale_dc_pod_amount_and_wait, ocp_node, dc_name, 0)
    pod_name = get_pod_name_from_dc(ocp_node, dc_name)
    wait_for_pod_be_ready(ocp_node, pod_name)

    if create_vol_name_prefix:
        # The backing heketi volume must carry the configured prefix
        prefix_ok = heketi_ops.verify_volume_name_prefix(
            ocp_node, self.sc['volumenameprefix'],
            self.sc['secretnamespace'], pvc_name,
            self.heketi_server_url)
        self.assertTrue(prefix_ok, "verify volnameprefix failed")

    # A moderate write must succeed on the fresh volume
    io_cmd = ("dd if=/dev/urandom of=%sfile "
              "bs=100K count=1000") % mount_dir
    ret, out, err = oc_rsh(ocp_node, pod_name, io_cmd)
    self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
        io_cmd, ocp_node))

    # A write exceeding the volume's initial capacity is expected to
    # fail; if it somehow succeeds, surface the full command output
    io_cmd = ("dd if=/dev/urandom of=%sfile2 "
              "bs=100K count=10000") % mount_dir
    with self.assertRaises(AssertionError):
        ret, out, err = oc_rsh(ocp_node, pod_name, io_cmd)
        msg = ("Command '%s' was expected to fail on '%s' node. "
               "But it returned following: ret is '%s', err is '%s' "
               "and out is '%s'" % (io_cmd, ocp_node, ret, err, out))
        raise ExecutionError(msg)

    # Grow the claim and confirm both PVC and PV report the new size
    pvc_size = 2
    resize_pvc(ocp_node, pvc_name, pvc_size)
    verify_pvc_size(ocp_node, pvc_name, pvc_size)
    pv_name = get_pv_name_from_pvc(ocp_node, pvc_name)
    verify_pv_size(ocp_node, pv_name, pvc_size)

    # Recreate the pod so it remounts the resized volume
    oc_delete(ocp_node, 'pod', pod_name)
    wait_for_resource_absence(ocp_node, 'pod', pod_name)
    pod_name = get_pod_name_from_dc(ocp_node, dc_name)
    wait_for_pod_be_ready(ocp_node, pod_name)

    # IO beyond the old capacity must now succeed
    io_cmd = ("dd if=/dev/urandom of=%sfile_new "
              "bs=50K count=10000") % mount_dir
    ret, out, err = oc_rsh(ocp_node, pod_name, io_cmd)
    self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
        io_cmd, ocp_node))
def test_create_and_verify_pvc_with_volume_name_prefix(self):
    """create and verify pvc with volname prefix on an app pod"""
    # volumenameprefix requires OCP >= 3.9
    if get_openshift_version() < "3.9":
        self.skipTest(
            "'volumenameprefix' option for Heketi is not supported"
            " in OCP older than 3.9")

    sc_name = self.create_storage_class(create_vol_name_prefix=True)
    pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name)
    # Prefer 'secretnamespace', falling back to 'restsecretnamespace'
    namespace = (self.sc.get('secretnamespace',
                             self.sc.get('restsecretnamespace',
                                         'default')))
    # Fix: the helper's boolean result was previously discarded, so a
    # missing prefix never failed the test. Assert it, matching the
    # PV-resize tests in this file.
    ret = verify_volume_name_prefix(self.heketi_client_node,
                                    self.sc.get("volumenameprefix",
                                                "autotest"),
                                    namespace, pvc_name,
                                    self.heketi_server_url)
    self.assertTrue(ret, "verify volnameprefix failed")
    self.create_dc_with_pvc(pvc_name)

    # The PV must expose glusterfs endpoints
    pv_name = get_pv_name_from_pvc(self.ocp_master_node[0], pvc_name)
    endpoint = oc_get_custom_resource(self.ocp_master_node[0], "pv",
                                      ":spec.glusterfs.endpoints",
                                      name=pv_name)
    self.assertTrue(
        endpoint,
        "Failed to read Endpoints of %s on %s " % (
            pv_name, self.ocp_master_node[0]))
def test_pv_resize_with_prefix_for_name(self,
                                        create_vol_name_prefix=False):
    """Validate PV resize with and without name prefix"""
    base_dir = "/mnt/"
    client = self.ocp_client[0]

    # Build an expandable storage class and claim a volume from it
    self.create_storage_class(
        allow_volume_expansion=True,
        create_vol_name_prefix=create_vol_name_prefix)
    pvc_name = self.create_and_wait_for_pvc()

    # Attach the claim to an app DC pod; register teardown steps
    dc_name = oc_create_app_dc_with_io(client, pvc_name)
    self.addCleanup(oc_delete, client, 'dc', dc_name)
    self.addCleanup(scale_dc_pod_amount_and_wait, client, dc_name, 0)
    pod_name = get_pod_name_from_dc(client, dc_name)
    wait_for_pod_be_ready(client, pod_name)

    if create_vol_name_prefix:
        # Verify the heketi volume name carries the configured prefix
        has_prefix = heketi_ops.verify_volume_name_prefix(
            client, self.sc['volumenameprefix'],
            self.sc['secretnamespace'], pvc_name,
            self.heketi_server_url)
        self.assertTrue(has_prefix, "verify volnameprefix failed")

    # Initial write within capacity must succeed
    dd_cmd = ("dd if=/dev/urandom of=%sfile "
              "bs=100K count=1000") % base_dir
    ret, out, err = oc_rsh(client, pod_name, dd_cmd)
    self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
        dd_cmd, client))

    # Oversized write must fail while the volume is still small
    dd_cmd = ("dd if=/dev/urandom of=%sfile2 "
              "bs=100K count=10000") % base_dir
    ret, out, err = oc_rsh(client, pod_name, dd_cmd)
    self.assertNotEqual(
        ret, 0, " This IO did not fail as expected "
        "command %s on %s" % (dd_cmd, client))

    # Expand the claim and verify the PVC and PV both show the new size
    pvc_size = 2
    resize_pvc(client, pvc_name, pvc_size)
    verify_pvc_size(client, pvc_name, pvc_size)
    pv_name = get_pv_name_from_pvc(client, pvc_name)
    verify_pv_size(client, pv_name, pvc_size)

    # Bounce the pod so the resized volume is remounted
    oc_delete(client, 'pod', pod_name)
    wait_for_resource_absence(client, 'pod', pod_name)
    pod_name = get_pod_name_from_dc(client, dc_name)
    wait_for_pod_be_ready(client, pod_name)

    # The previously-failing amount of IO must now succeed
    dd_cmd = ("dd if=/dev/urandom of=%sfile_new "
              "bs=50K count=10000") % base_dir
    ret, out, err = oc_rsh(client, pod_name, dd_cmd)
    self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
        dd_cmd, client))