def test_to_check_deletion_of_cluster(self):
    """Validate deletion of cluster with volumes.

    Ensures the heketi cluster holds at least one volume, then verifies
    that attempting to delete the cluster fails (raises AssertionError)
    and that the cluster is still listed afterwards.
    """
    # List heketi volumes; create one if the cluster is currently empty
    # so the deletion attempt below is guaranteed to hit a non-empty
    # cluster.
    g.log.info("List heketi volumes")
    volumes = heketi_volume_list(self.heketi_client_node,
                                 self.heketi_server_url, json=True)
    if not volumes["volumes"]:
        g.log.info("Creating heketi volume")
        out = heketi_volume_create(self.heketi_client_node,
                                   self.heketi_server_url,
                                   self.volume_size, json=True)
        self.assertTrue(out, ("Failed to create heketi "
                              "volume of size %s" % self.volume_size))
        # Fix: the original message had no '%s' placeholder, so the
        # created-volume details were silently dropped from the log.
        g.log.info("Heketi volume successfully created: %s" % out)
        volume_id = out["bricks"][0]["volume"]
        self.addCleanup(heketi_volume_delete, self.heketi_client_node,
                        self.heketi_server_url, volume_id)

    # List heketi clusters and pick the first one
    g.log.info("Listing heketi cluster list")
    out = heketi_cluster_list(self.heketi_client_node,
                              self.heketi_server_url, json=True)
    self.assertTrue(out, "Failed to list heketi cluster")
    g.log.info("All heketi cluster successfully listed")
    cluster_id = out["clusters"][0]

    # Deleting a heketi cluster which contains volumes and/or nodes is
    # expected to fail.
    g.log.info("Trying to delete a heketi cluster"
               " which contains volumes and/or nodes:"
               " Expected to fail")
    self.assertRaises(
        AssertionError,
        heketi_cluster_delete,
        self.heketi_client_node,
        self.heketi_server_url,
        cluster_id,
    )
    g.log.info("Expected result: Unable to delete cluster %s"
               " because it contains volumes "
               " and/or nodes" % cluster_id)

    # To confirm deletion failed, check heketi cluster list
    g.log.info("Listing heketi cluster list")
    out = heketi_cluster_list(self.heketi_client_node,
                              self.heketi_server_url, json=True)
    self.assertTrue(out, "Failed to list heketi cluster")
    g.log.info("All heketi cluster successfully listed")
def test_to_check_deletion_of_cluster(self):
    """Validate deletion of cluster with volumes.

    Ensures the heketi cluster holds at least one volume, then verifies
    that attempting to delete the cluster fails (raises ExecutionError)
    and that the cluster is still listed afterwards.
    """
    # List heketi volumes; create one if the cluster is currently empty
    # so the deletion attempt below is guaranteed to hit a non-empty
    # cluster.
    g.log.info("List heketi volumes")
    volumes = heketi_volume_list(self.heketi_client_node,
                                 self.heketi_server_url, json=True)
    if not volumes["volumes"]:
        g.log.info("Creating heketi volume")
        out = heketi_volume_create(self.heketi_client_node,
                                   self.heketi_server_url,
                                   self.volume_size, json=True)
        self.assertTrue(out, ("Failed to create heketi "
                              "volume of size %s" % self.volume_size))
        # Fix: the original message had no '%s' placeholder, so the
        # created-volume details were silently dropped from the log.
        g.log.info("Heketi volume successfully created: %s" % out)
        volume_id = out["bricks"][0]["volume"]
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, volume_id)

    # List heketi clusters and pick the first one
    g.log.info("Listing heketi cluster list")
    out = heketi_cluster_list(self.heketi_client_node,
                              self.heketi_server_url, json=True)
    self.assertTrue(out, "Failed to list heketi cluster")
    g.log.info("All heketi cluster successfully listed")
    cluster_id = out["clusters"][0]

    # Deleting a heketi cluster which contains volumes and/or nodes is
    # expected to fail.
    g.log.info("Trying to delete a heketi cluster"
               " which contains volumes and/or nodes:"
               " Expected to fail")
    self.assertRaises(
        ExecutionError,
        heketi_cluster_delete,
        self.heketi_client_node,
        self.heketi_server_url,
        cluster_id,
    )
    g.log.info("Expected result: Unable to delete cluster %s"
               " because it contains volumes "
               " and/or nodes" % cluster_id)

    # To confirm deletion failed, check heketi cluster list
    g.log.info("Listing heketi cluster list")
    out = heketi_cluster_list(self.heketi_client_node,
                              self.heketi_server_url, json=True)
    self.assertTrue(out, "Failed to list heketi cluster")
    g.log.info("All heketi cluster successfully listed")
def test_sc_create_with_clusterid(self):
    """Create a storage class pinned to an explicit heketi cluster id."""
    # Pick the first cluster heketi knows about
    cluster_data = heketi_cluster_list(
        self.heketi_client_node, self.heketi_server_url, json=True)
    self.assertTrue(cluster_data, "Failed to list heketi cluster")
    cluster_id = cluster_data["clusters"][0]

    # Provision a PVC through a storage class bound to that cluster
    sc_name = self.create_storage_class(clusterid=cluster_id)
    pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name)

    # Resolve the backing heketi volume via the PV annotation and make
    # sure it really landed on the requested cluster.
    master = self.ocp_master_node[0]
    pv_name = get_pv_name_from_pvc(master, pvc_name)
    volume_id = oc_get_custom_resource(
        master, 'pv',
        r':metadata.annotations."gluster\.kubernetes\.io'
        r'\/heketi-volume-id"', name=pv_name)[0]
    volume_info = heketi_volume_info(
        self.heketi_client_node, self.heketi_server_url,
        volume_id, json=True)
    self.assertEqual(
        cluster_id, volume_info["cluster"],
        "Cluster ID %s has NOT been used to"
        "create the PVC %s. Found %s" % (
            cluster_id, pvc_name, volume_info["cluster"]))
def test_heketi_volume_create_with_clusterid(self):
    """Validate creation of heketi volume with clusters argument."""
    h_node, h_url = self.heketi_client_node, self.heketi_server_url

    # Choose a target cluster from the heketi cluster list
    target_cluster_id = heketi_cluster_list(
        h_node, h_url, json=True)['clusters'][0]

    # Create a volume restricted to that cluster and register cleanup
    new_volume = heketi_volume_create(
        h_node, h_url, self.volume_size,
        clusters=target_cluster_id, json=True)
    volume_id = new_volume["bricks"][0]["volume"]
    self.addCleanup(heketi_volume_delete, h_node, h_url, volume_id)

    # The volume's info must report the cluster we asked for
    reported_cluster_id = heketi_volume_info(
        h_node, h_url, volume_id, json=True)['cluster']
    self.assertEqual(
        reported_cluster_id, target_cluster_id,
        "Volume creation cluster id {} not matching the info cluster id "
        "{}".format(target_cluster_id, reported_cluster_id))
def setUpClass(cls):
    """Cache the first heketi cluster id and the zone-update config flag."""
    super(TestHeketiZones, cls).setUpClass()
    cluster_data = heketi_ops.heketi_cluster_list(
        cls.heketi_client_node, cls.heketi_server_url, json=True)
    cls.cluster_id = cluster_data['clusters'][0]
    common_config = g.config.get("common", {})
    cls.allow_heketi_zones_update = common_config.get(
        "allow_heketi_zones_update", False)
def test_heketi_metrics_validating_cluster_count(self):
    """Validate 'cluster count' in heketi metrics."""
    h_node, h_url = self.heketi_client_node, self.heketi_server_url

    # Fetch the cluster list and make sure it is non-empty
    cluster_data = heketi_cluster_list(h_node, h_url, json=True)
    self.assertTrue(cluster_data)
    self.assertTrue(cluster_data.get('clusters'))

    # Fetch heketi metrics and make sure the counter is present
    metrics_data = get_heketi_metrics(h_node, h_url)
    self.assertTrue(metrics_data)
    self.assertTrue(metrics_data.get('heketi_cluster_count'))

    # Metric must match the actual number of clusters
    self.assertEqual(
        len(cluster_data['clusters']),
        metrics_data['heketi_cluster_count'])
def test_heketi_metrics_validating_cluster_count(self):
    """Validate 'cluster count' in heketi metrics."""
    # Actual cluster count according to heketi itself
    clusters = heketi_cluster_list(
        self.heketi_client_node, self.heketi_server_url, json=True)
    self.assertTrue(clusters)
    self.assertTrue(clusters.get('clusters'))
    expected_count = len(clusters['clusters'])

    # Cluster count as exported by the metrics endpoint
    exported_metrics = get_heketi_metrics(
        self.heketi_client_node, self.heketi_server_url)
    self.assertTrue(exported_metrics)
    self.assertTrue(exported_metrics.get('heketi_cluster_count'))

    self.assertEqual(
        expected_count, exported_metrics['heketi_cluster_count'])
def test_heketi_cluster_list(self):
    """Test and validate heketi cluster list operation.

    Creates a fresh cluster (removed again via cleanup) and verifies
    that its id shows up in the heketi cluster list.
    """
    # Create heketi cluster so there is a known id to look for
    cluster_info = heketi_ops.heketi_cluster_create(
        self.heketi_client_node, self.heketi_server_url, json=True)
    self.addCleanup(heketi_ops.heketi_cluster_delete,
                    self.heketi_client_node, self.heketi_server_url,
                    cluster_info["id"])

    # Get heketi cluster list and validate presence of newly
    # created cluster
    cluster_list = heketi_ops.heketi_cluster_list(
        self.heketi_client_node, self.heketi_server_url, json=True)
    err_msg = ("Cluster id %s not found in cluster list %s"
               % (cluster_info["id"], cluster_list["clusters"]))
    self.assertIn(cluster_info["id"], cluster_list["clusters"], err_msg)
def test_heketi_cluster_delete(self):
    """Test and validate heketi cluster delete operation.

    Creates an empty cluster, deletes it, and verifies the id no longer
    appears in the heketi cluster list.
    """
    # Create heketi cluster
    cluster_info = heketi_ops.heketi_cluster_create(
        self.heketi_client_node, self.heketi_server_url, json=True)

    # Delete newly created cluster (empty, so deletion should succeed)
    heketi_ops.heketi_cluster_delete(self.heketi_client_node,
                                     self.heketi_server_url,
                                     cluster_info["id"])

    # Get heketi cluster list and check for absence of deleted cluster
    cluster_list = heketi_ops.heketi_cluster_list(
        self.heketi_client_node, self.heketi_server_url, json=True)
    err_msg = ("Cluster id %s was not expected in cluster list %s"
               % (cluster_info["id"], cluster_list["clusters"]))
    self.assertNotIn(
        cluster_info["id"], cluster_list["clusters"], err_msg)
def test_heketi_metrics_validation_after_node(self, condition):
    """Validate heketi metrics after adding and remove node

    Adds an extra gluster node to the heketi cluster and checks that the
    'heketi_nodes_count' metric scraped from prometheus changes
    accordingly.  When ``condition == 'delete'`` the node is also removed
    again and the metric is expected to drop back down; otherwise only
    the increase after the add is verified.
    """
    # Get additional node from the test config; skip if none configured
    additional_host_info = g.config.get("additional_gluster_servers")
    if not additional_host_info:
        self.skipTest(
            "Skipping this test case as additional gluster server is "
            "not provied in config file")
    additional_host_info = list(additional_host_info.values())[0]
    storage_hostname = additional_host_info.get("manage")
    storage_ip = additional_host_info.get("storage")
    if not (storage_hostname and storage_ip):
        self.skipTest(
            "Config options 'additional_gluster_servers.manage' "
            "and 'additional_gluster_servers.storage' must be set.")

    h_client, h_server = self.heketi_client_node, self.heketi_server_url
    initial_node_count, final_node_count = 0, 0

    # Get initial node count from prometheus metrics.
    # NOTE(review): metric values come back as the second element of each
    # result's 'value' pair and are summed via reduce; presumably these
    # are numeric strings from prometheus — confirm against
    # _fetch_metric_from_promtheus_pod.
    metric_result = self._fetch_metric_from_promtheus_pod(
        metric='heketi_nodes_count')
    initial_node_count = reduce(
        lambda x, y: x + y,
        [result.get('value')[1] for result in metric_result])

    # Switch to storage project
    openshift_ops.switch_oc_project(
        self._master, self.storage_project_name)

    # Configure node before adding node
    self.configure_node_to_run_gluster(storage_hostname)

    # Get cluster list
    cluster_info = heketi_ops.heketi_cluster_list(
        h_client, h_server, json=True)

    # Add node to the cluster
    heketi_node_info = heketi_ops.heketi_node_add(
        h_client, h_server,
        len(self.gluster_servers), cluster_info.get('clusters')[0],
        storage_hostname, storage_ip, json=True)
    heketi_node_id = heketi_node_info.get("id")
    # Cleanups run in reverse registration order: switch project first,
    # then disable -> remove -> delete the node; raise_on_error=False so
    # cleanup tolerates the node already being gone (delete branch).
    self.addCleanup(
        heketi_ops.heketi_node_delete, h_client, h_server,
        heketi_node_id, raise_on_error=False)
    self.addCleanup(
        heketi_ops.heketi_node_remove, h_client, h_server,
        heketi_node_id, raise_on_error=False)
    self.addCleanup(
        heketi_ops.heketi_node_disable, h_client, h_server,
        heketi_node_id, raise_on_error=False)
    self.addCleanup(
        openshift_ops.switch_oc_project,
        self._master, self.storage_project_name)

    if condition == 'delete':
        # Switch to openshift-monitoring project
        openshift_ops.switch_oc_project(
            self.ocp_master_node[0], self._prometheus_project_name)

        # Wait until prometheus reflects the node addition
        for w in waiter.Waiter(timeout=60, interval=10):
            metric_result = self._fetch_metric_from_promtheus_pod(
                metric='heketi_nodes_count')
            node_count = reduce(
                lambda x, y: x + y,
                [result.get('value')[1] for result in metric_result])
            if node_count != initial_node_count:
                break
        if w.expired:
            raise exceptions.ExecutionError(
                "Failed to get updated node details from prometheus")

        # Remove node from cluster: disable, remove, delete its devices,
        # then delete the node itself
        heketi_ops.heketi_node_disable(h_client, h_server, heketi_node_id)
        heketi_ops.heketi_node_remove(h_client, h_server, heketi_node_id)
        for device in heketi_node_info.get('devices'):
            heketi_ops.heketi_device_delete(
                h_client, h_server, device.get('id'))
        heketi_ops.heketi_node_delete(h_client, h_server, heketi_node_id)

    # Switch to openshift-monitoring project
    openshift_ops.switch_oc_project(
        self.ocp_master_node[0], self._prometheus_project_name)

    # Get final node count from prometheus metrics and wait for it to
    # move in the expected direction (down after delete, up after add)
    for w in waiter.Waiter(timeout=60, interval=10):
        metric_result = self._fetch_metric_from_promtheus_pod(
            metric='heketi_nodes_count')
        final_node_count = reduce(
            lambda x, y: x + y,
            [result.get('value')[1] for result in metric_result])
        if condition == 'delete':
            # node_count was captured above in the 'delete' branch
            if final_node_count < node_count:
                break
        else:
            if final_node_count > initial_node_count:
                break
    if w.expired:
        raise exceptions.ExecutionError(
            "Failed to update node details in prometheus")
def test_heketi_node_add_with_valid_cluster(self):
    """Test heketi node add operation with valid cluster id

    Adds a node to an existing heketi cluster, verifies it appears in the
    heketi node list and in gluster peer status, then patches the heketi
    DB endpoint object with the refreshed endpoints and checks the new
    node's storage IP is present.
    """
    if (openshift_storage_version.get_openshift_storage_version()
            < "3.11.4"):
        self.skipTest(
            "This test case is not supported for < OCS 3.11.4 builds due "
            "to bug BZ-1732831")

    h_client, h_server = self.heketi_client_node, self.heketi_server_url
    ocp_node = self.ocp_master_node[0]

    # Get heketi endpoints before adding node (used to restore on cleanup)
    h_volume_ids = heketi_ops.heketi_volume_list(
        h_client, h_server, json=True)
    h_endpoints_before_new_node = heketi_ops.heketi_volume_endpoint_patch(
        h_client, h_server, h_volume_ids["volumes"][0])

    # Add a new node to the first cluster in the heketi cluster list
    cluster_info = heketi_ops.heketi_cluster_list(
        h_client, h_server, json=True)
    storage_hostname, storage_ip = self.add_heketi_node_to_cluster(
        cluster_info["clusters"][0])

    # Get heketi nodes and validate for newly added node.
    # node_hostname is reset to None at the end of each iteration, so it
    # is only truthy after the loop if a matching node was found (break).
    h_node_ids = heketi_ops.heketi_node_list(h_client, h_server, json=True)
    for h_node_id in h_node_ids:
        node_hostname = heketi_ops.heketi_node_info(
            h_client, h_server, h_node_id, json=True)
        if node_hostname["hostnames"]["manage"][0] == storage_hostname:
            break
        node_hostname = None
    err_msg = ("Newly added heketi node %s not found in heketi node "
               "list %s" % (storage_hostname, h_node_ids))
    self.assertTrue(node_hostname, err_msg)

    # Check gluster peer status for newly added node
    if self.is_containerized_gluster():
        # Run the peer-status query from the gluster pod scheduled on
        # the new node
        gluster_pods = openshift_ops.get_ocp_gluster_pod_details(ocp_node)
        gluster_pod = [
            gluster_pod["pod_name"]
            for gluster_pod in gluster_pods
            if gluster_pod["pod_hostname"] == storage_hostname][0]
        gluster_peer_status = peer_ops.get_peer_status(
            podcmd.Pod(ocp_node, gluster_pod))
    else:
        gluster_peer_status = peer_ops.get_peer_status(
            storage_hostname)
    self.assertEqual(
        len(gluster_peer_status), len(self.gluster_servers))

    # Every peer must report connected == 1
    err_msg = "Expected peer status is 1 and actual is %s"
    for peer in gluster_peer_status:
        peer_status = int(peer["connected"])
        self.assertEqual(peer_status, 1, err_msg % peer_status)

    # Get heketi endpoints after adding node
    h_endpoints_after_new_node = heketi_ops.heketi_volume_endpoint_patch(
        h_client, h_server, h_volume_ids["volumes"][0])

    # Get openshift endpoints and patch with heketi endpoints; cleanup
    # restores the pre-add endpoints captured earlier
    heketi_db_endpoint = openshift_ops.oc_get_custom_resource(
        ocp_node, "dc", name=self.heketi_dc_name,
        custom=".:spec.template.spec.volumes[*].glusterfs.endpoints")[0]
    openshift_ops.oc_patch(
        ocp_node, "ep", heketi_db_endpoint, h_endpoints_after_new_node)
    self.addCleanup(
        openshift_ops.oc_patch, ocp_node, "ep", heketi_db_endpoint,
        h_endpoints_before_new_node)
    # Verify the new node's storage IP is now part of the endpoint object
    ep_addresses = openshift_ops.oc_get_custom_resource(
        ocp_node, "ep", name=heketi_db_endpoint,
        custom=".:subsets[*].addresses[*].ip")[0].split(",")
    err_msg = "Hostname %s not present in endpoints %s" % (
        storage_ip, ep_addresses)
    self.assertIn(storage_ip, ep_addresses, err_msg)