def setUpClass(cls): """Initialize all the variables necessary for test cases.""" super(BaseClass, cls).setUpClass() # Initializes OCP config variables cls.ocp_servers_info = g.config['ocp_servers'] cls.ocp_master_node = g.config['ocp_servers']['master'].keys() cls.ocp_master_node_info = g.config['ocp_servers']['master'] cls.ocp_client = g.config['ocp_servers']['client'].keys() cls.ocp_client_info = g.config['ocp_servers']['client'] cls.ocp_nodes = g.config['ocp_servers']['nodes'].keys() cls.ocp_nodes_info = g.config['ocp_servers']['nodes'] # Initializes storage project config variables openshift_config = g.config.get("cns", g.config.get("openshift")) cls.storage_project_name = openshift_config.get( 'storage_project_name', openshift_config.get('setup', {}).get('cns_project_name')) # Initializes heketi config variables heketi_config = openshift_config['heketi_config'] cls.heketi_dc_name = heketi_config['heketi_dc_name'] cls.heketi_service_name = heketi_config['heketi_service_name'] cls.heketi_client_node = heketi_config['heketi_client_node'] cls.heketi_server_url = heketi_config['heketi_server_url'] cls.heketi_cli_user = heketi_config['heketi_cli_user'] cls.heketi_cli_key = heketi_config['heketi_cli_key'] cls.gluster_servers = g.config['gluster_servers'].keys() cls.gluster_servers_info = g.config['gluster_servers'] cls.storage_classes = openshift_config['dynamic_provisioning'][ 'storage_classes'] cls.sc = cls.storage_classes.get( 'storage_class1', cls.storage_classes.get('file_storage_class')) cmd = "echo -n %s | base64" % cls.heketi_cli_key ret, out, err = g.run(cls.ocp_master_node[0], cmd, "root") if ret != 0: raise ExecutionError("failed to execute cmd %s on %s out: %s " "err: %s" % (cmd, cls.ocp_master_node[0], out, err)) cls.secret_data_key = out.strip() # Checks if heketi server is alive if not hello_heketi(cls.heketi_client_node, cls.heketi_server_url): raise ConfigError("Heketi server %s is not alive" % cls.heketi_server_url) # Switch to the storage project if not switch_oc_project(cls.ocp_master_node[0], cls.storage_project_name): raise ExecutionError("Failed to switch oc project on node %s" % cls.ocp_master_node[0]) if 'glustotest_run_id' not in g.config: g.config['glustotest_run_id'] = ( datetime.datetime.now().strftime('%H_%M_%d_%m_%Y')) cls.glustotest_run_id = g.config['glustotest_run_id'] msg = "Setupclass: %s : %s" % (cls.__name__, cls.glustotest_run_id) g.log.info(msg)
def test_delete_heketidb_volume(self):
    """Method to test heketidb volume deletion via heketi-cli."""
    volume_id_list = []
    heketidbexists = False
    msg = "Error: Cannot delete volume containing the Heketi database"

    for i in range(0, 2):
        volume_info = heketi_ops.heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url,
            10, json=True)
        self.assertNotEqual(volume_info, False, "Volume creation failed")
        volume_id_list.append(volume_info["id"])

    self.addCleanup(self.delete_volumes, volume_id_list)

    volume_list_info = heketi_ops.heketi_volume_list(
        self.heketi_client_node, self.heketi_server_url, json=True)
    self.assertNotEqual(volume_list_info, False,
                        "Heketi volume list command failed")

    if volume_list_info["volumes"] == []:
        raise ExecutionError("Heketi volume list empty")

    for volume_id in volume_list_info["volumes"]:
        volume_info = heketi_ops.heketi_volume_info(
            self.heketi_client_node, self.heketi_server_url,
            volume_id, json=True)

        if volume_info["name"] == "heketidbstorage":
            heketidbexists = True
            delete_ret, delete_output, delete_error = (
                heketi_ops.heketi_volume_delete(
                    self.heketi_client_node, self.heketi_server_url,
                    volume_id, raw_cli_output=True))
            self.assertNotEqual(delete_ret, 0, "Return code not 0")
            self.assertEqual(
                delete_error.strip(), msg,
                "Invalid reason for heketidb deletion failure")

    if not heketidbexists:
        raise ExecutionError(
            "Warning: heketidbstorage doesn't exist in list of volumes")
def test_dynamic_provisioning_glusterfile_glusterpod_failure(self):
    """Validate dynamic provisioning for gluster file when gluster pod
    is down.
    """
    mount_path = "/mnt"
    datafile_path = '%s/fake_file_for_%s' % (mount_path, self.id())

    # Create secret and storage class
    self.create_storage_class()

    # Create PVC
    pvc_name = self.create_and_wait_for_pvc()

    # Create app POD with attached volume
    pod_name = oc_create_tiny_pod_with_volume(
        self.node, pvc_name, "test-pvc-mount-on-app-pod",
        mount_path=mount_path)
    self.addCleanup(
        wait_for_resource_absence, self.node, 'pod', pod_name)
    self.addCleanup(oc_delete, self.node, 'pod', pod_name)

    # Wait for app POD to be up and running
    wait_for_pod_be_ready(
        self.node, pod_name, timeout=60, wait_step=2)

    # Run IO in background
    io_cmd = "oc rsh %s dd if=/dev/urandom of=%s bs=1000K count=900" % (
        pod_name, datafile_path)
    async_io = g.run_async(self.node, io_cmd, "root")

    # Pick up one of the hosts which stores PV brick (4+ nodes case)
    gluster_pod_data = get_gluster_pod_names_by_pvc_name(
        self.node, pvc_name)[0]

    # Delete glusterfs POD from chosen host and wait for spawn of new one
    oc_delete(self.node, 'pod', gluster_pod_data["pod_name"])
    cmd = ("oc get pods -o wide | grep glusterfs | grep %s | "
           "grep -v Terminating | awk '{print $1}'") % (
               gluster_pod_data["host_name"])
    for w in Waiter(600, 30):
        out = self.cmd_run(cmd)
        new_gluster_pod_name = out.strip().split("\n")[0].strip()
        if not new_gluster_pod_name:
            continue
        else:
            break
    if w.expired:
        error_msg = "exceeded timeout, new gluster pod not created"
        g.log.error(error_msg)
        raise ExecutionError(error_msg)
    new_gluster_pod_name = out.strip().split("\n")[0].strip()
    g.log.info("new gluster pod name is %s" % new_gluster_pod_name)
    wait_for_pod_be_ready(self.node, new_gluster_pod_name)

    # Check that async IO was not interrupted
    ret, out, err = async_io.async_communicate()
    self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd, self.node))
def _node_reboot(self):
    storage_hostname = (
        g.config["gluster_servers"][self.gluster_servers[0]]["storage"])

    cmd = "sleep 3; /sbin/shutdown -r now 'Reboot triggered by Glusto'"
    ret, out, err = g.run(storage_hostname, cmd)

    self.addCleanup(self._wait_for_gluster_pod_to_be_ready)

    if ret != 255:
        err_msg = "failed to reboot host %s error: %s" % (
            storage_hostname, err)
        g.log.error(err_msg)
        raise AssertionError(err_msg)

    try:
        g.ssh_close_connection(storage_hostname)
    except Exception as e:
        g.log.error("failed to close connection with host %s"
                    " with error: %s" % (storage_hostname, e))
        raise

    # added sleep as node will restart after 3 sec
    time.sleep(3)

    for w in Waiter(timeout=600, interval=10):
        try:
            if g.rpyc_get_connection(storage_hostname, user="******"):
                g.rpyc_close_connection(storage_hostname, user="******")
                break
        except Exception as err:
            g.log.info("exception while getting connection: '%s'" % err)

    if w.expired:
        error_msg = ("exceeded timeout 600 sec, node '%s' is "
                     "not reachable" % storage_hostname)
        g.log.error(error_msg)
        raise ExecutionError(error_msg)

    # wait for the gluster pod to be in 'Running' state
    self._wait_for_gluster_pod_to_be_ready()

    # glusterd, gluster-blockd and tcmu-runner services should be
    # up and running
    service_names = ("glusterd", "gluster-blockd", "tcmu-runner")
    for gluster_pod in self.gluster_pod_list:
        for service in service_names:
            g.log.info("gluster_pod - '%s' : gluster_service '%s'"
                       % (gluster_pod, service))
            check_service_status(self.oc_node, gluster_pod,
                                 service, "running")
def setUpClass(cls): """ setUpClass of HeketiBaseClass """ super(HeketiBaseClass, cls).setUpClass() # Initializes config variables openshift_config = g.config.get("cns", g.config.get("openshift")) cls.storage_project_name = openshift_config.get( 'storage_project_name', openshift_config.get('setup', {}).get('cns_project_name')) cls.ocp_master_nodes = g.config['ocp_servers']['master'].keys() cls.ocp_master_node = cls.ocp_master_nodes[0] heketi_config = openshift_config['heketi_config'] cls.heketi_dc_name = heketi_config['heketi_dc_name'] cls.heketi_service_name = heketi_config['heketi_service_name'] cls.heketi_client_node = heketi_config['heketi_client_node'] cls.heketi_server_url = heketi_config['heketi_server_url'] cls.heketi_cli_user = heketi_config['heketi_cli_user'] cls.heketi_cli_key = heketi_config['heketi_cli_key'] cls.gluster_servers = g.config['gluster_servers'].keys() cls.gluster_servers_info = g.config['gluster_servers'] # Checks if heketi server is alive if not hello_heketi(cls.heketi_client_node, cls.heketi_server_url): raise ConfigError("Heketi server %s is not alive" % cls.heketi_server_url) # Switch to the storage project if not openshift_ops.switch_oc_project(cls.ocp_master_node, cls.storage_project_name): raise ExecutionError("Failed to switch oc project on node %s" % cls.ocp_master_node) # Have a unique string to recognize the test run for logging if 'glustotest_run_id' not in g.config: g.config['glustotest_run_id'] = ( datetime.datetime.now().strftime('%H_%M_%d_%m_%Y')) cls.glustotest_run_id = g.config['glustotest_run_id'] msg = "Setupclass: %s : %s" % (cls.__name__, cls.glustotest_run_id) g.log.info(msg)
def _wait_for_gluster_pod_to_be_ready(self):
    """Wait for all the gluster pods to reach the 'Running' state."""
    for gluster_pod in self.gluster_pod_list:
        for w in Waiter(timeout=600, interval=10):
            try:
                success = wait_for_pod_be_ready(
                    self.oc_node, gluster_pod, timeout=1, wait_step=1)
                if success:
                    break
            except ExecutionError as e:
                g.log.info("exception %s while validating gluster "
                           "pod %s" % (e, gluster_pod))

        if w.expired:
            error_msg = ("exceeded timeout 600 sec, pod '%s' is "
                         "not in 'running' state" % gluster_pod)
            g.log.error(error_msg)
            raise ExecutionError(error_msg)
def delete_volumes(self, volume_ids):
    """Delete volumes by their IDs and raise an error listing any failures.

    Args:
        volume_ids (str/list/set/tuple): a single volume ID or a
            collection of volume IDs.
    """
    errored_ids = []

    if not isinstance(volume_ids, (list, set, tuple)):
        volume_ids = [volume_ids]

    for volume_id in volume_ids:
        out = heketi_volume_delete(
            self.heketi_client_node, self.heketi_server_url, volume_id)
        output_str = 'Volume %s deleted' % volume_id
        if output_str not in out:
            errored_ids.append(volume_id)

    if errored_ids:
        raise ExecutionError("Failed to delete following heketi volumes: "
                             "%s" % ',\n'.join(errored_ids))
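# Usage sketch (illustrative, not part of the library): a test method that
# creates a single heketi volume and registers delete_volumes() as its
# cleanup. It assumes a HeketiBaseClass-style test class providing
# heketi_client_node and heketi_server_url, and mirrors the
# heketi_ops.heketi_volume_create() call used in the tests above; the method
# name is hypothetical.
def example_test_volume_create_with_cleanup(self):
    volume_info = heketi_ops.heketi_volume_create(
        self.heketi_client_node, self.heketi_server_url, 1, json=True)
    self.assertNotEqual(volume_info, False, "Volume creation failed")
    # delete_volumes() accepts a single ID or a list/set/tuple of IDs
    self.addCleanup(self.delete_volumes, volume_info["id"])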
def enable_pvc_resize(master_node):
    '''Enable the pv_resize feature on an OCP master node.

    Edits the /etc/origin/master/master-config.yaml file to enable the
    pv_resize feature and restarts the atomic-openshift services on the
    master node.

    Args:
        master_node (str): hostname of the master node on which the
            master-config.yaml file is to be edited.

    Returns:
        bool: True if successful, otherwise raises an exception.
    '''
    version = get_openshift_version()
    if version < "3.9":
        msg = ("pv resize is not available in openshift "
               "version %s " % version)
        g.log.error(msg)
        raise NotSupportedException(msg)

    try:
        conn = g.rpyc_get_connection(master_node, user="******")
        if conn is None:
            err_msg = ("Failed to get rpyc connection of node %s"
                       % master_node)
            g.log.error(err_msg)
            raise ExecutionError(err_msg)

        with conn.builtin.open(MASTER_CONFIG_FILEPATH, 'r') as f:
            data = yaml.load(f)
            dict_add = data['admissionConfig']['pluginConfig']
            if "PersistentVolumeClaimResize" in dict_add:
                g.log.info("master-config.yaml file is already edited")
                return True
            dict_add['PersistentVolumeClaimResize'] = {
                'configuration': {
                    'apiVersion': 'v1',
                    'disable': 'false',
                    'kind': 'DefaultAdmissionConfig'
                }
            }
            data['admissionConfig']['pluginConfig'] = dict_add
            kube_config = data['kubernetesMasterConfig']
            for key in ('apiServerArguments', 'controllerArguments'):
                kube_config[key] = (
                    kube_config.get(key)
                    if isinstance(kube_config.get(key), dict) else {})
                value = ['ExpandPersistentVolumes=true']
                kube_config[key]['feature-gates'] = value

        with conn.builtin.open(MASTER_CONFIG_FILEPATH, 'w+') as f:
            yaml.dump(data, f, default_flow_style=False)
    except Exception as err:
        raise ExecutionError("failed to edit master-config.yaml file "
                             "%s on %s" % (err, master_node))
    finally:
        g.rpyc_close_connection(master_node, user="******")

    g.log.info("successfully edited master-config.yaml file "
               "%s" % master_node)

    if version == "3.9":
        cmd = ("systemctl restart atomic-openshift-master-api "
               "atomic-openshift-master-controllers")
    else:
        cmd = ("/usr/local/bin/master-restart api && "
               "/usr/local/bin/master-restart controllers")
    ret, out, err = g.run(master_node, cmd, "root")
    if ret != 0:
        err_msg = "Failed to execute cmd %s on %s\nout: %s\nerr: %s" % (
            cmd, master_node, out, err)
        g.log.error(err_msg)
        raise ExecutionError(err_msg)

    return True
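# Usage sketch (illustrative): enable PVC resize on the first configured OCP
# master before creating storage classes with "allowVolumeExpansion: true".
# It assumes the same g.config layout read by the setUpClass methods above
# and that g is already imported in this module; the wrapper function name is
# hypothetical. Error handling is delegated to enable_pvc_resize() itself,
# which raises NotSupportedException on OCP < 3.9 and ExecutionError on
# command failures.
def example_enable_pvc_resize():
    master_node = list(g.config['ocp_servers']['master'].keys())[0]
    return enable_pvc_resize(master_node)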