def set_resource(self, resource):
    self.resource = resource
    if self.resource == 'mgr':
        self.resource_obj = pod.get_mgr_pods()
        self.type = 'rook-ceph'
    if self.resource == 'mon':
        self.resource_obj = pod.get_mon_pods()
        self.type = 'rook-ceph'
    if self.resource == 'osd':
        self.resource_obj = pod.get_osd_pods()
        self.type = 'rook-ceph'
    if self.resource == 'mds':
        self.resource_obj = pod.get_mds_pods()
        self.type = 'rook-ceph'
    if self.resource == 'cephfsplugin':
        self.resource_obj = pod.get_plugin_pods(
            interface=constants.CEPHFILESYSTEM
        )
        self.type = 'csi'
    if self.resource == 'rbdplugin':
        self.resource_obj = pod.get_plugin_pods(
            interface=constants.CEPHBLOCKPOOL
        )
        self.type = 'csi'
    self.resource_count = len(self.resource_obj)

def set_resource(self, resource, leader_type="provisioner"):
    self.resource = resource
    if (
        config.ENV_DATA["platform"] in constants.MANAGED_SERVICE_PLATFORMS
    ) and (resource in CEPH_PODS):
        # If the platform is Managed Services, then the ceph pods will be
        # present in the provider cluster. Consumer cluster will be the
        # primary cluster context in a multicluster run. Setting
        # 'cluster_kubeconfig' attribute to use as the value of the parameter
        # '--kubeconfig' in the 'oc' commands to get ceph pods.
        provider_kubeconfig = os.path.join(
            config.clusters[config.get_provider_index()].ENV_DATA["cluster_path"],
            config.clusters[config.get_provider_index()].RUN.get(
                "kubeconfig_location"
            ),
        )
        self.cluster_kubeconfig = provider_kubeconfig
    resource_count = 0
    if self.resource == "mgr":
        self.resource_obj = pod.get_mgr_pods()
        self.selector = constants.MGR_APP_LABEL
    if self.resource == "mon":
        self.resource_obj = pod.get_mon_pods()
        self.selector = constants.MON_APP_LABEL
    if self.resource == "osd":
        self.resource_obj = pod.get_osd_pods()
        self.selector = constants.OSD_APP_LABEL
    if self.resource == "mds":
        self.resource_obj = pod.get_mds_pods()
        self.selector = constants.MDS_APP_LABEL
    if self.resource == "cephfsplugin":
        self.resource_obj = pod.get_plugin_pods(interface=constants.CEPHFILESYSTEM)
        self.selector = constants.CSI_CEPHFSPLUGIN_LABEL
    if self.resource == "rbdplugin":
        self.resource_obj = pod.get_plugin_pods(interface=constants.CEPHBLOCKPOOL)
        self.selector = constants.CSI_RBDPLUGIN_LABEL
    if self.resource == "cephfsplugin_provisioner":
        self.resource_obj = [
            pod.get_plugin_provisioner_leader(
                interface=constants.CEPHFILESYSTEM, leader_type=leader_type
            )
        ]
        self.selector = constants.CSI_CEPHFSPLUGIN_PROVISIONER_LABEL
        resource_count = len(pod.get_cephfsplugin_provisioner_pods())
    if self.resource == "rbdplugin_provisioner":
        self.resource_obj = [
            pod.get_plugin_provisioner_leader(
                interface=constants.CEPHBLOCKPOOL, leader_type=leader_type
            )
        ]
        self.selector = constants.CSI_RBDPLUGIN_PROVISIONER_LABEL
        resource_count = len(pod.get_rbdfsplugin_provisioner_pods())
    if self.resource == "operator":
        self.resource_obj = pod.get_operator_pods()
        self.selector = constants.OPERATOR_LABEL
    self.resource_count = resource_count or len(self.resource_obj)

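# Hedged usage sketch (not from the source): assuming set_resource() above is a
# method of a disruption-helper class, a test would typically select a daemon,
# record the pod count, and then disrupt one of the pods. The class name
# 'Disruptions' and the 'delete_resource' method are assumptions here.
def _example_disrupt_mon():
    disruption = Disruptions()  # assumed helper class exposing set_resource()
    disruption.set_resource(resource="mon")
    # resource_count should reflect the number of matching pods found
    assert disruption.resource_count == len(pod.get_mon_pods())
    disruption.delete_resource()  # assumed method that kills one selected pod
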
def node_replacement_verification_steps_user_side(
    old_node_name, new_node_name, new_osd_node_name, old_osd_id
):
    """
    Check the verification steps that the user should perform after the
    process of node replacement as described in the docs

    Args:
        old_node_name (str): The name of the old node that has been deleted
        new_node_name (str): The name of the new node that has been created
        new_osd_node_name (str): The name of the new node that has been added to osd nodes
        old_osd_id (str): The old osd id

    Returns:
        bool: True if all the verification steps passed. False otherwise

    """
    ocs_nodes = get_ocs_nodes()
    ocs_node_names = [n.name for n in ocs_nodes]
    if new_node_name not in ocs_node_names:
        log.warning("The new node was not found in the ocs nodes")
        return False
    if old_node_name in ocs_node_names:
        log.warning("The old node name was found in the ocs nodes")
        return False

    csi_cephfsplugin_pods = pod.get_plugin_pods(interface=constants.CEPHFILESYSTEM)
    csi_rbdplugin_pods = pod.get_plugin_pods(interface=constants.CEPHBLOCKPOOL)
    csi_plugin_pods = csi_cephfsplugin_pods + csi_rbdplugin_pods
    if not all(p.status() == constants.STATUS_RUNNING for p in csi_plugin_pods):
        log.warning("Not all csi rbd and cephfs plugin pods are in status running")
        return False

    # It can take some time until all the ocs pods are up and running
    # after the process of node replacement
    if not pod.wait_for_pods_to_be_running():
        log.warning("Not all the pods are in a running state")
        return False

    # Check the pod list before indexing, so an empty result does not raise
    # an IndexError
    new_osd_pods = get_node_pods(new_osd_node_name, pods_to_search=pod.get_osd_pods())
    if not new_osd_pods:
        log.warning("Didn't find any osd pods running on the new node")
        return False

    new_osd_pod = new_osd_pods[0]
    new_osd_id = pod.get_osd_pod_id(new_osd_pod)
    if old_osd_id != new_osd_id:
        log.warning(
            f"The osd pod associated with the new node has the id {new_osd_id} "
            f"instead of the expected osd id {old_osd_id}"
        )
        return False

    log.info("Verification steps from the user side finished successfully")
    return True

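# Hedged usage sketch (not from the source): a node-replacement test would call
# the helper above right after the replacement completes and fail the test on a
# False result. The argument plumbing shown here is an assumption; in the common
# case the new node also becomes the new osd node.
def _example_verify_node_replacement(old_node_name, new_node_name, old_osd_id):
    assert node_replacement_verification_steps_user_side(
        old_node_name=old_node_name,
        new_node_name=new_node_name,
        new_osd_node_name=new_node_name,  # assumed: new node hosts the new osd
        old_osd_id=old_osd_id,
    ), "Node replacement verification steps failed"
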
def test_ceph_csidriver_runs_on_non_ocs_nodes(
    self, pvc_factory, pod_factory, add_nodes
):
    """
    1. Add non ocs nodes
    2. Taint new nodes with app label
    3. Check if plugin pods are running on the new nodes
    4. Create app-pods on app_nodes
    """
    # Add worker nodes and taint them as app_nodes
    add_nodes(ocs_nodes=False, taint_label="nodetype=app:NoSchedule")

    # Check for new plugin pods respinning on the new app-nodes
    app_nodes = [node.name for node in get_worker_nodes_not_in_ocs()]
    interfaces = [constants.CEPHFILESYSTEM, constants.CEPHBLOCKPOOL]
    logger.info("Checking for plugin pods on non-ocs worker nodes")
    for interface in interfaces:
        pod_objs = get_plugin_pods(interface)
        for pod_obj in pod_objs:
            node_obj = get_pod_node(pod_obj)
            try:
                if node_obj.name in app_nodes:
                    logger.info(
                        f"The plugin pod {pod_obj.name} is running on app_node {node_obj.name}"
                    )
                    continue
            except Exception as e:
                logger.info(f"Plugin pod was not found on {node_obj.name} - {e}")

    # Create app-pods on the app-nodes
    for node in app_nodes:
        pvc_obj = pvc_factory()
        pod_factory(pvc=pvc_obj, node_name=node)

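# Hedged illustration (not from the source): the taint string passed to
# add_nodes() above corresponds to key=nodetype, value=app, effect=NoSchedule.
# For the app pods created by pod_factory to land on those nodes, their pod spec
# would need a matching toleration; a minimal spec fragment (as a Python dict)
# could look like this. How pod_factory injects tolerations is an assumption.
example_app_pod_toleration = {
    "tolerations": [
        {
            "key": "nodetype",
            "operator": "Equal",
            "value": "app",
            "effect": "NoSchedule",
        }
    ]
}
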
def remove_global_id_reclaim():
    """
    Removes global id warning by re-spinning client and mon pods

    """
    csi_pods = []
    interfaces = [constants.CEPHBLOCKPOOL, constants.CEPHFILESYSTEM]
    for interface in interfaces:
        plugin_pods = get_plugin_pods(interface)
        csi_pods += plugin_pods

    cephfs_provisioner_pods = get_cephfsplugin_provisioner_pods()
    rbd_provisioner_pods = get_rbdfsplugin_provisioner_pods()

    csi_pods += cephfs_provisioner_pods
    csi_pods += rbd_provisioner_pods
    for csi_pod in csi_pods:
        csi_pod.delete()

    for mds_pod in get_mds_pods():
        mds_pod.delete()
    for mds_pod in get_mds_pods():
        wait_for_resource_state(resource=mds_pod, state=constants.STATUS_RUNNING)

    for mon in get_mon_pods(namespace=constants.OPENSHIFT_STORAGE_NAMESPACE):
        mon.delete()
    mon_pods = get_mon_pods(namespace=constants.OPENSHIFT_STORAGE_NAMESPACE)
    for mon in mon_pods:
        wait_for_resource_state(resource=mon, state=constants.STATUS_RUNNING)

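# Hedged follow-up sketch (not from the source): after re-spinning the client
# and mon pods, one would confirm that the AUTH_INSECURE_GLOBAL_ID_RECLAIM*
# health warnings are actually gone. 'get_ceph_tools_pod' and 'exec_ceph_cmd'
# are assumed helpers for running ceph commands via the toolbox pod.
def _example_check_global_id_warning_cleared():
    ct_pod = get_ceph_tools_pod()  # assumed toolbox-pod helper
    health = ct_pod.exec_ceph_cmd("ceph health detail")
    assert "AUTH_INSECURE_GLOBAL_ID_RECLAIM" not in str(health)
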
def verify_multus_network():
    """
    Verify Multus network(s) created successfully and are present on
    relevant pods.
    """
    # Use safe_load instead of the deprecated bare yaml.load(f)
    with open(constants.MULTUS_YAML, mode="r") as f:
        multus_public_data = yaml.safe_load(f)

    multus_namespace = multus_public_data["metadata"]["namespace"]
    multus_name = multus_public_data["metadata"]["name"]
    multus_public_network_name = f"{multus_namespace}/{multus_name}"

    log.info("Verifying multus NetworkAttachmentDefinitions")
    ocp.OCP(
        resource_name=multus_public_network_name,
        kind="network-attachment-definitions",
        namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
    )
    # TODO: also check if private NAD exists

    log.info("Verifying multus public network exists on ceph pods")
    osd_pods = get_osd_pods()
    for _pod in osd_pods:
        assert (
            _pod.data["metadata"]["annotations"]["k8s.v1.cni.cncf.io/networks"]
            == multus_public_network_name
        )
    # TODO: also check private network if it exists on OSD pods

    mon_pods = get_mon_pods()
    mds_pods = get_mds_pods()
    mgr_pods = get_mgr_pods()
    rgw_pods = get_rgw_pods()
    ceph_pods = [*mon_pods, *mds_pods, *mgr_pods, *rgw_pods]
    for _pod in ceph_pods:
        assert (
            _pod.data["metadata"]["annotations"]["k8s.v1.cni.cncf.io/networks"]
            == multus_public_network_name
        )

    log.info("Verifying multus public network exists on CSI pods")
    csi_pods = []
    interfaces = [constants.CEPHBLOCKPOOL, constants.CEPHFILESYSTEM]
    for interface in interfaces:
        plugin_pods = get_plugin_pods(interface)
        csi_pods += plugin_pods

    cephfs_provisioner_pods = get_cephfsplugin_provisioner_pods()
    rbd_provisioner_pods = get_rbdfsplugin_provisioner_pods()

    csi_pods += cephfs_provisioner_pods
    csi_pods += rbd_provisioner_pods
    for _pod in csi_pods:
        assert (
            _pod.data["metadata"]["annotations"]["k8s.v1.cni.cncf.io/networks"]
            == multus_public_network_name
        )

    log.info("Verifying StorageCluster multus network data")
    sc = get_storage_cluster()
    sc_data = sc.get().get("items")[0]
    network_data = sc_data["spec"]["network"]
    assert network_data["provider"] == "multus"
    selectors = network_data["selectors"]
    assert selectors["public"] == f"{defaults.ROOK_CLUSTER_NAMESPACE}/ocs-public"

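# Hedged illustration (not from the source): the public
# NetworkAttachmentDefinition that MULTUS_YAML is expected to contain would look
# roughly like the dict below; pods are then annotated with '<namespace>/<name>',
# which is what the asserts above compare against. The spec/config values are
# assumptions for illustration only.
example_public_nad = {
    "apiVersion": "k8s.cni.cncf.io/v1",
    "kind": "NetworkAttachmentDefinition",
    "metadata": {"name": "ocs-public", "namespace": "openshift-storage"},
    "spec": {
        # assumed CNI config; real deployments vary
        "config": '{"type": "macvlan", "ipam": {"type": "whereabouts"}}'
    },
}
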
def set_resource(self, resource, leader_type="provisioner"):
    self.resource = resource
    resource_count = 0
    if self.resource == "mgr":
        self.resource_obj = pod.get_mgr_pods()
        self.selector = constants.MGR_APP_LABEL
    if self.resource == "mon":
        self.resource_obj = pod.get_mon_pods()
        self.selector = constants.MON_APP_LABEL
    if self.resource == "osd":
        self.resource_obj = pod.get_osd_pods()
        self.selector = constants.OSD_APP_LABEL
    if self.resource == "mds":
        self.resource_obj = pod.get_mds_pods()
        self.selector = constants.MDS_APP_LABEL
    if self.resource == "cephfsplugin":
        self.resource_obj = pod.get_plugin_pods(interface=constants.CEPHFILESYSTEM)
        self.selector = constants.CSI_CEPHFSPLUGIN_LABEL
    if self.resource == "rbdplugin":
        self.resource_obj = pod.get_plugin_pods(interface=constants.CEPHBLOCKPOOL)
        self.selector = constants.CSI_RBDPLUGIN_LABEL
    if self.resource == "cephfsplugin_provisioner":
        self.resource_obj = [
            pod.get_plugin_provisioner_leader(
                interface=constants.CEPHFILESYSTEM, leader_type=leader_type
            )
        ]
        self.selector = constants.CSI_CEPHFSPLUGIN_PROVISIONER_LABEL
        resource_count = len(pod.get_cephfsplugin_provisioner_pods())
    if self.resource == "rbdplugin_provisioner":
        self.resource_obj = [
            pod.get_plugin_provisioner_leader(
                interface=constants.CEPHBLOCKPOOL, leader_type=leader_type
            )
        ]
        self.selector = constants.CSI_RBDPLUGIN_PROVISIONER_LABEL
        resource_count = len(pod.get_rbdfsplugin_provisioner_pods())
    if self.resource == "operator":
        self.resource_obj = pod.get_operator_pods()
        self.selector = constants.OPERATOR_LABEL
    self.resource_count = resource_count or len(self.resource_obj)

def set_resource(self, resource):
    self.resource = resource
    resource_count = 0
    if self.resource == 'mgr':
        self.resource_obj = pod.get_mgr_pods()
        self.selector = constants.MGR_APP_LABEL
    if self.resource == 'mon':
        self.resource_obj = pod.get_mon_pods()
        self.selector = constants.MON_APP_LABEL
    if self.resource == 'osd':
        self.resource_obj = pod.get_osd_pods()
        self.selector = constants.OSD_APP_LABEL
    if self.resource == 'mds':
        self.resource_obj = pod.get_mds_pods()
        self.selector = constants.MDS_APP_LABEL
    if self.resource == 'cephfsplugin':
        self.resource_obj = pod.get_plugin_pods(
            interface=constants.CEPHFILESYSTEM)
        self.selector = constants.CSI_CEPHFSPLUGIN_LABEL
    if self.resource == 'rbdplugin':
        self.resource_obj = pod.get_plugin_pods(
            interface=constants.CEPHBLOCKPOOL)
        self.selector = constants.CSI_RBDPLUGIN_LABEL
    if self.resource == 'cephfsplugin_provisioner':
        self.resource_obj = [
            pod.plugin_provisioner_leader(
                interface=constants.CEPHFILESYSTEM)
        ]
        self.selector = constants.CSI_CEPHFSPLUGIN_PROVISIONER_LABEL
        resource_count = len(pod.get_cephfsplugin_provisioner_pods())
    if self.resource == 'rbdplugin_provisioner':
        self.resource_obj = [
            pod.plugin_provisioner_leader(
                interface=constants.CEPHBLOCKPOOL)
        ]
        self.selector = constants.CSI_RBDPLUGIN_PROVISIONER_LABEL
        resource_count = len(pod.get_rbdfsplugin_provisioner_pods())
    if self.resource == 'operator':
        self.resource_obj = pod.get_operator_pods()
        self.selector = constants.OPERATOR_LABEL
    self.resource_count = resource_count or len(self.resource_obj)

def set_resource(self, resource, leader_type="provisioner", cluster_index=None):
    self.resource = resource
    if (config.ENV_DATA["platform"] in constants.MANAGED_SERVICE_PLATFORMS) and (
        resource in CEPH_PODS
    ):
        # If the platform is Managed Services, then the ceph pods will be
        # present in the provider cluster. Consumer cluster will be the
        # primary cluster context in a multicluster run. Setting
        # 'cluster_kubeconfig' attribute to use as the value of the parameter
        # '--kubeconfig' in the 'oc' commands to get ceph pods.
        provider_kubeconfig = os.path.join(
            config.clusters[config.get_provider_index()].ENV_DATA["cluster_path"],
            config.clusters[config.get_provider_index()].RUN.get(
                "kubeconfig_location"
            ),
        )
        self.cluster_kubeconfig = provider_kubeconfig
    elif config.ENV_DATA["platform"] in constants.MANAGED_SERVICE_PLATFORMS:
        # cluster_index is used to identify the cluster in which the pod is
        # residing. If cluster_index is not passed, assume that the context is
        # already changed to the cluster where the pod is residing.
        cluster_index = (
            cluster_index if cluster_index is not None else config.cur_index
        )
        self.cluster_kubeconfig = os.path.join(
            config.clusters[cluster_index].ENV_DATA["cluster_path"],
            config.clusters[cluster_index].RUN.get("kubeconfig_location"),
        )
    resource_count = 0
    if self.resource == "mgr":
        self.resource_obj = pod.get_mgr_pods()
        self.selector = constants.MGR_APP_LABEL
    if self.resource == "mon":
        self.resource_obj = pod.get_mon_pods()
        self.selector = constants.MON_APP_LABEL
    if self.resource == "osd":
        self.resource_obj = pod.get_osd_pods()
        self.selector = constants.OSD_APP_LABEL
    if self.resource == "mds":
        self.resource_obj = pod.get_mds_pods()
        self.selector = constants.MDS_APP_LABEL
    if self.resource == "cephfsplugin":
        self.resource_obj = pod.get_plugin_pods(interface=constants.CEPHFILESYSTEM)
        self.selector = constants.CSI_CEPHFSPLUGIN_LABEL
    if self.resource == "rbdplugin":
        self.resource_obj = pod.get_plugin_pods(interface=constants.CEPHBLOCKPOOL)
        self.selector = constants.CSI_RBDPLUGIN_LABEL
    if self.resource == "cephfsplugin_provisioner":
        self.resource_obj = [
            pod.get_plugin_provisioner_leader(
                interface=constants.CEPHFILESYSTEM, leader_type=leader_type
            )
        ]
        self.selector = constants.CSI_CEPHFSPLUGIN_PROVISIONER_LABEL
        resource_count = len(pod.get_cephfsplugin_provisioner_pods())
    if self.resource == "rbdplugin_provisioner":
        self.resource_obj = [
            pod.get_plugin_provisioner_leader(
                interface=constants.CEPHBLOCKPOOL, leader_type=leader_type
            )
        ]
        self.selector = constants.CSI_RBDPLUGIN_PROVISIONER_LABEL
        resource_count = len(pod.get_rbdfsplugin_provisioner_pods())
    if self.resource == "operator":
        self.resource_obj = pod.get_operator_pods()
        self.selector = constants.OPERATOR_LABEL
    if self.resource == "ocs_operator":
        self.resource_obj = [pod.get_ocs_operator_pod()]
        self.selector = constants.OCS_OPERATOR_LABEL
    if self.resource == "alertmanager_managed_ocs_alertmanager":
        self.resource_obj = pod.get_alertmanager_managed_ocs_alertmanager_pods()
        self.selector = constants.MANAGED_ALERTMANAGER_LABEL
    if self.resource == "ocs_osd_controller_manager":
        self.resource_obj = [pod.get_ocs_osd_controller_manager_pod()]
        self.selector = constants.MANAGED_CONTROLLER_LABEL
        # Setting resource_count because the odf-operator-controller-manager
        # pods also have the same label.
        resource_count = len(
            pod.get_pods_having_label(
                constants.MANAGED_CONTROLLER_LABEL,
                config.ENV_DATA["cluster_namespace"],
            )
        )
    if self.resource == "prometheus_managed_ocs_prometheus":
        self.resource_obj = [pod.get_prometheus_managed_ocs_prometheus_pod()]
        self.selector = constants.MANAGED_PROMETHEUS_LABEL
    if self.resource == "prometheus_operator":
        self.resource_obj = [pod.get_prometheus_operator_pod()]
        self.selector = constants.PROMETHEUS_OPERATOR_LABEL
    if self.resource == "ocs_provider_server":
        self.resource_obj = [pod.get_ocs_provider_server_pod()]
        self.selector = constants.PROVIDER_SERVER_LABEL
    self.resource_count = resource_count or len(self.resource_obj)

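# Hedged usage sketch (not from the source): in a Managed Services multicluster
# run, ceph pods transparently resolve to the provider cluster via the
# kubeconfig switch above, while other managed-service pods can be targeted on
# a specific cluster with cluster_index. The 'Disruptions' class name is an
# assumption.
def _example_managed_service_disruption():
    disruption = Disruptions()
    # ceph pod: cluster_kubeconfig is pointed at the provider cluster internally
    disruption.set_resource(resource="osd")
    # non-ceph managed-service pod, targeted on the current (consumer) cluster
    disruption.set_resource(
        resource="ocs_provider_server", cluster_index=config.cur_index
    )
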