def get_replica_count(machine_set):
    """
    Get the configured replica count of a machine set.

    Args:
        machine_set (str): Name of the machine set to get replica count of

    Returns:
        int: Replica count defined in the machine set spec
    """
    machinesets_obj = OCP(
        kind=constants.MACHINESETS,
        namespace=constants.OPENSHIFT_MACHINE_API_NAMESPACE,
    )
    machine_set_data = machinesets_obj.get(resource_name=machine_set)
    return machine_set_data.get("spec").get("replicas")
def get_pods_having_label(label, namespace):
    """
    Fetch pod resources carrying the given label in the given namespace.

    Args:
        label (str): Label selector the pods must match
        namespace (str): Namespace in which to look the pods up

    Return:
        list: Pod resource dictionaries matching the selector
    """
    ocp_pod = OCP(kind=constants.POD, namespace=namespace)
    return ocp_pod.get(selector=label).get('items')
def verify_pods_upgraded(old_images, selector, count=1, timeout=720):
    """
    Verify that all pods matching the selector no longer run any old image.

    Args:
        old_images (set): Set with old images.
        selector (str): Selector (e.g. app=ocs-osd)
        count (int): Number of resources for selector.
        timeout (int): Timeout in seconds to wait for pods to be upgraded.

    Raises:
        TimeoutException: If the pods didn't get upgraded till the timeout.
    """
    namespace = config.ENV_DATA['cluster_namespace']
    info_message = (
        f"Waiting for {count} pods with selector: {selector} to be running "
        f"and upgraded."
    )
    logger.info(info_message)
    start_time = time.time()
    selector_label, selector_value = selector.split('=')
    while True:
        pod_count = 0
        # Initialize before the try block: if the very first get_all_pods()
        # call raises CommandFailed, the checks after the try would otherwise
        # hit an UnboundLocalError on pods_len.
        pods_len = 0
        try:
            pods = get_all_pods(namespace, [selector_value], selector_label)
            pods_len = len(pods)
            logger.info(f"Found {pods_len} pod(s) for selector: {selector}")
            if pods_len != count:
                logger.warning(
                    f"Number of found pods {pods_len} is not as expected: "
                    f"{count}"
                )
            for pod_obj in pods:
                verify_images_upgraded(old_images, pod_obj.get())
                pod_count += 1
        except CommandFailed as ex:
            logger.warning(
                f"Failed when getting pods with selector {selector}."
                f"Error: {ex}"
            )
        except NonUpgradedImagesFoundError as ex:
            logger.warning(ex)
        # Raises TimeoutException once the deadline is exceeded
        check_timeout_reached(start_time, timeout, info_message)
        if pods_len != count:
            logger.error(f"Found pods: {pods_len} but expected: {count}!")
        elif pod_count == count:
            return
def refresh_connection(self):
    """
    Re-authenticate to OCP and refresh the Prometheus endpoint and token.
    """
    ocp = OCP(
        kind=constants.ROUTE,
        namespace=defaults.OCS_MONITORING_NAMESPACE,
    )
    login_ok = ocp.login(self._user, self._password)
    assert login_ok, 'Login to OCP failed'
    self._token = ocp.get_user_token()
    route_obj = ocp.get(resource_name=defaults.PROMETHEUS_ROUTE)
    self._endpoint = 'https://' + route_obj['spec']['host']
class RGW(object):
    """
    Wrapper class for interaction with a cluster's RGW service
    """

    def __init__(self, namespace=None):
        """
        Args:
            namespace (str): Namespace to use; defaults to the cluster
                namespace from the run configuration.
        """
        self.namespace = (
            namespace if namespace else config.ENV_DATA['cluster_namespace']
        )

        if storagecluster_independent_check():
            sc_name = constants.INDEPENDENT_DEFAULT_STORAGECLASS_RGW
        else:
            sc_name = constants.DEFAULT_STORAGECLASS_RGW

        # Use the resolved namespace: the raw argument may be None, which
        # would previously be passed straight through to OCP.
        self.storageclass = OCP(
            kind='storageclass',
            namespace=self.namespace,
            resource_name=sc_name,
        )
        # Fetch the storageclass once and reuse the parameters for both
        # attributes (previously two identical API calls were made).
        sc_parameters = self.storageclass.get().get('parameters')
        self.s3_internal_endpoint = sc_parameters.get('endpoint')
        self.region = sc_parameters.get('region')
        # Todo: Implement retrieval in cases where CephObjectStoreUser is available
        self.key_id = None
        self.secret_key = None
        self.s3_resource = None
def get_cephfs_provisioner_pod():
    """
    Get the cephfs provisioner pod

    Returns:
        Pod object: The cephfs provisioner pod object
    """
    ocp_pod_obj = OCP(
        kind=constants.POD,
        namespace=config.ENV_DATA['cluster_namespace'],
    )
    provisioner_items = ocp_pod_obj.get(
        selector='app=csi-cephfsplugin-provisioner'
    )['items']
    assert provisioner_items, "No cephfs provisioner pod found"
    return Pod(**provisioner_items[0])
def get_list_pvc_objs_created_on_monitoring_pods():
    """
    Get PVC objects created on monitoring pods.

    Returns:
        list: List of PVC objects in the monitoring namespace
    """
    # Use the shared constant for the monitoring namespace instead of a
    # hard-coded string, consistent with the rest of the codebase.
    pvc_list = get_all_pvcs(namespace=defaults.OCS_MONITORING_NAMESPACE)
    ocp_pvc_obj = OCP(
        kind=constants.PVC,
        namespace=defaults.OCS_MONITORING_NAMESPACE,
    )
    pvc_obj_list = []
    for pvc in pvc_list['items']:
        pvc_dict = ocp_pvc_obj.get(resource_name=pvc.get('metadata').get('name'))
        pvc_obj_list.append(PVC(**pvc_dict))
    return pvc_obj_list
def get_pod_obj(name, namespace=None):
    """
    Get the pod object for the given pod name.

    Args:
        name (str): Name of the pod
        namespace (str): Namespace in which to look the pod up

    Returns:
        obj : A pod object
    """
    ocp_obj = OCP(api_version='v1', kind=constants.POD, namespace=namespace)
    return Pod(**ocp_obj.get(resource_name=name))
def get_deployments_having_label(label, namespace):
    """
    Fetch deployment resources carrying the given label in the given namespace.

    Args:
        label (str): Label selector the deployments must match
        namespace (str): Namespace in which to look the deployments up

    Return:
        list: Deployment resource dictionaries matching the selector
    """
    ocp_deployment = OCP(kind=constants.DEPLOYMENT, namespace=namespace)
    deployments = ocp_deployment.get(selector=label).get('items')
    return deployments
def get_job_obj(name, namespace=defaults.ROOK_CLUSTER_NAMESPACE):
    """
    Get the job instance for the given job name.

    Args:
        name (str): The name of the job
        namespace (str): The namespace to look in

    Returns:
        OCS: A job OCS instance
    """
    job_ocp = OCP(kind=constants.JOB, namespace=namespace)
    job_data = job_ocp.get(resource_name=name)
    return OCS(**job_data)
def get_ready_replica_count(machine_set):
    """
    Get the number of replicas in ready state in a machine set.

    Args:
        machine_set (str): Machineset name

    Returns:
        int: Replica count in ready state
    """
    machinesets_obj = OCP(
        kind=constants.MACHINESETS,
        namespace=constants.OPENSHIFT_MACHINE_API_NAMESPACE,
    )
    machine_set_data = machinesets_obj.get(resource_name=machine_set)
    return machine_set_data.get("status").get("readyReplicas")
def get_ocs_csv():
    """
    Get the OCS CSV object

    Returns:
        CSV: OCS CSV object

    Raises:
        CSVNotFound: In case no CSV found.
    """
    ver = get_semantic_ocs_version_from_config()
    # From ODF 4.9 the operator name changed from ocs-operator to odf-operator
    operator_base = (
        defaults.OCS_OPERATOR_NAME if ver < VERSION_4_9 else defaults.ODF_OPERATOR_NAME
    )
    operator_name = f"{operator_base}.openshift-storage"
    operator = OCP(kind="operator", resource_name=operator_name)
    # operator.data containing "Error" indicates the operator resource is absent
    if "Error" in operator.data:
        raise CSVNotFound(f"{operator_name} is not found, csv check will be skipped")
    namespace = config.ENV_DATA["cluster_namespace"]
    operator_selector = get_selector_for_ocs_operator()
    subscription_plan_approval = config.DEPLOYMENT.get("subscription_plan_approval")
    ocs_package_manifest = PackageManifest(
        resource_name=defaults.OCS_OPERATOR_NAME,
        selector=operator_selector,
        subscription_plan_approval=subscription_plan_approval,
    )
    channel = config.DEPLOYMENT.get("ocs_csv_channel")
    ocs_csv_name = None
    # OCS CSV is extracted from the available CSVs in cluster namespace
    # for Openshift dedicated platform
    if (
        config.ENV_DATA["platform"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM
        or config.ENV_DATA["platform"].lower() == constants.ROSA_PLATFORM
    ):
        ocp_cluster = OCP(namespace=config.ENV_DATA["cluster_namespace"], kind="csv")
        # Last matching CSV wins when several share the operator-name prefix
        for item in ocp_cluster.get()["items"]:
            if item["metadata"]["name"].startswith(defaults.OCS_OPERATOR_NAME):
                ocs_csv_name = item["metadata"]["name"]
        if not ocs_csv_name:
            raise CSVNotFound(f"No OCS CSV found for {config.ENV_DATA['platform']}")
    else:
        # On other platforms resolve the current CSV through the package
        # manifest for the configured channel
        ocs_csv_name = ocs_package_manifest.get_current_csv(channel=channel)
    ocs_csv = CSV(resource_name=ocs_csv_name, namespace=namespace)
    log.info(f"Check if OCS operator: {ocs_csv_name} is in Succeeded phase.")
    ocs_csv.wait_for_phase(phase="Succeeded", timeout=600)
    return ocs_csv
def finalizer():
    # Scale the noobaa operator deployment back to a single replica if the
    # test left it at a different count.
    deploy_obj = OCP(
        kind=constants.DEPLOYMENT,
        namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
    )
    current_deploy = deploy_obj.get(
        resource_name=constants.NOOBAA_OPERATOR_DEPLOYMENT
    )
    if current_deploy["spec"]["replicas"] != 1:
        logger.info(
            f"Scaling back {constants.NOOBAA_OPERATOR_DEPLOYMENT} deployment to replica: 1"
        )
        deploy_obj.exec_oc_cmd(
            f"scale deployment {constants.NOOBAA_OPERATOR_DEPLOYMENT} --replicas=1"
        )
def get_all_pvcs(namespace=None):
    """
    Get all PVCs in the given namespace.

    Args:
        namespace (str): Name of namespace; defaults to the cluster namespace

    Returns:
        dict: Dict of all pvc in namespaces
    """
    target_namespace = namespace or config.ENV_DATA['cluster_namespace']
    ocp_pvc_obj = OCP(kind=constants.PVC, namespace=target_namespace)
    return ocp_pvc_obj.get()
def get_provider():
    """
    Return the OCP Provider (Platform)

    Returns:
        str: The Provider that the OCP is running on
    """
    ocp_cluster = OCP(kind="", resource_name="nodes")
    node_spec = ocp_cluster.get("nodes")["items"][0]["spec"]
    if "providerID" not in node_spec:
        return "BareMetal"
    return node_spec["providerID"].split(":")[0]
def refresh_connection(self):
    """
    Login into OCP, refresh endpoint and token.
    """
    ocp = OCP(kind=constants.ROUTE, namespace=defaults.OCS_MONITORING_NAMESPACE)
    # Snapshot the kubeconfig content before logging in; it is written back
    # after the login below. NOTE(review): presumably the login updates the
    # kubeconfig file and the snapshot/restore keeps the original
    # credentials/context intact for the rest of the run -- confirm.
    kubeconfig = os.getenv("KUBECONFIG")
    kube_data = ""
    with open(kubeconfig, "r") as kube_file:
        kube_data = kube_file.readlines()
    assert ocp.login(self._user, self._password), "Login to OCP failed"
    self._token = ocp.get_user_token()
    # Restore the kubeconfig captured before the login above
    with open(kubeconfig, "w") as kube_file:
        kube_file.writelines(kube_data)
    route_obj = ocp.get(resource_name=defaults.PROMETHEUS_ROUTE)
    self._endpoint = "https://" + route_obj["spec"]["host"]
def get_machinesets():
    """
    Get the names of all machine sets in the cluster.

    Returns:
        list: machine set names
    """
    machinesets_obj = OCP(
        kind=constants.MACHINESETS,
        namespace=constants.OPENSHIFT_MACHINE_API_NAMESPACE,
    )
    # The machine set name is carried in the selector's matchLabels
    return [
        machine.get('spec').get('selector').get('matchLabels').get(
            'machine.openshift.io/cluster-api-machineset'
        )
        for machine in machinesets_obj.get()['items']
    ]
def get_provider():
    """
    Return the OCP Provider (Platform)

    Returns:
        str: The Provider that the OCP is running on
    """
    first_node_spec = OCP(
        kind='', resource_name='nodes'
    ).get('nodes')['items'][0]['spec']
    if 'providerID' in first_node_spec:
        return first_node_spec['providerID'].split(':')[0]
    return "BareMetal"
def create_ocs_jenkins_template(self):
    """
    Create OCS Jenkins Template
    """
    log.info("Create Jenkins Template, jenkins-persistent-ocs")
    ocp_obj = OCP(namespace="openshift", kind="template")
    # Clone the stock jenkins-persistent template and rebrand it as
    # jenkins-persistent-ocs
    tmp_dict = ocp_obj.get(resource_name="jenkins-persistent", out_yaml_format=True)
    tmp_dict["labels"]["app"] = "jenkins-persistent-ocs"
    tmp_dict["labels"]["template"] = "jenkins-persistent-ocs-template"
    tmp_dict["metadata"]["name"] = "jenkins-persistent-ocs"
    # Find Kind: 'PersistentVolumeClaim' position in the objects list, differs in OCP 4.5 and OCP 4.6.
    sc_name = (
        constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD
        if storagecluster_independent_check()
        else constants.DEFAULT_STORAGECLASS_RBD
    )
    # Point every PVC object in the template at the chosen OCS storage class
    for i in range(len(tmp_dict["objects"])):
        if tmp_dict["objects"][i]["kind"] == constants.PVC:
            tmp_dict["objects"][i]["metadata"]["annotations"] = {
                "volume.beta.kubernetes.io/storage-class": sc_name
            }
    # NOTE(review): parameters[4] is assumed to be the volume-capacity
    # parameter -- confirm the index is stable across template versions
    tmp_dict["parameters"][4]["value"] = "10Gi"
    tmp_dict["parameters"].append({
        "description": "Override jenkins options to speed up slave spawning",
        "displayName": "Override jenkins options to speed up slave spawning",
        "name": "JAVA_OPTS",
        "value": "-Dhudson.slaves.NodeProvisioner.initialDelay=0 "
        "-Dhudson.slaves.NodeProvisioner.MARGIN=50 -Dhudson."
        "slaves.NodeProvisioner.MARGIN0=0.85",
    })
    if Version.coerce(self.ocp_version) >= Version.coerce("4.8"):
        # Added "Pipeline Utility Steps" plugin via Jenkins Template
        # OCP team changed the default plugin list on OCP4.9
        # NOTE(review): objects[3] is assumed to be the DeploymentConfig
        # carrying the jenkins container -- confirm against the template
        tmp_dict["objects"][3]["spec"]["template"]["spec"]["containers"][
            0]["env"].append({
            "name": "INSTALL_PLUGINS",
            "value": "scm-api:2.6.5,pipeline-utility-steps:2.12.0,workflow-step-api:622."
            "vb_8e7c15b_c95a_,workflow-cps:2648.va9433432b33c,workflow-api:2.47",
        })
    ocs_jenkins_template_obj = OCS(**tmp_dict)
    ocs_jenkins_template_obj.create()
def get_list_pvc_objs_created_on_monitoring_pods():
    """
    Get PVC objects created on monitoring pods.

    Returns:
        list: List of pvc objs
    """
    monitoring_ns = defaults.OCS_MONITORING_NAMESPACE
    pvc_list = get_all_pvcs(namespace=monitoring_ns)
    ocp_pvc_obj = OCP(kind=constants.PVC, namespace=monitoring_ns)
    pvc_objs = []
    for item in pvc_list["items"]:
        pvc_name = item.get("metadata").get("name")
        pvc_objs.append(PVC(**ocp_pvc_obj.get(resource_name=pvc_name)))
    return pvc_objs
def get_builds_obj(self, namespace):
    """
    Get all jenkins builds.

    Args:
        namespace (str): Namespace in which to look the builds up

    Returns:
        list: jenkins build OCS objects
    """
    build_obj_list = []
    build_list = self.get_build_name_by_pattern(
        pattern=constants.JENKINS_BUILD, namespace=namespace
    )
    # Create the OCP client once, instead of once per build as before
    ocp_obj = OCP(api_version='v1', kind='Build', namespace=namespace)
    for build_name in build_list:
        ocp_dict = ocp_obj.get(resource_name=build_name)
        build_obj_list.append(OCS(**ocp_dict))
    return build_obj_list
def get_csi_provisioner_pod(interface):
    """
    Get the provisioner pod names based on interface.

    Args:
        interface (str): The interface type; constants.CEPHBLOCKPOOL selects
            the RBD provisioner, anything else the CephFS provisioner

    Returns:
        tuple: Names (str) of the first two provisioner pods
            NOTE(review): this returns pod name strings, not Pod objects,
            and indexes items [0] and [1] -- it assumes at least two
            provisioner pods exist; with a single pod this raises
            IndexError. Confirm against the provisioner replica count.
    """
    ocp_pod_obj = OCP(kind=constants.POD, namespace=config.ENV_DATA['cluster_namespace'])
    selector = 'app=csi-rbdplugin-provisioner' if (
        interface == constants.CEPHBLOCKPOOL) else 'app=csi-cephfsplugin-provisioner'
    provision_pod_items = ocp_pod_obj.get(selector=selector)['items']
    assert provision_pod_items, f"No {interface} provisioner pod found"
    provisioner_pod = (Pod(**provision_pod_items[0]).name, Pod(**provision_pod_items[1]).name)
    return provisioner_pod
def get_ceph_tools_pod():
    """
    Get the Ceph tools pod.

    Returns:
        Pod object: The Ceph tools pod object

    Raises:
        AssertionError: If no Ceph tools pod is found
    """
    ocp_pod_obj = OCP(
        kind=constants.POD,
        namespace=config.ENV_DATA['cluster_namespace']
    )
    ct_pod_items = ocp_pod_obj.get(
        selector='app=rook-ceph-tools'
    )['items']
    # Check the list before indexing: indexing [0] first (as the original
    # did) raises IndexError on an empty list, so the assertion message was
    # never reached. Also dropped the pointless f-prefix on the message.
    assert ct_pod_items, "No Ceph tools pod found"
    return Pod(**ct_pod_items[0])
def get_all_pvcs(namespace=None, selector=None):
    """
    Get all PVCs in the given namespace.

    Args:
        namespace (str): Name of namespace ('all-namespaces' to get all namespaces)
        selector (str): The label selector to look for

    Returns:
        dict: Dict of all pvc in namespaces
    """
    all_ns = namespace == 'all-namespaces'
    if not namespace:
        namespace = config.ENV_DATA['cluster_namespace']
    ocp_pvc_obj = OCP(kind=constants.PVC, namespace=namespace)
    return ocp_pvc_obj.get(selector=selector, all_namespaces=all_ns)
def finalizer():
    # Scale the noobaa DB statefulset back to one replica and remove the
    # restore PVC created by the test.
    sst_obj = OCP(
        kind=constants.STATEFULSET,
        namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
    )
    noobaa_db_sst_obj = sst_obj.get(resource_name=self.noobaa_db_sst_name)
    if noobaa_db_sst_obj["spec"]["replicas"] != 1:
        # The original built a (result, message) tuple here, so the failure
        # message was a no-op; assert on the scale-up result instead.
        assert modify_statefulset_replica_count(
            statefulset_name=self.noobaa_db_sst_name, replica_count=1
        ), f"Failed to scale up the statefulset {self.noobaa_db_sst_name}"
    try:
        self.restore_pvc_obj.delete()
    except CommandFailed as ex:
        # Already-deleted PVC is fine; re-raise anything else
        if f'"{self.restore_pvc_obj.name}" not found' not in str(ex):
            raise ex
def test_pv_scale_out(self, backingstore_factory):
    """
    Test to check the scale out functionality of pv pool backing store.
    """
    pv_backingstore = backingstore_factory(
        "OC",
        {"pv": [(1, MIN_PV_BACKINGSTORE_SIZE_IN_GB, "ocs-storagecluster-ceph-rbd")]},
    )[0]

    logger.info(f"Scaling out PV Pool {pv_backingstore.name}")
    pv_backingstore.vol_num += 1
    edit_pv_backingstore = OCP(
        kind="BackingStore",
        namespace=config.ENV_DATA["cluster_namespace"],
    )
    patch_params = (
        f'{{"spec":{{"pvPool":{{"numVolumes":{pv_backingstore.vol_num}}}}}}}'
    )
    edit_pv_backingstore.patch(
        resource_name=pv_backingstore.name,
        params=patch_params,
        format_type="merge",
    )

    logger.info("Checking if backingstore went to SCALING state")
    scaling_sampler = TimeoutSampler(
        timeout=60,
        sleep=5,
        func=check_pv_backingstore_status,
        backingstore_name=pv_backingstore.name,
        namespace=config.ENV_DATA["cluster_namespace"],
        desired_status="`SCALING`",
    )
    assert scaling_sampler.wait_for_func_status(
        result=True
    ), f"Backing Store {pv_backingstore.name} never reached SCALING state"

    logger.info("Waiting for backingstore to return to OPTIMAL state")
    wait_for_pv_backingstore(
        pv_backingstore.name, config.ENV_DATA["cluster_namespace"]
    )

    logger.info("Check if PV Pool scale out was successful")
    backingstore_dict = edit_pv_backingstore.get(pv_backingstore.name)
    assert (
        backingstore_dict["spec"]["pvPool"]["numVolumes"] == pv_backingstore.vol_num
    ), "Scale out PV Pool failed. "
    logger.info("Scale out was successful")
def get_all_pods(namespace=None, selector=None, selector_label='app',
                 exclude_selector=False, wait=False):
    """
    Get all pods in a namespace.

    Args:
        namespace (str): Name of the namespace
            If namespace is None - get all pods
        selector (list) : List of the resource selector to search with.
            Example: ['alertmanager','prometheus']
        selector_label (str): Label of selector (default: app).
        exclude_selector (bool): If True, return pods whose label value is
            NOT in ``selector``
        wait (bool): If True, sleep before listing so pods failed over to
            other nodes have time to come up

    Returns:
        list: List of Pod objects
    """
    ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
    if wait:
        # In case of >4 worker nodes node failures automatic failover of
        # pods to other nodes will happen. So, we are waiting for the pods
        # to come up on new node
        wait_time = 180
        logger.info(f"Waiting for {wait_time}s for the pods to stabilize")
        time.sleep(wait_time)
    pods = ocp_pod_obj.get()['items']
    if selector:
        def _label_matches(pod):
            value = pod['metadata'].get('labels', {}).get(selector_label)
            if exclude_selector:
                return value not in selector
            return value in selector

        pods = [pod for pod in pods if _label_matches(pod)]
    return [Pod(**pod) for pod in pods]
def create_configmap_cluster_monitoring_pod(sc_name=None, telemeter_server_url=None):
    """
    Create a configmap named cluster-monitoring-config based on the arguments.

    Args:
        sc_name (str): Name of the storage class which will be used for
            persistent storage needs of OCP Prometheus and Alert Manager.
            If not defined, the related options won't be present in the
            monitoring config map and the default (non persistent) storage
            will be used for OCP Prometheus and Alert Manager.
        telemeter_server_url (str): URL of Telemeter server where telemeter
            client (running in the cluster) will send it's telemetry data.
            If not defined, related option won't be present in the
            monitoring config map and the default (production) telemeter
            server will receive the metrics data.
    """
    logger.info("Creating configmap cluster-monitoring-config")
    config_map = templating.load_yaml(
        constants.CONFIGURE_PVC_ON_MONITORING_POD)
    # Renamed from `config` to avoid shadowing the framework config module
    # used throughout this file.
    monitoring_config = yaml.safe_load(config_map["data"]["config.yaml"])
    if sc_name is not None:
        logger.info(
            f"Setting {sc_name} as storage backed for Prometheus and Alertmanager"
        )
        monitoring_config["prometheusK8s"]["volumeClaimTemplate"]["spec"][
            "storageClassName"] = sc_name
        monitoring_config["alertmanagerMain"]["volumeClaimTemplate"]["spec"][
            "storageClassName"] = sc_name
    else:
        # No storage class requested: drop the sections entirely so the
        # default (non persistent) storage is used.
        del monitoring_config["prometheusK8s"]
        del monitoring_config["alertmanagerMain"]
    if telemeter_server_url is not None:
        logger.info(f"Setting {telemeter_server_url} as telemeter server url")
        monitoring_config["telemeterClient"] = {}
        monitoring_config["telemeterClient"]["telemeterServerURL"] = telemeter_server_url
    config_map["data"]["config.yaml"] = yaml.dump(monitoring_config)
    assert helpers.create_resource(**config_map)
    ocp = OCP("v1", "ConfigMap", defaults.OCS_MONITORING_NAMESPACE)
    # Verify the configmap actually exists after creation
    assert ocp.get(resource_name="cluster-monitoring-config")
    logger.info("Successfully created configmap cluster-monitoring-config")
def get_all_pvcs_in_storageclass(storage_class):
    """
    Return all the PVCs backed by a given storage class.

    Args:
        storage_class (str): name of the storage class

    Returns:
        list: list of PVC objects whose backing storage class matches
    """
    ocp_pvc_obj = OCP(kind=constants.PVC)
    all_pvc_dicts = ocp_pvc_obj.get(all_namespaces=True)['items']
    matching_pvcs = []
    for pvc_dict in all_pvc_dicts:
        pvc_obj = PVC(**pvc_dict)
        if pvc_obj.backed_sc == storage_class:
            matching_pvcs.append(pvc_obj)
    return matching_pvcs
def get_node_index_in_local_block(node_name):
    """
    Get the node index as it appears in the nodeSelector values of the
    local block resource.

    Args:
        node_name (str): The node name to search for

    Returns:
        int: The node index in the nodeSelector values
    """
    ocp_lvs_obj = OCP(
        kind=constants.LOCAL_VOLUME_SET,
        namespace=defaults.LOCAL_STORAGE_NAMESPACE,
        resource_name=constants.LOCAL_BLOCK_RESOURCE,
    )
    node_selector = ocp_lvs_obj.get().get("spec").get("nodeSelector")
    match_expression = node_selector.get("nodeSelectorTerms")[0].get(
        "matchExpressions")[0]
    return match_expression.get("values").index(node_name)