def test_monitoring_delete_pvc(self):
    """
    Test case to validate whether delete pvcs+configmap and recovery of
    a node where monitoring pods running has no functional impact.

    Deletes the 'cluster-monitoring-config' configmap and all monitoring
    PVCs, then recreates the configmap and verifies pods return to
    Running, PVCs return to Bound, and the RBD volumes are re-mounted.
    """
    # Get 'cluster-monitoring-config' configmap and save a copy so it can
    # be recreated after deletion
    ocp_configmap = ocp.OCP(
        namespace=constants.MONITORING_NAMESPACE, kind="configmap"
    )
    configmap_dict = ocp_configmap.get(resource_name="cluster-monitoring-config")
    dir_configmap = tempfile.mkdtemp(prefix="configmap_")
    yaml_file = f"{dir_configmap}/configmap.yaml"
    templating.dump_data_to_temp_yaml(configmap_dict, yaml_file)

    # Get prometheus and alertmanager pods
    prometheus_alertmanager_pods = pod.get_all_pods(
        namespace=defaults.OCS_MONITORING_NAMESPACE,
        selector=["prometheus", "alertmanager"],
    )

    # Get all pvc on monitoring namespace
    pvc_objs_list = pvc.get_all_pvc_objs(namespace=constants.MONITORING_NAMESPACE)

    # Delete configmap
    ocp_configmap.delete(resource_name="cluster-monitoring-config")

    # Delete all pvcs on monitoring namespace
    pvc.delete_pvcs(pvc_objs=pvc_objs_list)

    # Check all the prometheus and alertmanager pods are up
    for pod_obj in prometheus_alertmanager_pods:
        wait_for_resource_state(
            resource=pod_obj, state=constants.STATUS_RUNNING, timeout=180
        )

    # Recreate the configmap from the saved yaml file
    # BUG FIX: previously the temp *directory* was passed instead of the
    # dumped yaml file path
    ocp_configmap.create(yaml_file=yaml_file)

    # Check all the PVCs are up
    for pvc_obj in pvc_objs_list:
        wait_for_resource_state(
            resource=pvc_obj, state=constants.STATUS_BOUND, timeout=180
        )

    # Check all the prometheus and alertmanager pods are up
    # and pvc are mounted on monitoring pods
    for pod_obj in prometheus_alertmanager_pods:
        wait_for_resource_state(
            resource=pod_obj, state=constants.STATUS_RUNNING, timeout=180
        )
        mount_point = pod_obj.exec_cmd_on_pod(
            command="df -kh",
            out_yaml_format=False,
        )
        # BUG FIX: the message referenced the module-level `pod` import
        # (`pod.name`) instead of the pod object under inspection
        assert "/dev/rbd" in mount_point, f"pvc is not mounted on pod {pod_obj.name}"
    log.info("Verified all pvc are mounted on monitoring pods")

    # Validate the prometheus health is ok
    assert prometheus_health_check(), "Prometheus cluster health is not OK"
def deploy_workload(self):
    """
    Deployment specific to busybox workload
    """
    self._deploy_prereqs()
    self.workload_namespace = self._get_workload_namespace()

    # Point drpc.yaml at the preferred primary cluster and write it back.
    drpc_data = templating.load_yaml(self.drpc_yaml_file)
    drpc_data["spec"]["preferredCluster"] = self.preferred_primary_cluster
    templating.dump_data_to_temp_yaml(drpc_data, self.drpc_yaml_file)
    # TODO: drpc_yaml_file needs to be committed back to the repo
    # because ACM would refetch from repo directly

    # Create the resources on Hub cluster
    config.switch_acm_ctx()
    subscription_dir = self.workload_subscription_dir
    run_cmd(f"oc create -k {subscription_dir}")
    run_cmd(f"oc create -k {subscription_dir}/{self.workload_name}")
    self.verify_workload_deployment()
def set_upgrade_images(self):
    """
    Set images for upgrade.

    When not upgrading within the current catalog source, point the OCS
    catalog source at the image/tag selected for the upgrade.
    """
    ocs_catalog = CatalogSource(
        resource_name=constants.OPERATOR_CATALOG_SOURCE_NAME,
        namespace=constants.MARKETPLACE_NAMESPACE,
    )
    if not self.upgrade_in_current_source:
        # CLEANUP: the inner condition previously re-checked
        # `not self.upgrade_in_current_source`, which is already
        # guaranteed by the enclosing `if`
        if not ocs_catalog.is_exist():
            log.info("OCS catalog source doesn't exist. Creating new one.")
            create_catalog_source(self.ocs_registry_image, ignore_upgrade=True)
        image_url = ocs_catalog.get_image_url()
        image_tag = ocs_catalog.get_image_name()
        log.info(f"Current image is: {image_url}, tag: {image_tag}")
        # True when the target version is greater than the current one
        version_change = (
            self.get_parsed_versions()[1] > self.get_parsed_versions()[0]
        )
        if self.ocs_registry_image:
            # BUG FIX: split on the *last* colon so registry URLs that
            # contain a port (e.g. "registry:5000/ns/image:tag") are not
            # mangled / do not raise on unpacking
            image_url, new_image_tag = self.ocs_registry_image.rsplit(":", 1)
        elif config.UPGRADE.get("upgrade_to_latest", True) or version_change:
            new_image_tag = get_latest_ds_olm_tag()
        else:
            new_image_tag = get_next_version_available_for_upgrade(image_tag)
        cs_data = deepcopy(ocs_catalog.data)
        image_for_upgrade = ":".join([image_url, new_image_tag])
        log.info(f"Image: {image_for_upgrade} will be used for upgrade.")
        cs_data["spec"]["image"] = image_for_upgrade
        with NamedTemporaryFile() as cs_yaml:
            dump_data_to_temp_yaml(cs_data, cs_yaml.name)
            ocs_catalog.apply(cs_yaml.name)
def create(self, do_reload=True):
    """
    Dump this resource's data to its temporary yaml file and create the
    resource on the cluster via the underlying ocp object.

    Args:
        do_reload (bool): when True, reload the resource data afterwards

    Returns:
        the status returned by the ocp create call
    """
    log.info(f"Adding {self.kind} with name {self.name}")
    manifest_path = self.temp_yaml.name
    templating.dump_data_to_temp_yaml(self.data, manifest_path)
    result = self.ocp.create(yaml_file=manifest_path)
    if do_reload:
        self.reload()
    return result
def subscribe_ocs(self):
    """
    Prepare the subscription manifest and subscribe to the OCS operator.
    """
    subscription_yaml_data = templating.load_yaml(
        constants.SUBSCRIPTION_YAML)
    subscription_plan_approval = config.DEPLOYMENT.get(
        'subscription_plan_approval')
    if subscription_plan_approval:
        subscription_yaml_data['spec']['installPlanApproval'] = (
            subscription_plan_approval)
    channel = config.DEPLOYMENT.get('ocs_csv_channel')
    if channel:
        subscription_yaml_data['spec']['channel'] = channel
    # delete=False: the file must survive for the oc command below
    subscription_manifest = tempfile.NamedTemporaryFile(
        mode='w+', prefix='subscription_manifest', delete=False)
    templating.dump_data_to_temp_yaml(subscription_yaml_data,
                                      subscription_manifest.name)
    run_cmd(f"oc create -f {subscription_manifest.name}")
    # wait for package manifest
    package_manifest = PackageManifest(
        resource_name=defaults.OCS_OPERATOR_NAME)
    # Wait for package manifest is ready
    package_manifest.wait_for_resource(timeout=300)
    # CLEANUP: `channel` and `subscription_plan_approval` were previously
    # re-read from config here although already fetched above
    if subscription_plan_approval == 'Manual':
        wait_for_install_plan_and_approve(self.namespace)
def create_external_secret(ocs_version=None, apply=False):
    """
    Creates secret data for external cluster

    Args:
        ocs_version (str): OCS version
        apply (bool): True if want to use apply instead of create command

    Raises:
        ExternalClusterDetailsException: when no external cluster data is
            configured
    """
    ocs_version = ocs_version or config.ENV_DATA["ocs_version"]
    secret_data = templating.load_yaml(constants.EXTERNAL_CLUSTER_SECRET_YAML)
    # OCS >= 4.8 stores the external cluster details under a versioned key.
    if Version.coerce(ocs_version) >= Version.coerce("4.8"):
        details_key = "external_cluster_details_ocs48"
    else:
        details_key = "external_cluster_details"
    external_cluster_details = config.EXTERNAL_MODE.get(details_key, "")
    if not external_cluster_details:
        raise ExternalClusterDetailsException("No external cluster data found")
    secret_data["data"]["external_cluster_details"] = external_cluster_details
    secret_data_yaml = tempfile.NamedTemporaryFile(
        mode="w+", prefix="external_cluster_secret", delete=False
    )
    templating.dump_data_to_temp_yaml(secret_data, secret_data_yaml.name)
    logger.info(
        f"Creating external cluster secret for OCS version: {ocs_version}")
    oc_type = "apply" if apply else "create"
    run_cmd(f"oc {oc_type} -f {secret_data_yaml.name}")
def create_stage_operator_source(self):
    """
    This prepare operator source for OCS deployment from stage.

    Logs in to quay to obtain an auth token, stores it in a Secret,
    creates an OperatorSource pointing at the stage registry namespace,
    and waits for the resulting CatalogSource to become READY.
    """
    logger.info("Adding Stage Secret")
    # generate quay token
    credentials = {
        "user": {
            "username": config.DEPLOYMENT["stage_quay_username"],
            "password": config.DEPLOYMENT["stage_quay_password"],
        }
    }
    token = requests.post(
        url='https://quay.io/cnr/api/v1/users/login',
        data=json.dumps(credentials),
        headers={'Content-Type': 'application/json'},
    ).json()['token']
    stage_ns = config.DEPLOYMENT["stage_namespace"]
    # create Secret holding the quay token
    stage_os_secret = templating.load_yaml(
        constants.OPERATOR_SOURCE_SECRET_YAML
    )
    stage_os_secret['metadata']['name'] = (
        constants.OPERATOR_SOURCE_SECRET_NAME
    )
    stage_os_secret['stringData']['token'] = token
    # delete=False: the dumped manifest must survive for the oc call below
    stage_secret_data_yaml = tempfile.NamedTemporaryFile(
        mode='w+',
        prefix=constants.OPERATOR_SOURCE_SECRET_NAME,
        delete=False,
    )
    templating.dump_data_to_temp_yaml(
        stage_os_secret, stage_secret_data_yaml.name
    )
    run_cmd(f"oc create -f {stage_secret_data_yaml.name}")
    # brief delay so the secret is available before the OperatorSource
    # referencing it is created
    logger.info("Waiting 10 secs after secret is created")
    time.sleep(10)
    logger.info("Adding Stage Operator Source")
    # create Operator Source referencing the secret created above
    stage_os = templating.load_yaml(
        constants.OPERATOR_SOURCE_YAML
    )
    stage_os['spec']['registryNamespace'] = stage_ns
    stage_os['spec']['authorizationToken']['secretName'] = (
        constants.OPERATOR_SOURCE_SECRET_NAME
    )
    stage_os_data_yaml = tempfile.NamedTemporaryFile(
        mode='w+', prefix=constants.OPERATOR_SOURCE_NAME, delete=False
    )
    templating.dump_data_to_temp_yaml(
        stage_os, stage_os_data_yaml.name
    )
    run_cmd(f"oc create -f {stage_os_data_yaml.name}")
    catalog_source = CatalogSource(
        resource_name=constants.OPERATOR_SOURCE_NAME,
        namespace=constants.MARKETPLACE_NAMESPACE,
    )
    # Wait for catalog source is ready
    catalog_source.wait_for_state("READY")
def update_csi_kms_vault_connection_details(update_config):
    """
    Update the vault connection details in the resource
    csi-kms-connection-details

    Args:
        update_config (dict): A dictionary of vault info to be updated

    Raises:
        KMSConnectionDetailsError: when the csi-kms-connection-details
            configmap does not exist
    """
    # Check if csi-kms-connection-details resource already exists
    # if not we might need to rise an exception because without
    # csi-kms-connection details we can't proceed with update
    csi_kms_conf = ocp.OCP(
        resource_name=constants.VAULT_KMS_CSI_CONNECTION_DETAILS,
        kind="ConfigMap",
        namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
    )
    try:
        csi_kms_conf.get()
    except CommandFailed:
        raise KMSConnectionDetailsError(
            "CSI KMS connection details don't exist, can't continue with update"
        )
    # Strip annotations before re-applying the resource
    if csi_kms_conf.data.get("metadata").get("annotations"):
        csi_kms_conf.data["metadata"].pop("annotations")
    # IDIOM: iterate items() instead of keys() + per-key lookup, and
    # assign directly instead of a one-item dict.update()
    for key, value in update_config.items():
        csi_kms_conf.data["data"][key] = json.dumps(value)
    resource_data_yaml = tempfile.NamedTemporaryFile(
        mode="w+", prefix="csikmsconndetailsupdate", delete=False
    )
    templating.dump_data_to_temp_yaml(csi_kms_conf.data, resource_data_yaml.name)
    run_cmd(f"oc apply -f {resource_data_yaml.name}", timeout=300)
def deploy_ocs_via_operator(self):
    """
    Method for deploy OCS via OCS operator

    Creates the OLM resources, waits for the operator CSV to succeed,
    then builds and creates the StorageCluster CR.
    """
    logger.info("Deployment of OCS via OCS operator")
    olm_manifest = self.get_olm_manifest()
    self.label_and_taint_nodes()
    run_cmd(f"oc create -f {olm_manifest}")
    # wait for package manifest
    package_manifest = PackageManifest(
        resource_name=defaults.OCS_OPERATOR_NAME)
    # Wait for package manifest is ready
    package_manifest.wait_for_resource()
    channel = config.DEPLOYMENT.get('ocs_csv_channel')
    csv_name = package_manifest.get_current_csv(channel=channel)
    csv = CSV(resource_name=csv_name, kind="csv", namespace=self.namespace)
    csv.wait_for_phase("Succeeded")
    # Build the StorageCluster CR from the configured template
    ocs_operator_storage_cluster_cr = config.DEPLOYMENT.get(
        'ocs_operator_storage_cluster_cr')
    cluster_data = templating.load_yaml(ocs_operator_storage_cluster_cr)
    cluster_data['metadata']['name'] = config.ENV_DATA[
        'storage_cluster_name']
    # Inject the device set with the configured per-device storage size
    deviceset_data = templating.load_yaml(constants.DEVICESET_YAML)
    device_size = int(
        config.ENV_DATA.get('device_size', defaults.DEVICE_SIZE))
    deviceset_data['dataPVCTemplate']['spec']['resources']['requests'][
        'storage'] = f"{device_size}Gi"
    cluster_data['spec']['storageDeviceSets'] = [deviceset_data]
    # delete=False: manifest must survive for the oc command below
    cluster_data_yaml = tempfile.NamedTemporaryFile(
        mode='w+', prefix='cluster_storage', delete=False)
    templating.dump_data_to_temp_yaml(cluster_data, cluster_data_yaml.name)
    run_cmd(f"oc create -f {cluster_data_yaml.name}")
def setup_ceph_debug():
    """
    Set Ceph to run in debug log level using a ConfigMap.
    This functionality is available starting OCS 4.7.
    """
    ceph_debug_log_configmap_data = templating.load_yaml(
        constants.CEPH_CONFIG_DEBUG_LOG_LEVEL_CONFIGMAP)
    # Baseline rook-ceph config values changed in OCS 4.8
    ocs_version = version.get_semantic_ocs_version_from_config()
    if ocs_version < version.VERSION_4_8:
        stored_values = constants.ROOK_CEPH_CONFIG_VALUES.split("\n")
    else:
        stored_values = constants.ROOK_CEPH_CONFIG_VALUES_48.split("\n")
    # NOTE(review): "config" is assigned a Python *list* (baseline lines +
    # debug lines); presumably the yaml dump / consumer expects a list
    # rather than a newline-joined string — confirm against the template.
    ceph_debug_log_configmap_data["data"]["config"] = (
        stored_values + constants.CEPH_DEBUG_CONFIG_VALUES)
    # delete=False: the manifest must survive for the oc command below
    ceph_configmap_yaml = tempfile.NamedTemporaryFile(mode="w+",
                                                      prefix="config_map",
                                                      delete=False)
    templating.dump_data_to_temp_yaml(ceph_debug_log_configmap_data,
                                      ceph_configmap_yaml.name)
    log.info(
        "Setting Ceph to work in debug log level using a new ConfigMap resource"
    )
    run_cmd(f"oc create -f {ceph_configmap_yaml.name}")
def test_upgrade():
    """
    Upgrade OCS by pointing the catalog source at a newer image tag, wait
    for the operator CSV to roll over to the new version, and verify image
    versions, installation health and Ceph health throughout.

    Raises:
        TimeoutException: when no new CSV appears after the upgrade
        CephHealthException: when Ceph hit HEALTH_ERROR during the upgrade
    """
    ceph_cluster = CephCluster()
    ceph_cluster.enable_health_monitor()
    namespace = config.ENV_DATA['cluster_namespace']
    ocs_catalog = CatalogSource(
        resource_name=constants.OPERATOR_CATALOG_SOURCE_NAME,
        namespace="openshift-marketplace",
    )
    image_url = ocs_catalog.get_image_url()
    image_tag = ocs_catalog.get_image_name()
    if config.DEPLOYMENT.get('upgrade_to_latest', True):
        new_image_tag = get_latest_ds_olm_tag()
    else:
        new_image_tag = get_next_version_available_for_upgrade(image_tag)
    cs_data = deepcopy(ocs_catalog.data)
    cs_data['spec']['image'] = ':'.join([image_url, new_image_tag])

    # Record the pre-upgrade CSV and its images for later comparison
    package_manifest = PackageManifest(resource_name=OCS_OPERATOR_NAME)
    csv_name_pre_upgrade = package_manifest.get_current_csv()
    log.info(f"CSV name before upgrade is: {csv_name_pre_upgrade}")
    csv_pre_upgrade = CSV(resource_name=csv_name_pre_upgrade, namespace=namespace)
    pre_upgrade_images = get_images(csv_pre_upgrade.get())

    with NamedTemporaryFile() as cs_yaml:
        dump_data_to_temp_yaml(cs_data, cs_yaml.name)
        ocs_catalog.apply(cs_yaml.name)
    # Wait for package manifest is ready
    package_manifest.wait_for_resource()
    subscription_plan_approval = config.DEPLOYMENT.get(
        'subscription_plan_approval')
    if subscription_plan_approval == 'Manual':
        wait_for_install_plan_and_approve(namespace)

    attempts = 145
    # BUG FIX: range(1, attempts) never yields `attempts`, so the timeout
    # branch below was dead code and the loop could fall through silently
    # with the old CSV still current; include the final attempt.
    for attempt in range(1, attempts + 1):
        if attempts == attempt:
            raise TimeoutException("No new CSV found after upgrade!")
        log.info(f"Attempt {attempt}/{attempts} to check CSV upgraded.")
        package_manifest.reload_data()
        csv_name_post_upgrade = package_manifest.get_current_csv()
        if csv_name_post_upgrade == csv_name_pre_upgrade:
            log.info(f"CSV is still: {csv_name_post_upgrade}")
            sleep(5)
        else:
            log.info(f"CSV now upgraded to: {csv_name_post_upgrade}")
            break

    csv_post_upgrade = CSV(resource_name=csv_name_post_upgrade, namespace=namespace)
    log.info(
        f"Waiting for CSV {csv_name_post_upgrade} to be in succeeded state")
    csv_post_upgrade.wait_for_phase("Succeeded", timeout=600)
    post_upgrade_images = get_images(csv_post_upgrade.get())
    old_images, _, _ = get_upgrade_image_info(pre_upgrade_images,
                                              post_upgrade_images)
    verify_image_versions(old_images)
    ocs_install_verification(timeout=600, skip_osd_distribution_check=True)
    ceph_cluster.disable_health_monitor()
    if ceph_cluster.health_error_status:
        # BUG FIX: the exception was constructed but never raised, so a
        # HEALTH_ERROR during upgrade went unreported
        raise CephHealthException(f"During upgrade hit Ceph HEALTH_ERROR: "
                                  f"{ceph_cluster.health_error_status}")
def subscribe_ocs(self):
    """
    Prepare the subscription manifest and subscribe to the OCS/ODF
    operator (odf-operator for OCS >= 4.9, ocs-operator otherwise).
    """
    import time  # local import keeps this fix self-contained

    live_deployment = config.DEPLOYMENT.get("live_deployment")
    # Non-live IBM Cloud deployments need the pull secret linked first
    if (
        config.ENV_DATA["platform"] == constants.IBMCLOUD_PLATFORM
        and not live_deployment
    ):
        link_all_sa_and_secret_and_delete_pods(constants.OCS_SECRET, self.namespace)
    operator_selector = get_selector_for_ocs_operator()
    # wait for package manifest
    # For OCS version >= 4.9, we have odf-operator
    ocs_version = version.get_semantic_ocs_version_from_config()
    if ocs_version >= version.VERSION_4_9:
        ocs_operator_name = defaults.ODF_OPERATOR_NAME
        subscription_file = constants.SUBSCRIPTION_ODF_YAML
    else:
        ocs_operator_name = defaults.OCS_OPERATOR_NAME
        subscription_file = constants.SUBSCRIPTION_YAML
    package_manifest = PackageManifest(
        resource_name=ocs_operator_name,
        selector=operator_selector,
    )
    # Wait for package manifest is ready
    package_manifest.wait_for_resource(timeout=300)
    default_channel = package_manifest.get_default_channel()
    subscription_yaml_data = templating.load_yaml(subscription_file)
    subscription_plan_approval = config.DEPLOYMENT.get("subscription_plan_approval")
    if subscription_plan_approval:
        subscription_yaml_data["spec"][
            "installPlanApproval"
        ] = subscription_plan_approval
    custom_channel = config.DEPLOYMENT.get("ocs_csv_channel")
    if custom_channel:
        logger.info(f"Custom channel will be used: {custom_channel}")
        subscription_yaml_data["spec"]["channel"] = custom_channel
    else:
        logger.info(f"Default channel will be used: {default_channel}")
        subscription_yaml_data["spec"]["channel"] = default_channel
    if config.DEPLOYMENT.get("stage"):
        subscription_yaml_data["spec"]["source"] = constants.OPERATOR_SOURCE_NAME
    if config.DEPLOYMENT.get("live_deployment"):
        subscription_yaml_data["spec"]["source"] = config.DEPLOYMENT.get(
            "live_content_source", defaults.LIVE_CONTENT_SOURCE
        )
    subscription_manifest = tempfile.NamedTemporaryFile(
        mode="w+", prefix="subscription_manifest", delete=False
    )
    templating.dump_data_to_temp_yaml(
        subscription_yaml_data, subscription_manifest.name
    )
    run_cmd(f"oc create -f {subscription_manifest.name}")
    logger.info("Sleeping for 15 seconds after subscribing OCS")
    # BUG FIX: the log line above announced a sleep that was never
    # actually performed
    time.sleep(15)
    if subscription_plan_approval == "Manual":
        wait_for_install_plan_and_approve(self.namespace)
def deploy_ocs_via_operator(self):
    """
    Method for deploy OCS via OCS operator

    Creates OLM resources and the subscription, waits for the catalog
    source and operator CSV, then builds and creates the StorageCluster
    CR (with platform/resource tweaks as configured).
    """
    logger.info("Deployment of OCS via OCS operator")
    olm_manifest, subscription_manifest = (
        self.get_olm_and_subscription_manifest())
    self.label_and_taint_nodes()
    run_cmd(f"oc create -f {olm_manifest}")
    catalog_source = CatalogSource(
        resource_name='ocs-catalogsource',
        namespace='openshift-marketplace',
    )
    # Wait for catalog source is ready
    catalog_source.wait_for_state("READY")
    run_cmd(f"oc create -f {subscription_manifest}")
    package_manifest = PackageManifest(
        resource_name=defaults.OCS_OPERATOR_NAME)
    # Wait for package manifest is ready
    package_manifest.wait_for_resource()
    channel = config.DEPLOYMENT.get('ocs_csv_channel')
    csv_name = package_manifest.get_current_csv(channel=channel)
    csv = CSV(resource_name=csv_name, kind="csv", namespace=self.namespace)
    csv.wait_for_phase("Succeeded", timeout=400)
    # Build the StorageCluster CR from the configured template
    ocs_operator_storage_cluster_cr = config.DEPLOYMENT.get(
        'ocs_operator_storage_cluster_cr')
    cluster_data = templating.load_yaml(ocs_operator_storage_cluster_cr)
    cluster_data['metadata']['name'] = config.ENV_DATA[
        'storage_cluster_name']
    # Inject the device set with the configured per-device storage size
    deviceset_data = templating.load_yaml(constants.DEVICESET_YAML)
    device_size = int(
        config.ENV_DATA.get('device_size', defaults.DEVICE_SIZE))
    deviceset_data['dataPVCTemplate']['spec']['resources']['requests'][
        'storage'] = f"{device_size}Gi"
    # Allow lower instance requests and limits for OCS deployment
    if config.DEPLOYMENT.get('allow_lower_instance_requirements'):
        none_resources = {'Requests': None, 'Limits': None}
        deviceset_data["resources"] = deepcopy(none_resources)
        cluster_data['spec']['resources'] = {
            resource: deepcopy(none_resources)
            for resource in ['mon', 'mds', 'rgw', 'mgr', 'noobaa']
        }
    # On vSphere, pin mon and device-set PVCs to the platform default SC
    if self.platform.lower() == constants.VSPHERE_PLATFORM:
        cluster_data['spec']['monPVCTemplate']['spec'][
            'storageClassName'] = constants.DEFAULT_SC_VSPHERE
        deviceset_data['dataPVCTemplate']['spec'][
            'storageClassName'] = constants.DEFAULT_SC_VSPHERE
    cluster_data['spec']['storageDeviceSets'] = [deviceset_data]
    # delete=False: manifest must survive for the oc command below
    cluster_data_yaml = tempfile.NamedTemporaryFile(
        mode='w+', prefix='cluster_storage', delete=False)
    templating.dump_data_to_temp_yaml(cluster_data, cluster_data_yaml.name)
    run_cmd(f"oc create -f {cluster_data_yaml.name}")
def deploy_with_external_mode(self):
    """
    This function handles the deployment of OCS on
    external/indpendent RHCS cluster

    Subscribes to the OCS operator, waits for its CSV, creates the
    external-cluster secret and the external StorageCluster CR, then
    runs post-deploy validation and sets up the Ceph toolbox.

    Raises:
        ExternalClusterDetailsException: when no external cluster data
            is configured
    """
    live_deployment = config.DEPLOYMENT.get("live_deployment")
    logger.info("Deploying OCS with external mode RHCS")
    ui_deployment = config.DEPLOYMENT.get("ui_deployment")
    if not ui_deployment:
        logger.info("Creating namespace and operator group.")
        run_cmd(f"oc create -f {constants.OLM_YAML}")
    # Non-live deployments need the stage/DS operator source first
    if not live_deployment:
        self.create_ocs_operator_source()
    self.subscribe_ocs()
    operator_selector = get_selector_for_ocs_operator()
    subscription_plan_approval = config.DEPLOYMENT.get(
        "subscription_plan_approval")
    package_manifest = PackageManifest(
        resource_name=defaults.OCS_OPERATOR_NAME,
        selector=operator_selector,
        subscription_plan_approval=subscription_plan_approval,
    )
    package_manifest.wait_for_resource(timeout=300)
    channel = config.DEPLOYMENT.get("ocs_csv_channel")
    csv_name = package_manifest.get_current_csv(channel=channel)
    csv = CSV(resource_name=csv_name, namespace=self.namespace)
    csv.wait_for_phase("Succeeded", timeout=720)
    # Create secret for external cluster
    secret_data = templating.load_yaml(
        constants.EXTERNAL_CLUSTER_SECRET_YAML)
    external_cluster_details = config.EXTERNAL_MODE.get(
        "external_cluster_details", "")
    if not external_cluster_details:
        raise ExternalClusterDetailsException(
            "No external cluster data found")
    secret_data["data"][
        "external_cluster_details"] = external_cluster_details
    # delete=False: manifests must survive for the oc commands below
    secret_data_yaml = tempfile.NamedTemporaryFile(
        mode="w+", prefix="external_cluster_secret", delete=False)
    templating.dump_data_to_temp_yaml(secret_data, secret_data_yaml.name)
    logger.info("Creating external cluster secret")
    run_cmd(f"oc create -f {secret_data_yaml.name}")
    # Create the external-mode StorageCluster CR
    cluster_data = templating.load_yaml(
        constants.EXTERNAL_STORAGE_CLUSTER_YAML)
    cluster_data["metadata"]["name"] = config.ENV_DATA[
        "storage_cluster_name"]
    cluster_data_yaml = tempfile.NamedTemporaryFile(
        mode="w+", prefix="external_cluster_storage", delete=False)
    templating.dump_data_to_temp_yaml(cluster_data, cluster_data_yaml.name)
    run_cmd(f"oc create -f {cluster_data_yaml.name}", timeout=2400)
    self.external_post_deploy_validation()
    setup_ceph_toolbox()
def create_catalog_source(image=None, ignore_upgrade=False):
    """
    This prepare catalog source manifest for deploy OCS operator from
    quay registry.

    Args:
        image (str): Image of ocs registry.
        ignore_upgrade (bool): Ignore upgrade parameter.
    """
    logger.info("Adding CatalogSource")
    if not image:
        image = config.DEPLOYMENT.get('ocs_registry_image', '')
    if not ignore_upgrade:
        upgrade = config.UPGRADE.get('upgrade', False)
    else:
        upgrade = False
    # BUG FIX: split(':') mangled registry URLs that include a port
    # (e.g. "registry:5000/ns/image:tag" -> image "registry"); split only
    # on the last colon and treat it as a tag separator only when the
    # trailing part contains no '/'.
    image_and_tag = image.rsplit(':', 1)
    if len(image_and_tag) == 2 and '/' not in image_and_tag[1]:
        image, image_tag = image_and_tag
    else:
        image_tag = None
    if not image_tag and config.REPORTING.get("us_ds") == 'DS':
        image_tag = get_latest_ds_olm_tag(
            upgrade, latest_tag=config.DEPLOYMENT.get(
                'default_latest_tag', 'latest'
            )
        )
    catalog_source_data = templating.load_yaml(
        constants.CATALOG_SOURCE_YAML
    )
    cs_name = constants.OPERATOR_CATALOG_SOURCE_NAME
    # Only rewrite the image when the template really is our CatalogSource
    change_cs_condition = (
        (image or image_tag)
        and catalog_source_data['kind'] == 'CatalogSource'
        and catalog_source_data['metadata']['name'] == cs_name
    )
    if change_cs_condition:
        default_image = config.DEPLOYMENT['default_ocs_registry_image']
        image = image if image else default_image.rsplit(':', 1)[0]
        catalog_source_data['spec']['image'] = (
            f"{image}:{image_tag if image_tag else 'latest'}"
        )
    # delete=False: the manifest must survive for the oc command below
    catalog_source_manifest = tempfile.NamedTemporaryFile(
        mode='w+', prefix='catalog_source_manifest', delete=False
    )
    templating.dump_data_to_temp_yaml(
        catalog_source_data, catalog_source_manifest.name
    )
    run_cmd(f"oc create -f {catalog_source_manifest.name}", timeout=2400)
    catalog_source = CatalogSource(
        resource_name=constants.OPERATOR_CATALOG_SOURCE_NAME,
        namespace=constants.MARKETPLACE_NAMESPACE,
    )
    # Wait for catalog source is ready
    catalog_source.wait_for_state("READY")
def subscribe_ocs(self):
    """
    Prepare the subscription manifest and subscribe to the OCS operator.
    """
    operator_selector = get_selector_for_ocs_operator()
    # wait for package manifest
    package_manifest = PackageManifest(
        resource_name=defaults.OCS_OPERATOR_NAME,
        selector=operator_selector,
    )
    # Wait for package manifest is ready
    package_manifest.wait_for_resource(timeout=300)
    default_channel = package_manifest.get_default_channel()
    subscription_yaml_data = templating.load_yaml(
        constants.SUBSCRIPTION_YAML
    )
    subscription_plan_approval = config.DEPLOYMENT.get(
        'subscription_plan_approval'
    )
    if subscription_plan_approval:
        subscription_yaml_data['spec']['installPlanApproval'] = (
            subscription_plan_approval
        )
    custom_channel = config.DEPLOYMENT.get('ocs_csv_channel')
    if custom_channel:
        logger.info(f"Custom channel will be used: {custom_channel}")
        subscription_yaml_data['spec']['channel'] = custom_channel
    else:
        logger.info(f"Default channel will be used: {default_channel}")
        subscription_yaml_data['spec']['channel'] = default_channel
    if config.DEPLOYMENT.get('stage'):
        subscription_yaml_data['spec']['source'] = (
            constants.OPERATOR_SOURCE_NAME
        )
    if config.DEPLOYMENT.get('live_deployment'):
        subscription_yaml_data['spec']['source'] = (
            config.DEPLOYMENT.get(
                'live_content_source', defaults.LIVE_CONTENT_SOURCE
            )
        )
    # delete=False: the manifest must survive for the oc command below
    subscription_manifest = tempfile.NamedTemporaryFile(
        mode='w+', prefix='subscription_manifest', delete=False
    )
    templating.dump_data_to_temp_yaml(
        subscription_yaml_data, subscription_manifest.name
    )
    run_cmd(f"oc create -f {subscription_manifest.name}")
    # CLEANUP: subscription_plan_approval was previously re-read from
    # config here although it was already fetched above
    if subscription_plan_approval == 'Manual':
        wait_for_install_plan_and_approve(self.namespace)
def deploy_with_external_mode(self):
    """
    This function handles the deployment of OCS on
    external/indpendent RHCS cluster

    Creates the catalog source and subscription, waits for every
    relevant operator CSV (odf-operator and ocs-operator on OCS >= 4.9),
    creates the external-cluster secret and StorageCluster CR, then runs
    post-deploy validation and sets up the Ceph toolbox.
    """
    live_deployment = config.DEPLOYMENT.get("live_deployment")
    logger.info("Deploying OCS with external mode RHCS")
    ui_deployment = config.DEPLOYMENT.get("ui_deployment")
    if not ui_deployment:
        logger.info("Creating namespace and operator group.")
        run_cmd(f"oc create -f {constants.OLM_YAML}")
    # Non-live deployments install from our own catalog source
    if not live_deployment:
        create_catalog_source()
    self.subscribe_ocs()
    operator_selector = get_selector_for_ocs_operator()
    subscription_plan_approval = config.DEPLOYMENT.get("subscription_plan_approval")
    # For OCS >= 4.9 both odf-operator and ocs-operator CSVs must succeed
    ocs_version = version.get_semantic_ocs_version_from_config()
    if ocs_version >= version.VERSION_4_9:
        ocs_operator_names = [
            defaults.ODF_OPERATOR_NAME,
            defaults.OCS_OPERATOR_NAME,
        ]
    else:
        ocs_operator_names = [defaults.OCS_OPERATOR_NAME]
    channel = config.DEPLOYMENT.get("ocs_csv_channel")
    for ocs_operator_name in ocs_operator_names:
        package_manifest = PackageManifest(
            resource_name=ocs_operator_name,
            selector=operator_selector,
            subscription_plan_approval=subscription_plan_approval,
        )
        package_manifest.wait_for_resource(timeout=300)
        csv_name = package_manifest.get_current_csv(channel=channel)
        csv = CSV(resource_name=csv_name, namespace=self.namespace)
        csv.wait_for_phase("Succeeded", timeout=720)
    # Set rook log level
    self.set_rook_log_level()
    # Create secret for external cluster
    create_external_secret()
    # Create the external-mode StorageCluster CR
    cluster_data = templating.load_yaml(constants.EXTERNAL_STORAGE_CLUSTER_YAML)
    cluster_data["metadata"]["name"] = config.ENV_DATA["storage_cluster_name"]
    # delete=False: the manifest must survive for the oc command below
    cluster_data_yaml = tempfile.NamedTemporaryFile(
        mode="w+", prefix="external_cluster_storage", delete=False
    )
    templating.dump_data_to_temp_yaml(cluster_data, cluster_data_yaml.name)
    run_cmd(f"oc create -f {cluster_data_yaml.name}", timeout=2400)
    self.external_post_deploy_validation()
    setup_ceph_toolbox()
def get_olm_and_subscription_manifest(self):
    """
    This method prepare manifest for deploy OCS operator and subscription.

    Returns:
        tuple: Path to olm deploy and subscription manifest
    """
    image = config.DEPLOYMENT.get('ocs_registry_image', '')
    # BUG FIX: split(':') mangled registry URLs that include a port
    # (e.g. "registry:5000/ns/image:tag" -> image "registry"); split only
    # on the last colon and treat it as a tag separator only when the
    # trailing part contains no '/'.
    image_and_tag = image.rsplit(':', 1)
    if len(image_and_tag) == 2 and '/' not in image_and_tag[1]:
        image, image_tag = image_and_tag
    else:
        image_tag = None
    if not image_tag and config.REPORTING.get("us_ds") == 'DS':
        image_tag = get_latest_ds_olm_tag()
    ocs_operator_olm = config.DEPLOYMENT['ocs_operator_olm']
    olm_data_generator = templating.load_yaml(
        ocs_operator_olm, multi_document=True
    )
    olm_yaml_data = []
    subscription_yaml_data = []
    cs_name = constants.OPERATOR_CATALOG_SOURCE_NAME
    # TODO: Once needed we can also set the channel for the subscription
    # from config.DEPLOYMENT.get('ocs_csv_channel')
    for yaml_doc in olm_data_generator:
        # Rewrite the CatalogSource image when an override was configured
        change_cs_condition = (
            (image or image_tag) and yaml_doc['kind'] == 'CatalogSource'
            and yaml_doc['metadata']['name'] == cs_name
        )
        if change_cs_condition:
            image_from_spec = yaml_doc['spec']['image']
            image = image if image else image_from_spec.rsplit(':', 1)[0]
            yaml_doc['spec']['image'] = (
                f"{image}:{image_tag if image_tag else 'latest'}"
            )
        # Subscription documents are collected into their own manifest
        if yaml_doc.get('kind') == 'Subscription':
            subscription_yaml_data.append(yaml_doc)
            continue
        olm_yaml_data.append(yaml_doc)
    # delete=False: manifests must survive for the caller's oc commands
    olm_manifest = tempfile.NamedTemporaryFile(
        mode='w+', prefix='olm_manifest', delete=False
    )
    templating.dump_data_to_temp_yaml(
        olm_yaml_data, olm_manifest.name
    )
    subscription_manifest = tempfile.NamedTemporaryFile(
        mode='w+', prefix='subscription_manifest', delete=False
    )
    templating.dump_data_to_temp_yaml(
        subscription_yaml_data, subscription_manifest.name
    )
    return olm_manifest.name, subscription_manifest.name
def create_stage_operator_source(self):
    """
    This prepare operator source for OCS deployment from stage.

    Logs in to quay to obtain an auth token, applies a Secret holding it,
    then applies an OperatorSource pointing at the stage registry
    namespace. `oc apply` is used so reruns are idempotent.
    """
    logger.info("Adding Stage Secret")
    # generate quay token
    credentials = {
        "user": {
            "username": config.DEPLOYMENT["stage_quay_username"],
            "password": config.DEPLOYMENT["stage_quay_password"],
        }
    }
    token = requests.post(
        url='https://quay.io/cnr/api/v1/users/login',
        data=json.dumps(credentials),
        headers={'Content-Type': 'application/json'},
    ).json()['token']
    stage_ns = config.DEPLOYMENT["stage_namespace"]
    # create Secret named after the stage namespace
    stage_os_secret = templating.load_yaml(
        constants.STAGE_OPERATOR_SOURCE_SECRET_YAML
    )
    stage_os_secret['metadata']['name'] = f"secret-{stage_ns}"
    stage_os_secret['stringData']['token'] = token
    # delete=False: the dumped manifest must survive for the oc call below
    stage_secret_data_yaml = tempfile.NamedTemporaryFile(
        mode='w+', prefix=f"secret-{stage_ns}", delete=False
    )
    templating.dump_data_to_temp_yaml(
        stage_os_secret, stage_secret_data_yaml.name
    )
    run_cmd(f"oc apply -f {stage_secret_data_yaml.name}")
    logger.info("Adding Stage Operator Source")
    # create Operator Source referencing the secret created above
    stage_os = templating.load_yaml(
        constants.STAGE_OPERATOR_SOURCE_YAML
    )
    stage_os['metadata']['name'] = stage_ns
    stage_os['spec']['registryNamespace'] = stage_ns
    stage_os['spec']['displayName'] = stage_ns
    stage_os['spec']['authorizationToken']['secretName'] = (
        f"secret-{stage_ns}"
    )
    stage_os_data_yaml = tempfile.NamedTemporaryFile(
        mode='w+', prefix='secret', delete=False
    )
    templating.dump_data_to_temp_yaml(
        stage_os, stage_os_data_yaml.name
    )
    run_cmd(f"oc apply -f {stage_os_data_yaml.name}")
def create_optional_operators_catalogsource_non_ga(force=False):
    """
    Creating optional operators CatalogSource and ImageContentSourcePolicy
    for non-ga OCP.

    Args:
        force (bool): enable/disable lso catalog setup
    """
    # Nothing to do on GA OCP unless forced
    ocp_version = version.get_semantic_ocp_version_from_config()
    ocp_ga_version = get_ocp_ga_version(ocp_version)
    if ocp_ga_version and not force:
        return
    optional_operators_data = list(
        templating.load_yaml(constants.LOCAL_STORAGE_OPTIONAL_OPERATORS,
                             multi_document=True))
    # delete=False: manifest must survive for the oc command below
    optional_operators_yaml = tempfile.NamedTemporaryFile(
        mode="w+", prefix="optional_operators", delete=False)
    # Allow overriding the catalog source index image from config
    if config.DEPLOYMENT.get("optional_operators_image"):
        for _dict in optional_operators_data:
            if _dict.get("kind").lower() == "catalogsource":
                _dict["spec"]["image"] = config.DEPLOYMENT.get(
                    "optional_operators_image")
    if config.DEPLOYMENT.get("disconnected"):
        # in case of disconnected environment, we have to mirror all the
        # optional_operators images
        icsp = None
        for _dict in optional_operators_data:
            if _dict.get("kind").lower() == "catalogsource":
                index_image = _dict["spec"]["image"]
            if _dict.get("kind").lower() == "imagecontentsourcepolicy":
                icsp = _dict
        mirrored_index_image = (f"{config.DEPLOYMENT['mirror_registry']}/"
                                f"{index_image.split('/', 1)[-1]}")
        prune_and_mirror_index_image(
            index_image,
            mirrored_index_image,
            constants.DISCON_CL_REQUIRED_PACKAGES,
            icsp,
        )
        # NOTE(review): `_dict` here is the loop variable leaking out of the
        # for-loop above, i.e. the *last* document in the yaml — this relies
        # on the CatalogSource (or the document whose image should be
        # rewritten) being last in the file; confirm the document ordering.
        _dict["spec"]["image"] = mirrored_index_image
    templating.dump_data_to_temp_yaml(optional_operators_data,
                                      optional_operators_yaml.name)
    # Log the final manifest content for debugging
    with open(optional_operators_yaml.name, "r") as f:
        logger.info(f.read())
    logger.info(
        "Creating optional operators CatalogSource and ImageContentSourcePolicy"
    )
    run_cmd(f"oc create -f {optional_operators_yaml.name}")
    # Mirroring/ICSP changes roll the machine config pools; wait for them
    wait_for_machineconfigpool_status("all")
def create_resource(self, resource_data, prefix=None):
    """
    Given a dictionary of resource data, this function will creates
    oc resource

    Args:
        resource_data (dict): yaml dictionary for resource
        prefix (str): prefix for NamedTemporaryFile
    """
    # delete=False so the manifest survives for the oc command below
    manifest = tempfile.NamedTemporaryFile(
        mode="w+", prefix=prefix, delete=False
    )
    templating.dump_data_to_temp_yaml(resource_data, manifest.name)
    run_cmd(f"oc create -f {manifest.name}", timeout=300)
def mcg_only_deployment():
    """
    Creates cluster with MCG only deployment
    """
    logger.info("Creating storage cluster with MCG only deployment")
    cluster_data = templating.load_yaml(constants.STORAGE_CLUSTER_YAML)
    # Run the multi-cloud gateway standalone and drop the device sets,
    # since no Ceph-backed storage is deployed in MCG-only mode.
    cluster_data["spec"]["multiCloudGateway"] = {
        "reconcileStrategy": "standalone"
    }
    del cluster_data["spec"]["storageDeviceSets"]
    manifest = tempfile.NamedTemporaryFile(
        mode="w+", prefix="cluster_storage", delete=False
    )
    templating.dump_data_to_temp_yaml(cluster_data, manifest.name)
    run_cmd(f"oc create -f {manifest.name}", timeout=1200)
def deploy_ocp(self, log_cli_level="DEBUG"):
    """
    Deployment specific to OCP cluster on vSphere platform

    Args:
        log_cli_level (str): openshift installer's log level (default: "DEBUG")

    """
    prefix = config.ENV_DATA.get("cluster_name").split("-")[0]
    # production jobs and explicitly forced deployments may share a prefix
    allow_same_prefix = prefix.startswith(
        tuple(constants.PRODUCTION_JOBS_PREFIX)
    ) or config.DEPLOYMENT.get("force_deploy_multiple_clusters")
    if not allow_same_prefix and self.check_cluster_existence(prefix):
        raise exceptions.SameNamePrefixClusterAlreadyExistsException(
            f"Cluster with name prefix {prefix} already exists. "
            f"Please destroy the existing cluster for a new cluster "
            f"deployment"
        )
    super(VSPHEREUPI, self).deploy_ocp(log_cli_level)
    if config.ENV_DATA.get("scale_up"):
        logger.info("Adding extra nodes to cluster")
        self.add_nodes()
        if not config.ENV_DATA.get("mixed_cluster"):
            # remove RHCOS compute nodes
            rhcos_nodes = get_typed_worker_nodes()
            logger.info(
                f"RHCOS compute nodes to delete: "
                f"{[node.name for node in rhcos_nodes]}"
            )
            logger.info("Removing RHCOS compute nodes from a cluster")
            remove_nodes(rhcos_nodes)
    if config.DEPLOYMENT.get("thick_sc"):
        sc_data = templating.load_yaml(constants.VSPHERE_THICK_STORAGECLASS_YAML)
        sc_manifest = tempfile.NamedTemporaryFile(
            mode="w+", prefix="storageclass", delete=False
        )
        if config.DEPLOYMENT.get("eager_zeroed_thick_sc"):
            diskformat = "eagerzeroedthick"
        else:
            diskformat = "zeroedthick"
        sc_data["parameters"]["diskformat"] = diskformat
        templating.dump_data_to_temp_yaml(sc_data, sc_manifest.name)
        run_cmd(f"oc create -f {sc_manifest.name}")
        self.DEFAULT_STORAGECLASS = "thick"
def backup_deployments(self):
    """
    Creates a backup of all deployments in the `openshift-storage` namespace

    Each deployment is fetched by name and dumped as YAML into
    ``self.backup_dir`` as ``<deployment-name>.yaml``.
    """
    deployments = self.dep_ocp.get("-o name", out_yaml_format=False)
    # "oc get -o name" output looks like "deployment.apps/<name>"; keep only
    # the bare resource name.
    # BUG FIX: the previous lstrip("deployment.apps") call treated its
    # argument as a character *set*, not a prefix, and only worked by
    # accident because "/" terminated the strip — split on "/" instead.
    deployment_names = [
        full_name.split("/")[-1] for full_name in str(deployments).split()
    ]
    for deployment in deployment_names:
        deployment_get = self.dep_ocp.get(resource_name=deployment)
        deployment_yaml = join(self.backup_dir, deployment + ".yaml")
        templating.dump_data_to_temp_yaml(deployment_get, deployment_yaml)
def create_ocs_secret(namespace):
    """
    Function for creation of pull secret for OCS. (Mostly for ibmcloud purpose)

    Args:
        namespace (str): namespace where to create the secret

    """
    secret_data = templating.load_yaml(constants.OCS_SECRET_YAML)
    secret_data["data"][".dockerconfigjson"] = config.DEPLOYMENT[
        "ocs_secret_dockerconfigjson"
    ]
    manifest = tempfile.NamedTemporaryFile(
        mode="w+", prefix="ocs_secret", delete=False
    )
    templating.dump_data_to_temp_yaml(secret_data, manifest.name)
    exec_cmd(f"oc apply -f {manifest.name} -n {namespace}", timeout=2400)
def factory(db2u_project_name):
    """
    Create the Security Context Constraints for a db2u project.

    Args:
        db2u_project_name (str): Name of the db2u project to be created.
    """
    log.info("Creating Security Context Constraints")
    ocp_proj = ocp.OCP(namespace=db2u_project_name)
    template_yaml_dict = templating.load_yaml(constants.IBM_BDI_SCC_WORKLOAD_YAML)
    scc_yaml = tempfile.NamedTemporaryFile(
        mode="w+", prefix="scc_yaml_", delete=False
    )
    # keep a reference so the enclosing fixture can clean the file up later
    temp_scc_yaml.append(scc_yaml)
    templating.dump_data_to_temp_yaml(template_yaml_dict, scc_yaml.name)
    # BUG FIX: previously temp_scc_yaml[0] was dereferenced here, which on a
    # second factory invocation points at the *first* call's tempfile; use
    # the file created by this call instead.
    ocp_proj.exec_oc_cmd(command=f"create -f {scc_yaml.name}")
def create_catalog_source(self):
    """
    This prepare catalog source manifest for deploy OCS operator from
    quay registry.

    The registry image (and optional tag) come from
    ``config.DEPLOYMENT['ocs_registry_image']``; when no tag is given and the
    run targets downstream builds, the latest DS OLM tag is resolved.
    """
    logger.info("Adding CatalogSource")
    image = config.DEPLOYMENT.get('ocs_registry_image', '')
    upgrade = config.DEPLOYMENT.get('upgrade', False)
    # BUG FIX: splitting on the first ':' mangled references whose registry
    # carries a port (e.g. "registry:5000/ns/img:tag").  A tag is present
    # only when the *last path component* contains a ':'.
    image_tag = None
    if ':' in image.rsplit('/', 1)[-1]:
        image, image_tag = image.rsplit(':', 1)
    if not image_tag and config.REPORTING.get("us_ds") == 'DS':
        image_tag = get_latest_ds_olm_tag(
            upgrade, latest_tag=config.DEPLOYMENT.get(
                'default_latest_tag', 'latest'
            )
        )
    catalog_source_data = templating.load_yaml(
        constants.CATALOG_SOURCE_YAML
    )
    cs_name = constants.OPERATOR_CATALOG_SOURCE_NAME
    # TODO: Once needed we can also set the channel for the subscription
    # from config.DEPLOYMENT.get('ocs_csv_channel')
    change_cs_condition = (
        (image or image_tag)
        and catalog_source_data['kind'] == 'CatalogSource'
        and catalog_source_data['metadata']['name'] == cs_name
    )
    if change_cs_condition:
        default_image = config.DEPLOYMENT['default_ocs_registry_image']
        # default_ocs_registry_image is expected to end with an explicit tag
        image = image if image else default_image.rsplit(':', 1)[0]
        catalog_source_data['spec']['image'] = (
            f"{image}:{image_tag if image_tag else 'latest'}"
        )
    catalog_source_manifest = tempfile.NamedTemporaryFile(
        mode='w+', prefix='catalog_source_manifest', delete=False
    )
    templating.dump_data_to_temp_yaml(
        catalog_source_data, catalog_source_manifest.name
    )
    run_cmd(f"oc create -f {catalog_source_manifest.name}", timeout=2400)
    catalog_source = CatalogSource(
        resource_name=constants.OPERATOR_CATALOG_SOURCE_NAME,
        namespace='openshift-marketplace',
    )
    # Wait for catalog source is ready
    catalog_source.wait_for_state("READY")
def test_upgrade():
    """
    Upgrade the OCS operator by pointing its CatalogSource at a newer image
    tag, then wait for the new CSV to appear and reach the Succeeded phase,
    and finally verify the installation.
    """
    namespace = config.ENV_DATA['cluster_namespace']
    ocs_catalog = CatalogSource(
        resource_name=OPERATOR_CATALOG_SOURCE_NAME,
        namespace="openshift-marketplace",
    )
    image_url = ocs_catalog.get_image_url()
    image_tag = ocs_catalog.get_image_name()
    if config.DEPLOYMENT.get('upgrade_to_latest', True):
        new_image_tag = get_latest_ds_olm_tag()
    else:
        new_image_tag = get_next_version_available_for_upgrade(image_tag)
    cs_data = deepcopy(ocs_catalog.data)
    cs_data['spec']['image'] = ':'.join([image_url, new_image_tag])
    package_manifest = PackageManifest(resource_name=OCS_OPERATOR_NAME)
    csv_name_pre_upgrade = package_manifest.get_current_csv()
    log.info(f"CSV name before upgrade is: {csv_name_pre_upgrade}")
    with NamedTemporaryFile() as cs_yaml:
        dump_data_to_temp_yaml(cs_data, cs_yaml.name)
        ocs_catalog.apply(cs_yaml.name)
    # Wait for package manifest is ready
    package_manifest.wait_for_resource()
    attempts = 145
    # BUG FIX: the original `range(1, attempts)` never reached `attempts`,
    # so the timeout branch was unreachable and exhaustion fell through
    # with the stale pre-upgrade CSV name.  Use for/else to raise instead.
    for attempt in range(1, attempts + 1):
        log.info(f"Attempt {attempt}/{attempts} to check CSV upgraded.")
        package_manifest.reload_data()
        csv_name_post_upgrade = package_manifest.get_current_csv()
        if csv_name_post_upgrade == csv_name_pre_upgrade:
            log.info(f"CSV is still: {csv_name_post_upgrade}")
            sleep(5)
        else:
            log.info(f"CSV now upgraded to: {csv_name_post_upgrade}")
            break
    else:
        raise TimeoutException("No new CSV found after upgrade!")
    csv = CSV(
        resource_name=csv_name_post_upgrade, namespace=namespace
    )
    log.info(
        f"Waiting for CSV {csv_name_post_upgrade} to be in succeeded state"
    )
    csv.wait_for_phase("Succeeded", timeout=400)
    ocs_install_verification(timeout=600)
def setup_ceph_debug():
    """
    Set Ceph to run in debug log level using a ConfigMap.

    This functionality is available starting OCS 4.7.
    """
    configmap_data = templating.load_yaml(
        constants.CEPH_CONFIG_DEBUG_LOG_LEVEL_CONFIGMAP
    )
    # append the debug values to the base rook-ceph config
    configmap_data["data"]["config"] = (
        constants.ROOK_CEPH_CONFIG_VALUES + constants.CEPH_DEBUG_CONFIG_VALUES
    )
    configmap_yaml = tempfile.NamedTemporaryFile(
        mode="w+", prefix="config_map", delete=False
    )
    templating.dump_data_to_temp_yaml(configmap_data, configmap_yaml.name)
    log.info(
        "Setting Ceph to work in debug log level using a new ConfigMap resource"
    )
    run_cmd(f"oc create -f {configmap_yaml.name}")
def create_obc(self):
    """
    OBC creation for RGW and Nooba

    only applicable for external cluster
    """
    # both OBCs go through the same load -> dump -> oc create sequence
    obc_specs = (
        (constants.RGW_OBC_YAML, 'obc_rgw_data', "Creating OBC for rgw"),
        (constants.MCG_OBC_YAML, 'obc_mcg_data', "create OBC for mcg"),
    )
    for obc_template, file_prefix, log_msg in obc_specs:
        obc_data = templating.load_yaml(obc_template)
        manifest = tempfile.NamedTemporaryFile(
            mode='w+', prefix=file_prefix, delete=False
        )
        templating.dump_data_to_temp_yaml(obc_data, manifest.name)
        logger.info(log_msg)
        run_cmd(f"oc create -f {manifest.name}", timeout=2400)